/*
 *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/utility/simulcast_test_fixture_impl.h"

#include <algorithm>
#include <map>
#include <memory>
#include <vector>

#include "api/video/encoded_image.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/checks.h"
#include "test/gtest.h"

using ::testing::_;
using ::testing::AllOf;
using ::testing::Field;
using ::testing::Return;

namespace webrtc {
namespace test {

namespace {

const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
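// Per-stream bitrate constants below are in kbps; SetRates() converts the
// total to bps for the rate allocator.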
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const float kMaxFramerates[kNumberOfSimulcastStreams] = {30, 30, 30};
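// Per-stream temporal layer counts: three layers on every stream by default,
// or all zeros when no explicit temporal layering is configured.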
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
const int kNoTemporalLayerProfile[3] = {0, 0, 0};

const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
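// The Settings arguments are (capabilities, number_of_cores,
// max_payload_size): a single core and a 1200-byte max payload.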

template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  expected_values[0] = value0;
  expected_values[1] = value1;
  expected_values[2] = value2;
}

enum PlaneType {
  kYPlane = 0,
  kUPlane = 1,
  kVPlane = 2,
  kNumOfPlanes = 3,
};

}  // namespace

class SimulcastTestFixtureImpl::TestEncodedImageCallback
    : public EncodedImageCallback {
 public:
  TestEncodedImageCallback() {
    memset(temporal_layer_, -1, sizeof(temporal_layer_));
    memset(layer_sync_, false, sizeof(layer_sync_));
  }

  Result OnEncodedImage(const EncodedImage& encoded_image,
                        const CodecSpecificInfo* codec_specific_info) override {
    bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
    bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264);
    // Only store the base layer.
    if (encoded_image.SpatialIndex().value_or(0) == 0) {
      if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
        encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
            encoded_image.data(), encoded_image.size()));
        encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
      } else {
        encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
            encoded_image.data(), encoded_image.size()));
      }
    }
    if (is_vp8) {
      layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.VP8.layerSync;
      temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.VP8.temporalIdx;
    } else if (is_h264) {
      layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.H264.base_layer_sync;
      temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
          codec_specific_info->codecSpecific.H264.temporal_idx;
    }
    return Result(Result::OK, encoded_image.Timestamp());
  }
  // Only meaningful for codecs that report temporal layer info here
  // (VP8 and H264; see OnEncodedImage() above).
  void GetLastEncodedFrameInfo(int* temporal_layer,
                               bool* layer_sync,
                               int stream) {
    *temporal_layer = temporal_layer_[stream];
    *layer_sync = layer_sync_[stream];
  }
  void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
    *encoded_key_frame = encoded_key_frame_;
  }
  void GetLastEncodedFrame(EncodedImage* encoded_frame) {
    *encoded_frame = encoded_frame_;
  }

 private:
  EncodedImage encoded_key_frame_;
  EncodedImage encoded_frame_;
  int temporal_layer_[kNumberOfSimulcastStreams];
  bool layer_sync_[kNumberOfSimulcastStreams];
};

class SimulcastTestFixtureImpl::TestDecodedImageCallback
    : public DecodedImageCallback {
 public:
  TestDecodedImageCallback() : decoded_frames_(0) {}
  int32_t Decoded(VideoFrame& decoded_image) override {
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        decoded_image.video_frame_buffer()->ToI420();
    for (int i = 0; i < decoded_image.width(); ++i) {
      EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
    }

    // TODO(mikhal): Verify the difference between U,V and the original.
    for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
      EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
      EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
    }
    decoded_frames_++;
    return 0;
  }
  int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
    RTC_DCHECK_NOTREACHED();
    return -1;
  }
  void Decoded(VideoFrame& decoded_image,
               absl::optional<int32_t> decode_time_ms,
               absl::optional<uint8_t> qp) override {
    Decoded(decoded_image);
  }
  int DecodedFrames() { return decoded_frames_; }

 private:
  int decoded_frames_;
};

namespace {

void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
  for (int i = 0; i < height; i++, data += stride) {
    // Fill the image area with `value` and zero any padding between the image
    // width and the stride, making it easy to distinguish image content from
    // stride padding.
    memset(data, value, width);
    memset(data + width, 0, stride - width);
  }
}

// Fills in an I420Buffer from `plane_colors`.
void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
                 int plane_colors[kNumOfPlanes]) {
  SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
           buffer->height(), buffer->StrideY());

  SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideU());

  SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideV());
}

void ConfigureStream(int width,
                     int height,
                     int max_bitrate,
                     int min_bitrate,
                     int target_bitrate,
                     float max_framerate,
                     SimulcastStream* stream,
                     int num_temporal_layers) {
  RTC_DCHECK(stream);
  stream->width = width;
  stream->height = height;
  stream->maxBitrate = max_bitrate;
  stream->minBitrate = min_bitrate;
  stream->targetBitrate = target_bitrate;
  stream->maxFramerate = max_framerate;
  if (num_temporal_layers >= 0) {
    stream->numberOfTemporalLayers = num_temporal_layers;
  }
  stream->qpMax = 45;
  stream->active = true;
}

}  // namespace

void SimulcastTestFixtureImpl::DefaultSettings(
    VideoCodec* settings,
    const int* temporal_layer_profile,
    VideoCodecType codec_type,
    bool reverse_layer_order) {
  RTC_CHECK(settings);
  *settings = {};
  settings->codecType = codec_type;
  settings->startBitrate = 300;
  settings->minBitrate = 30;
  settings->maxBitrate = 0;
  settings->maxFramerate = 30;
  settings->width = kDefaultWidth;
  settings->height = kDefaultHeight;
  settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
  settings->active = true;
  ASSERT_EQ(3, kNumberOfSimulcastStreams);
  int layer_order[3] = {0, 1, 2};
  if (reverse_layer_order) {
    layer_order[0] = 2;
    layer_order[2] = 0;
  }
  settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                       kDefaultOutlierFrameSizePercent};
  ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                  kMinBitrates[0], kTargetBitrates[0], kMaxFramerates[0],
                  &settings->simulcastStream[layer_order[0]],
                  temporal_layer_profile[0]);
  ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                  kMinBitrates[1], kTargetBitrates[1], kMaxFramerates[1],
                  &settings->simulcastStream[layer_order[1]],
                  temporal_layer_profile[1]);
  ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                  kMinBitrates[2], kTargetBitrates[2], kMaxFramerates[2],
                  &settings->simulcastStream[layer_order[2]],
                  temporal_layer_profile[2]);
  settings->SetFrameDropEnabled(true);
  if (codec_type == kVideoCodecVP8) {
    settings->VP8()->denoisingOn = true;
    settings->VP8()->automaticResizeOn = false;
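    // 3000 frames between forced key frames is roughly 100 seconds at 30 fps.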
    settings->VP8()->keyFrameInterval = 3000;
  } else {
    settings->H264()->keyFrameInterval = 3000;
  }
}

SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
    std::unique_ptr<VideoEncoderFactory> encoder_factory,
    std::unique_ptr<VideoDecoderFactory> decoder_factory,
    SdpVideoFormat video_format)
    : codec_type_(PayloadStringToCodecType(video_format.name)) {
  encoder_ = encoder_factory->CreateVideoEncoder(video_format);
  decoder_ = decoder_factory->CreateVideoDecoder(video_format);
  SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
                 ? kDefaultTemporalLayerProfile
                 : kNoTemporalLayerProfile);
}

SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
  encoder_->Release();
  decoder_->Release();
}

void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  VideoDecoder::Settings decoder_settings;
  decoder_settings.set_max_render_resolution({kDefaultWidth, kDefaultHeight});
  decoder_settings.set_codec_type(codec_type_);
  EXPECT_TRUE(decoder_->Configure(decoder_settings));
  input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
}

void SimulcastTestFixtureImpl::SetUpRateAllocator() {
  rate_allocator_.reset(new SimulcastRateAllocator(settings_));
}

void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
  encoder_->SetRates(VideoEncoder::RateControlParameters(
      rate_allocator_->Allocate(
          VideoBitrateAllocationParameters(bitrate_kbps * 1000, fps)),
      static_cast<double>(fps)));
}

void SimulcastTestFixtureImpl::RunActiveStreamsTest(
    const std::vector<bool> active_streams) {
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  UpdateActiveStreams(active_streams);
  // Give all streams sufficient bitrate so that stream activity, not the
  // bitrate allocation, determines which streams are encoded.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);

  ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
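  // Each frame advances the RTP timestamp by 3000 ticks: one frame interval
  // at 30 fps with the 90 kHz RTP video clock.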
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::UpdateActiveStreams(
    const std::vector<bool> active_streams) {
  ASSERT_EQ(static_cast<int>(active_streams.size()),
            kNumberOfSimulcastStreams);
  for (size_t i = 0; i < active_streams.size(); ++i) {
    settings_.simulcastStream[i].active = active_streams[i];
  }
  // Reinitialize the allocator and encoder with the new settings.
  // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
  // reconfiguration of the allocator and encoder. When the video bitrate
  // allocator supports updating active streams without reinitialization,
  // we can just call that here instead.
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
}

void SimulcastTestFixtureImpl::ExpectStream(VideoFrameType frame_type,
                                            int scaleResolutionDownBy) {
  EXPECT_CALL(
      encoder_callback_,
      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, frame_type),
                           Field(&EncodedImage::_encodedWidth,
                                 kDefaultWidth / scaleResolutionDownBy),
                           Field(&EncodedImage::_encodedHeight,
                                 kDefaultHeight / scaleResolutionDownBy)),
                     _))
      .Times(1)
      .WillRepeatedly(Return(
          EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
}

void SimulcastTestFixtureImpl::ExpectStreams(
    VideoFrameType frame_type,
    const std::vector<bool> expected_streams_active) {
  ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
            kNumberOfSimulcastStreams);
  if (expected_streams_active[0]) {
    ExpectStream(frame_type, 4);
  }
  if (expected_streams_active[1]) {
    ExpectStream(frame_type, 2);
  }
  if (expected_streams_active[2]) {
    ExpectStream(frame_type, 1);
  }
}

void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
                                             int expected_video_streams) {
  ASSERT_GE(expected_video_streams, 0);
  ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
  std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
  for (int i = 0; i < expected_video_streams; ++i) {
    expected_streams_active[i] = true;
  }
  ExpectStreams(frame_type, expected_streams_active);
}

void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
    TestEncodedImageCallback* encoder_callback,
    const int* expected_temporal_idx,
    const bool* expected_layer_sync,
    int num_spatial_layers) {
  int temporal_layer = -1;
  bool layer_sync = false;
  for (int i = 0; i < num_spatial_layers; i++) {
    encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
    EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
    EXPECT_EQ(expected_layer_sync[i], layer_sync);
  }
}

// We currently expect all active streams to generate a key frame even though
// a key frame was only requested for some of them.
void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  frame_types[0] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[1] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[2] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
  // We should always encode the base layer.
  SetRates(kMinBitrates[0] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
  // We have just enough bitrate for the first stream; padding is needed for
  // the other two.
  SetRates(kMinBitrates[0], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
  // We are just below the limit for sending the second stream, so we should
  // get the first stream maxed out (at `maxBitrate`) and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestPaddingOneStream() {
  // We have just enough bitrate to send two streams, so padding is needed for
  // one stream.
  SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
  // We are just below the limit for sending the third stream, so we should
  // get the first stream's rate maxed out at `targetBitrate` and the second
  // at `maxBitrate`.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestSendAllStreams() {
  // We have just enough bitrate to send all streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestDisablingStreams() {
  // We should get three media streams.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get the first stream and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We don't have enough bitrate for the thumbnail stream, but we should get
  // it anyway with the current configuration.
  SetRates(kTargetBitrates[0] - 1, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should get all three streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestActiveStreams() {
  // All streams on.
  RunActiveStreamsTest({true, true, true});
  // All streams off.
  RunActiveStreamsTest({false, false, false});
  // Low stream off.
  RunActiveStreamsTest({false, true, true});
  // Middle stream off.
  RunActiveStreamsTest({true, false, true});
  // High stream off.
  RunActiveStreamsTest({true, true, false});
  // Only low stream turned on.
  RunActiveStreamsTest({true, false, false});
  // Only middle stream turned on.
  RunActiveStreamsTest({false, true, false});
  // Only high stream turned on.
  RunActiveStreamsTest({false, false, true});
}

void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
  const int* temporal_layer_profile = nullptr;
  // Disable all streams except the last and set the bitrate of the last to
  // 100 kbps. This verifies the way GTP switches to screenshare mode.
  if (codec_type_ == kVideoCodecVP8) {
    settings_.VP8()->numberOfTemporalLayers = 1;
    temporal_layer_profile = kDefaultTemporalLayerProfile;
  } else {
    settings_.H264()->numberOfTemporalLayers = 1;
    temporal_layer_profile = kNoTemporalLayerProfile;
  }
  settings_.maxBitrate = 100;
  settings_.startBitrate = 100;
  settings_.width = width;
  settings_.height = height;
  for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
    settings_.simulcastStream[i].maxBitrate = 0;
    settings_.simulcastStream[i].width = settings_.width;
    settings_.simulcastStream[i].height = settings_.height;
    settings_.simulcastStream[i].numberOfTemporalLayers = 1;
  }
  // Set the input image to the new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();

  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());

  // The for loop above did not set the bitrate of the highest layer.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
      0;
  // The highest layer has to correspond to the non-simulcast resolution.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
      settings_.width;
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
      settings_.height;
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));

  // Encode one frame and verify.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  EXPECT_CALL(
      encoder_callback_,
      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
                                 VideoFrameType::kVideoFrameKey),
                           Field(&EncodedImage::_encodedWidth, width),
                           Field(&EncodedImage::_encodedHeight, height)),
                     _))
      .Times(1)
      .WillRepeatedly(Return(
          EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Switch back.
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  // Start at the lowest bitrate to enable the base stream.
  settings_.startBitrate = kMinBitrates[0];
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  SetRates(settings_.startBitrate, 30);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  // Resize `input_frame_` to the new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}

void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
  SwitchingToOneStream(1024, 768);
}

void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
  SwitchingToOneStream(1023, 769);
}

void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
  SwitchingToOneStream(4, 4);
}

// Test the layer pattern and sync flag for various spatial-temporal patterns.
// 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same
// temporal_layer id and layer_sync are expected for all streams.
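// With three temporal layers, the per-frame temporal index follows the
// repeating pattern 0, 2, 1, 2, ..., as the frame-by-frame expectations below
// spell out.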
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
  bool is_h264 = codec_type_ == kVideoCodecH264;
  TestEncodedImageCallback encoder_callback;
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.

  int expected_temporal_idx[3] = {-1, -1, -1};
  bool expected_layer_sync[3] = {false, false, false};

  // First frame: #0.
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
  SetExpectedValues3<bool>(!is_h264, !is_h264, !is_h264, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #1.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
  SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #2.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
  SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #3.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
  SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #4.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
  SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #5.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
  SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}

// Test the layer pattern and sync flag for various spatial-temporal patterns.
// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
// 1 temporal layer for highest resolution.
// For this profile, we expect the temporal index pattern to be:
// 1st stream: 0, 2, 1, 2, ....
// 2nd stream: 0, 1, 0, 1, ...
// 3rd stream: -1, -1, -1, -1, ....
// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
// should always have temporal layer idx set to kNoTemporalIdx = -1.
// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
// TODO(marpan): Although this seems safe for now, we should fix this.
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
  EXPECT_EQ(codec_type_, kVideoCodecVP8);
  int temporal_layer_profile[3] = {3, 2, 1};
  SetUpCodec(temporal_layer_profile);
  TestEncodedImageCallback encoder_callback;
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.

  int expected_temporal_idx[3] = {-1, -1, -1};
  bool expected_layer_sync[3] = {false, false, false};

  // First frame: #0.
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #1.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #2.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #3.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #4.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

  // Next frame: #5.
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
  SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
  SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
  VerifyTemporalIdxAndSyncForAllSpatialLayers(
      &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}

void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
  TestEncodedImageCallback encoder_callback;
  TestDecodedImageCallback decoder_callback;
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback);

  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  // Set up two (possibly) problematic use cases for stride:
  // 1. stride > width  2. stride_uv != stride_y / 2.
  int stride_y = kDefaultWidth + 20;
  int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
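  // With kDefaultWidth = 1280 this gives stride_y = 1300 (> 1280) and
  // stride_uv = 645, which differs from stride_y / 2 = 650.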
  input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
                                     stride_uv, stride_uv);
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());

  // Set color.
  int plane_offset[kNumOfPlanes];
  plane_offset[kYPlane] = kColorY;
  plane_offset[kUPlane] = kColorU;
  plane_offset[kVPlane] = kColorV;
  CreateImage(input_buffer_, plane_offset);

  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));

  // Change color.
  plane_offset[kYPlane] += 1;
  plane_offset[kUPlane] += 1;
  plane_offset[kVPlane] += 1;
  CreateImage(input_buffer_, plane_offset);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));

  EncodedImage encoded_frame;
  // The first encoded frame is a key frame, the second a delta frame.
  encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
  EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
  encoder_callback.GetLastEncodedFrame(&encoded_frame);
  decoder_->Decode(encoded_frame, false, 0);
  EXPECT_EQ(2, decoder_callback.DecodedFrames());
}

void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
  MockEncodedImageCallback encoder_callback;
  MockDecodedImageCallback decoder_callback;

  EncodedImage encoded_frame[3];
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback);

  EXPECT_CALL(encoder_callback, OnEncodedImage(_, _))
      .Times(3)
      .WillRepeatedly(
          ::testing::Invoke([&](const EncodedImage& encoded_image,
                                const CodecSpecificInfo* codec_specific_info) {
            EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);

            size_t index = encoded_image.SpatialIndex().value_or(0);
            encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
                encoded_image.data(), encoded_image.size()));
            encoded_frame[index]._frameType = encoded_image._frameType;
            return EncodedImageCallback::Result(
                EncodedImageCallback::Result::OK, 0);
          }));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));

  EXPECT_CALL(decoder_callback, Decoded(_, _, _))
      .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
                                     absl::optional<int32_t> decode_time_ms,
                                     absl::optional<uint8_t> qp) {
        EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
        EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
      }));
  EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));

  EXPECT_CALL(decoder_callback, Decoded(_, _, _))
      .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
                                     absl::optional<int32_t> decode_time_ms,
                                     absl::optional<uint8_t> qp) {
        EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
        EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
      }));
  EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));

  EXPECT_CALL(decoder_callback, Decoded(_, _, _))
      .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
                                     absl::optional<int32_t> decode_time_ms,
                                     absl::optional<uint8_t> qp) {
        EXPECT_EQ(decodedImage.width(), kDefaultWidth);
        EXPECT_EQ(decodedImage.height(), kDefaultHeight);
      }));
  EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
}

void SimulcastTestFixtureImpl::
    TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() {
  VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo();
  EXPECT_EQ(encoder_info.fps_allocation[0].size(),
            static_cast<size_t>(kDefaultTemporalLayerProfile[0]));
  EXPECT_EQ(encoder_info.fps_allocation[1].size(),
            static_cast<size_t>(kDefaultTemporalLayerProfile[1]));
  EXPECT_EQ(encoder_info.fps_allocation[2].size(),
            static_cast<size_t>(kDefaultTemporalLayerProfile[2]));
}
}  // namespace test
}  // namespace webrtc