1 // Copyright 2023 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "DecodeInterface"
7
#include <v4l2_codec2/components/DecodeInterface.h>

#include <algorithm>

#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
#include <android/hardware/graphics/common/1.0/types.h>
#include <log/log.h>
#include <media/stagefright/foundation/MediaDefs.h>

#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
18
19 namespace android {
20 namespace {
21
// Pixel area thresholds used to pick the input bitstream buffer size.
// 1088 is 1080 rounded up to a multiple of 16 (macroblock alignment).
constexpr size_t k1080pArea = 1920 * 1088;
constexpr size_t k4KArea = 3840 * 2160;
// Input bitstream buffer size for up to 1080p streams.
// Set it to 2MB since it is possible for the encoded bitstream to exceed the size of 1MB
// when using higher bitrates, like 1Mb/s on slower devices. Also, this matches the
// Chrome browser, which uses a 2MB buffer size for a 1080p stream, ref:
// https://source.chromium.org/chromium/chromium/src/+/main:media/gpu/gpu_video_encode_accelerator_helpers.cc;l=25
constexpr size_t kInputBufferSizeFor1080p = 2 * 1024 * 1024;  // 2MB
// Input bitstream buffer size for up to 4k streams.
constexpr size_t kInputBufferSizeFor4K = 4 * kInputBufferSizeFor1080p;
32
calculateInputBufferSize(size_t area)33 size_t calculateInputBufferSize(size_t area) {
34 if (area > k4KArea) {
35 ALOGW("Input buffer size for video size (%zu) larger than 4K (%zu) might be too small.",
36 area, k4KArea);
37 }
38
39 // Enlarge the input buffer for 4k video
40 if (area > k1080pArea) return kInputBufferSizeFor4K;
41 return kInputBufferSizeFor1080p;
42 }
43 } // namespace
44
// static
// Setter for the coded profile/level parameter: validates both fields against
// the declared possible values and merges the two results into one C2R.
C2R DecodeInterface::ProfileLevelSetter(bool /* mayBlock */,
                                        C2P<C2StreamProfileLevelInfo::input>& info) {
    return info.F(info.v.profile)
            .validatePossible(info.v.profile)
            .plus(info.F(info.v.level).validatePossible(info.v.level));
}
52
// static
// Setter for the output picture size: validates width and height against the
// declared supported ranges and merges the two results into one C2R.
C2R DecodeInterface::SizeSetter(bool /* mayBlock */,
                                C2P<C2StreamPictureSizeInfo::output>& videoSize) {
    return videoSize.F(videoSize.v.width)
            .validatePossible(videoSize.v.width)
            .plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
}
60
// static
// Setter for the maximum input buffer size: validates the requested value
// against the declared possible values.
C2R DecodeInterface::InputSizeSetter(bool /* mayBlock */,
                                     C2P<C2StreamMaxBufferSizeInfo::input>& inputSize) {
    return inputSize.F(inputSize.v.value).validatePossible(inputSize.v.value);
}
65
66 // static
67 template <typename T>
DefaultColorAspectsSetter(bool,C2P<T> & def)68 C2R DecodeInterface::DefaultColorAspectsSetter(bool /* mayBlock */, C2P<T>& def) {
69 if (def.v.range > C2Color::RANGE_OTHER) {
70 def.set().range = C2Color::RANGE_OTHER;
71 }
72 if (def.v.primaries > C2Color::PRIMARIES_OTHER) {
73 def.set().primaries = C2Color::PRIMARIES_OTHER;
74 }
75 if (def.v.transfer > C2Color::TRANSFER_OTHER) {
76 def.set().transfer = C2Color::TRANSFER_OTHER;
77 }
78 if (def.v.matrix > C2Color::MATRIX_OTHER) {
79 def.set().matrix = C2Color::MATRIX_OTHER;
80 }
81 return C2R::Ok();
82 }
83
84 // static
MergedColorAspectsSetter(bool,C2P<C2StreamColorAspectsInfo::output> & merged,const C2P<C2StreamColorAspectsTuning::output> & def,const C2P<C2StreamColorAspectsInfo::input> & coded)85 C2R DecodeInterface::MergedColorAspectsSetter(bool /* mayBlock */,
86 C2P<C2StreamColorAspectsInfo::output>& merged,
87 const C2P<C2StreamColorAspectsTuning::output>& def,
88 const C2P<C2StreamColorAspectsInfo::input>& coded) {
89 // Take coded values for all specified fields, and default values for unspecified ones.
90 merged.set().range = coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
91 merged.set().primaries =
92 coded.v.primaries == PRIMARIES_UNSPECIFIED ? def.v.primaries : coded.v.primaries;
93 merged.set().transfer =
94 coded.v.transfer == TRANSFER_UNSPECIFIED ? def.v.transfer : coded.v.transfer;
95 merged.set().matrix = coded.v.matrix == MATRIX_UNSPECIFIED ? def.v.matrix : coded.v.matrix;
96 return C2R::Ok();
97 }
98
99 // static
MaxInputBufferSizeCalculator(bool,C2P<C2StreamMaxBufferSizeInfo::input> & me,const C2P<C2StreamPictureSizeInfo::output> & size)100 C2R DecodeInterface::MaxInputBufferSizeCalculator(
101 bool /* mayBlock */, C2P<C2StreamMaxBufferSizeInfo::input>& me,
102 const C2P<C2StreamPictureSizeInfo::output>& size) {
103 size_t calculatedSize = calculateInputBufferSize(size.v.width * size.v.height);
104
105 if (me.v.value < calculatedSize) me.set().value = calculatedSize;
106
107 return C2R::Ok();
108 }
109
DecodeInterface(const std::string & name,const std::shared_ptr<C2ReflectorHelper> & helper,const SupportedCapabilities & caps)110 DecodeInterface::DecodeInterface(const std::string& name,
111 const std::shared_ptr<C2ReflectorHelper>& helper,
112 const SupportedCapabilities& caps)
113 : C2InterfaceHelper(helper), mInitStatus(C2_OK), mVideoCodec(caps.codec) {
114 ALOGV("%s(%s)", __func__, name.c_str());
115
116 setDerivedInstance(this);
117
118 addParameter(DefineParam(mKind, C2_PARAMKEY_COMPONENT_KIND)
119 .withConstValue(new C2ComponentKindSetting(C2Component::KIND_DECODER))
120 .build());
121
122 std::string inputMime;
123
124 ui::Size maxSize(1, 1);
125
126 std::vector<uint32_t> profiles;
127 for (const auto& supportedProfile : caps.supportedProfiles) {
128 if (isValidProfileForCodec(mVideoCodec.value(), supportedProfile.profile)) {
129 profiles.push_back(static_cast<uint32_t>(supportedProfile.profile));
130 maxSize.setWidth(std::max(maxSize.width, supportedProfile.max_resolution.width));
131 maxSize.setHeight(std::max(maxSize.height, supportedProfile.max_resolution.height));
132 }
133 }
134
135 // In case of no supported profile or uninitialized device maxSize is set to default
136 if (maxSize == ui::Size(1, 1)) maxSize = ui::Size(4096, 4096);
137
138 if (profiles.empty()) {
139 ALOGW("No supported profiles for H264 codec");
140 switch (*mVideoCodec) { //default values used when querry is not supported
141 case VideoCodec::H264:
142 profiles = {
143 C2Config::PROFILE_AVC_BASELINE,
144 C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
145 C2Config::PROFILE_AVC_MAIN,
146 C2Config::PROFILE_AVC_HIGH,
147 };
148 break;
149 case VideoCodec::VP8:
150 profiles = {C2Config::PROFILE_VP8_0};
151 break;
152 case VideoCodec::VP9:
153 profiles = {C2Config::PROFILE_VP9_0};
154 break;
155 case VideoCodec::HEVC:
156 profiles = {C2Config::PROFILE_HEVC_MAIN};
157 break;
158 }
159 }
160
161 uint32_t defaultProfile = caps.defaultProfile;
162 if (defaultProfile == C2Config::PROFILE_UNUSED)
163 defaultProfile = *std::min_element(profiles.begin(), profiles.end());
164
165 std::vector<unsigned int> levels;
166 std::vector<C2Config::level_t> supportedLevels = caps.supportedLevels;
167 for (const auto& supportedLevel : supportedLevels) {
168 levels.push_back(static_cast<unsigned int>(supportedLevel));
169 }
170
171 if (levels.empty()) {
172 ALOGE("No supported levels for H264 codec");
173 switch (*mVideoCodec) { //default values used when querry is not supported
174 case VideoCodec::H264:
175 levels = {C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B, C2Config::LEVEL_AVC_1_1,
176 C2Config::LEVEL_AVC_1_2, C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
177 C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2, C2Config::LEVEL_AVC_3,
178 C2Config::LEVEL_AVC_3_1, C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
179 C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2, C2Config::LEVEL_AVC_5,
180 C2Config::LEVEL_AVC_5_1, C2Config::LEVEL_AVC_5_2};
181 break;
182 case VideoCodec::VP8:
183 levels = {C2Config::LEVEL_UNUSED};
184 break;
185 case VideoCodec::VP9:
186 levels = {C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1, C2Config::LEVEL_VP9_2,
187 C2Config::LEVEL_VP9_2_1, C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
188 C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1, C2Config::LEVEL_VP9_5};
189 break;
190 case VideoCodec::HEVC:
191 levels = {C2Config::LEVEL_HEVC_MAIN_1, C2Config::LEVEL_HEVC_MAIN_2,
192 C2Config::LEVEL_HEVC_MAIN_2_1, C2Config::LEVEL_HEVC_MAIN_3,
193 C2Config::LEVEL_HEVC_MAIN_3_1, C2Config::LEVEL_HEVC_MAIN_4,
194 C2Config::LEVEL_HEVC_MAIN_4_1, C2Config::LEVEL_HEVC_MAIN_5,
195 C2Config::LEVEL_HEVC_MAIN_5_1, C2Config::LEVEL_HEVC_MAIN_5_2,
196 C2Config::LEVEL_HEVC_MAIN_6, C2Config::LEVEL_HEVC_MAIN_6_1,
197 C2Config::LEVEL_HEVC_MAIN_6_2};
198 break;
199 }
200 }
201
202 uint32_t defaultLevel = caps.defaultLevel;
203 if (defaultLevel == C2Config::LEVEL_UNUSED)
204 defaultLevel = *std::min_element(levels.begin(), levels.end());
205
206 switch (*mVideoCodec) {
207 case VideoCodec::H264:
208 inputMime = MEDIA_MIMETYPE_VIDEO_AVC;
209 addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
210 .withDefault(new C2StreamProfileLevelInfo::input(
211 0u, static_cast<C2Config::profile_t>(defaultProfile),
212 static_cast<C2Config::level_t>(defaultLevel)))
213 .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
214 C2F(mProfileLevel, level).oneOf(levels)})
215 .withSetter(ProfileLevelSetter)
216 .build());
217 break;
218
219 case VideoCodec::VP8:
220 inputMime = MEDIA_MIMETYPE_VIDEO_VP8;
221 addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
222 .withConstValue(new C2StreamProfileLevelInfo::input(
223 0u, C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
224 .build());
225 break;
226
227 case VideoCodec::VP9:
228 inputMime = MEDIA_MIMETYPE_VIDEO_VP9;
229 addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
230 .withDefault(new C2StreamProfileLevelInfo::input(
231 0u, static_cast<C2Config::profile_t>(defaultProfile),
232 static_cast<C2Config::level_t>(defaultLevel)))
233 .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
234 C2F(mProfileLevel, level).oneOf(levels)})
235 .withSetter(ProfileLevelSetter)
236 .build());
237 break;
238
239 case VideoCodec::HEVC:
240 inputMime = MEDIA_MIMETYPE_VIDEO_HEVC;
241 addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
242 .withDefault(new C2StreamProfileLevelInfo::input(
243 0u, static_cast<C2Config::profile_t>(defaultProfile),
244 static_cast<C2Config::level_t>(defaultLevel)))
245 .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
246 C2F(mProfileLevel, level).oneOf(levels)})
247 .withSetter(ProfileLevelSetter)
248 .build());
249 break;
250 }
251
252 addParameter(
253 DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
254 .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
255 .build());
256 addParameter(
257 DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
258 .withConstValue(new C2StreamUsageTuning::input(
259 0u, static_cast<uint64_t>(android::hardware::graphics::common::V1_0::
260 BufferUsage::VIDEO_DECODER)))
261 .build());
262
263 addParameter(DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
264 .withConstValue(
265 new C2StreamBufferTypeSetting::output(0u, C2BufferData::GRAPHIC))
266 .build());
267 addParameter(
268 DefineParam(mOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
269 .withConstValue(new C2PortDelayTuning::output(getOutputDelay(*mVideoCodec)))
270 .build());
271
272 // This value is set according to the relation between kNumInputBuffers = 16 and the current
273 // codec2 framework implementation. Specifically, this generally limits the framework to using
274 // <= 16 input buffers, although certain timing of events can result in a few more input buffers
275 // being allocated but rarely used. This lets us avoid remapping v4l2 input buffers and DMA
276 // buffers in the common case. We could go up to 4 here, to limit the framework to
277 // simultaneously enqueuing 16 input buffers, but there doesn't seem to be much of an a
278 // performance improvement from that.
279 addParameter(DefineParam(mPipelineDelay, C2_PARAMKEY_PIPELINE_DELAY)
280 .withConstValue(new C2PipelineDelayTuning(3))
281 .build());
282
283 addParameter(DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
284 .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
285 inputMime.c_str()))
286 .build());
287
288 addParameter(DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
289 .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
290 MEDIA_MIMETYPE_VIDEO_RAW))
291 .build());
292
293 // Note(b/165826281): The check is not used at Android framework currently.
294 // In order to fasten the bootup time, we use the maximum supported size instead of querying the
295 // capability from the V4L2 device.
296 addParameter(DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
297 .withDefault(new C2StreamPictureSizeInfo::output(
298 0u, std::min(320, maxSize.width), std::min(240, maxSize.height)))
299 .withFields({
300 C2F(mSize, width).inRange(16, maxSize.width, 16),
301 C2F(mSize, height).inRange(16, maxSize.height, 16),
302 })
303 .withSetter(SizeSetter)
304 .build());
305
306 addParameter(
307 DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
308 .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kInputBufferSizeFor1080p))
309 .withFields({
310 C2F(mMaxInputSize, value).any(),
311 })
312 .withSetter(InputSizeSetter)
313 .calculatedAs(MaxInputBufferSizeCalculator, mSize)
314 .build());
315
316 bool secureMode = name.find(".secure") != std::string::npos;
317 const C2Allocator::id_t inputAllocators[] = {secureMode ? V4L2AllocatorId::SECURE_LINEAR
318 : C2AllocatorStore::DEFAULT_LINEAR};
319
320 const C2Allocator::id_t outputAllocators[] = {C2PlatformAllocatorStore::GRALLOC};
321 const C2Allocator::id_t surfaceAllocator =
322 secureMode ? V4L2AllocatorId::SECURE_GRAPHIC : C2PlatformAllocatorStore::BUFFERQUEUE;
323 const C2BlockPool::local_id_t outputBlockPools[] = {C2BlockPool::BASIC_GRAPHIC};
324
325 addParameter(
326 DefineParam(mInputAllocatorIds, C2_PARAMKEY_INPUT_ALLOCATORS)
327 .withConstValue(C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
328 .build());
329
330 addParameter(
331 DefineParam(mOutputAllocatorIds, C2_PARAMKEY_OUTPUT_ALLOCATORS)
332 .withConstValue(C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
333 .build());
334
335 addParameter(DefineParam(mOutputSurfaceAllocatorId, C2_PARAMKEY_OUTPUT_SURFACE_ALLOCATOR)
336 .withConstValue(new C2PortSurfaceAllocatorTuning::output(surfaceAllocator))
337 .build());
338
339 addParameter(
340 DefineParam(mOutputBlockPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
341 .withDefault(C2PortBlockPoolsTuning::output::AllocShared(outputBlockPools))
342 .withFields({C2F(mOutputBlockPoolIds, m.values[0]).any(),
343 C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
344 .withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
345 .build());
346
347 addParameter(
348 DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
349 .withDefault(new C2StreamColorAspectsTuning::output(
350 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
351 C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
352 .withFields(
353 {C2F(mDefaultColorAspects, range)
354 .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
355 C2F(mDefaultColorAspects, primaries)
356 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
357 C2Color::PRIMARIES_OTHER),
358 C2F(mDefaultColorAspects, transfer)
359 .inRange(C2Color::TRANSFER_UNSPECIFIED,
360 C2Color::TRANSFER_OTHER),
361 C2F(mDefaultColorAspects, matrix)
362 .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
363 .withSetter(DefaultColorAspectsSetter)
364 .build());
365
366 addParameter(
367 DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
368 .withDefault(new C2StreamColorAspectsInfo::input(
369 0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
370 C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
371 .withFields(
372 {C2F(mCodedColorAspects, range)
373 .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
374 C2F(mCodedColorAspects, primaries)
375 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
376 C2Color::PRIMARIES_OTHER),
377 C2F(mCodedColorAspects, transfer)
378 .inRange(C2Color::TRANSFER_UNSPECIFIED,
379 C2Color::TRANSFER_OTHER),
380 C2F(mCodedColorAspects, matrix)
381 .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
382 .withSetter(DefaultColorAspectsSetter)
383 .build());
384
385 // At this moment v4l2_codec2 support decoding this information only for
386 // unprotected H264 and both protected and unprotected HEVC.
387 if ((mVideoCodec == VideoCodec::H264 && !secureMode) || mVideoCodec == VideoCodec::HEVC) {
388 addParameter(DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
389 .withDefault(new C2StreamColorAspectsInfo::output(
390 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
391 C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
392 .withFields({C2F(mColorAspects, range)
393 .inRange(C2Color::RANGE_UNSPECIFIED,
394 C2Color::RANGE_OTHER),
395 C2F(mColorAspects, primaries)
396 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
397 C2Color::PRIMARIES_OTHER),
398 C2F(mColorAspects, transfer)
399 .inRange(C2Color::TRANSFER_UNSPECIFIED,
400 C2Color::TRANSFER_OTHER),
401 C2F(mColorAspects, matrix)
402 .inRange(C2Color::MATRIX_UNSPECIFIED,
403 C2Color::MATRIX_OTHER)})
404 .withSetter(MergedColorAspectsSetter, mDefaultColorAspects,
405 mCodedColorAspects)
406 .build());
407 }
408 }
409
// Returns the currently configured maximum input (bitstream) buffer size in
// bytes, as maintained by mMaxInputSize / MaxInputBufferSizeCalculator.
size_t DecodeInterface::getInputBufferSize() const {
    return mMaxInputSize->value;
}
413
queryColorAspects(std::shared_ptr<C2StreamColorAspectsInfo::output> * targetColorAspects)414 c2_status_t DecodeInterface::queryColorAspects(
415 std::shared_ptr<C2StreamColorAspectsInfo::output>* targetColorAspects) {
416 std::unique_ptr<C2StreamColorAspectsInfo::output> colorAspects =
417 std::make_unique<C2StreamColorAspectsInfo::output>(
418 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
419 C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED);
420 c2_status_t status = query({colorAspects.get()}, {}, C2_DONT_BLOCK, nullptr);
421 if (status == C2_OK) {
422 *targetColorAspects = std::move(colorAspects);
423 }
424 return status;
425 }
426
getOutputDelay(VideoCodec codec)427 uint32_t DecodeInterface::getOutputDelay(VideoCodec codec) {
428 switch (codec) {
429 case VideoCodec::H264:
430 // Due to frame reordering an H264 decoder might need multiple additional input frames to be
431 // queued before being able to output the associated decoded buffers. We need to tell the
432 // codec2 framework that it should not stop queuing new work items until the maximum number
433 // of frame reordering is reached, to avoid stalling the decoder.
434 return 16;
435 case VideoCodec::HEVC:
436 return 16;
437 case VideoCodec::VP8:
438 // The decoder might held a few frames as a reference for decoding. Since Android T
439 // the Codec2 is more prone to timeout the component if one is not producing frames. This
440 // might especially occur when those frames are held for reference and playback/decoding
441 // is paused. With increased output delay we inform Codec2 not to timeout the component,
442 // if number of frames in components is less then the number of maximum reference frames
443 // that could be held by decoder.
444 // Reference: RFC 6386 Section 3. Compressed Frame Types
445 return 3;
446 case VideoCodec::VP9:
447 // Reference: https://www.webmproject.org/vp9/levels/
448 return 8;
449 }
450 }
451
452 } // namespace android
453