/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 *
 */

// Everything declared/defined in this header is only required when WebRTC is
// built with H264 support, please do not move anything out of the
// #ifdef unless needed and tested.
#ifdef WEBRTC_USE_H264

#include "modules/video_coding/codecs/h264/h264_decoder_impl.h"

#include <algorithm>
#include <array>
#include <limits>
#include <memory>

extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
#include "third_party/ffmpeg/libavformat/avformat.h"
#include "third_party/ffmpeg/libavutil/imgutils.h"
}  // extern "C"

#include "api/video/color_space.h"
#include "api/video/i010_buffer.h"
#include "api/video/i210_buffer.h"
#include "api/video/i420_buffer.h"
#include "api/video/i422_buffer.h"
#include "api/video/i444_buffer.h"
#include "common_video/include/video_frame_buffer.h"
#include "modules/video_coding/codecs/h264/h264_color_space.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/metrics.h"

namespace webrtc {

namespace {

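// FFmpeg output pixel formats that this decoder can map onto WebRTC frame
// buffers: 8-bit 4:2:0/4:2:2/4:4:4 (including the full-range J variants) and
// 10-bit 4:2:0/4:2:2.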
constexpr std::array<AVPixelFormat, 8> kPixelFormatsSupported = {
    AV_PIX_FMT_YUV420P,     AV_PIX_FMT_YUV422P,     AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ420P,    AV_PIX_FMT_YUVJ422P,    AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE};
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;

// Used by histograms. Values of entries should not be changed.
enum H264DecoderImplEvent {
  kH264DecoderEventInit = 0,
  kH264DecoderEventError = 1,
  kH264DecoderEventMax = 16,
};

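// Deleter for AVPacket, so a std::unique_ptr releases packets via
// av_packet_free().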
struct ScopedPtrAVFreePacket {
  void operator()(AVPacket* packet) { av_packet_free(&packet); }
};
typedef std::unique_ptr<AVPacket, ScopedPtrAVFreePacket> ScopedAVPacket;

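// av_packet_alloc() returns nullptr on allocation failure, so callers must
// check the returned ScopedAVPacket before use.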
ScopedAVPacket MakeScopedAVPacket() {
  ScopedAVPacket packet(av_packet_alloc());
  return packet;
}

}  // namespace

int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
                                  AVFrame* av_frame,
                                  int flags) {
  // Set in `Configure`.
  H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
  // DCHECK values set in `Configure`.
  RTC_DCHECK(decoder);
  // Necessary capability to be allowed to provide our own buffers.
  RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);

  auto pixelFormatSupported = std::find_if(
      kPixelFormatsSupported.begin(), kPixelFormatsSupported.end(),
      [context](AVPixelFormat format) { return context->pix_fmt == format; });

  RTC_CHECK(pixelFormatSupported != kPixelFormatsSupported.end());

  // `av_frame->width` and `av_frame->height` are set by FFmpeg. These are the
  // actual image's dimensions and may be different from `context->width` and
  // `context->coded_width` due to reordering.
  int width = av_frame->width;
  int height = av_frame->height;
  // See `lowres`, if used the decoder scales the image by 1/2^(lowres). This
  // has implications on which resolutions are valid, but we don't use it.
  RTC_CHECK_EQ(context->lowres, 0);
  // Adjust the `width` and `height` to values acceptable by the decoder.
  // Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
  // `height` are larger than the actual image and the image has to be cropped
  // (top-left corner) after decoding to avoid visible borders to the right and
  // bottom of the actual image.
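  // For example, H.264 codes 1920x1080 video in 16x16 macroblocks, so the
  // decoder works on a 1920x1088 image; `avcodec_align_dimensions` typically
  // rounds `height` up to 1088 here, and the extra 8 rows are cropped away
  // after decoding.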
  avcodec_align_dimensions(context, &width, &height);

  RTC_CHECK_GE(width, 0);
  RTC_CHECK_GE(height, 0);
  int ret = av_image_check_size(static_cast<unsigned int>(width),
                                static_cast<unsigned int>(height), 0, nullptr);
  if (ret < 0) {
    RTC_LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
    decoder->ReportError();
    return ret;
  }

  // The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's
  // version of a video frame and will be set up to reference `frame_buffer`'s
  // data.

  // FFmpeg expects the initial allocation to be zero-initialized according to
  // http://crbug.com/390941. Our pool is set up to zero-initialize new
  // buffers.
  // TODO(https://crbug.com/390941): Delete that feature from the video pool,
  // instead add an explicit call to InitializeData here.
  rtc::scoped_refptr<PlanarYuvBuffer> frame_buffer;
  rtc::scoped_refptr<I444Buffer> i444_buffer;
  rtc::scoped_refptr<I420Buffer> i420_buffer;
  rtc::scoped_refptr<I422Buffer> i422_buffer;
  rtc::scoped_refptr<I010Buffer> i010_buffer;
  rtc::scoped_refptr<I210Buffer> i210_buffer;
  int bytes_per_pixel = 1;
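  // Map FFmpeg's output pixel format onto the matching WebRTC buffer type and
  // point `av_frame`'s plane pointers and linesizes into that buffer, so
  // FFmpeg decodes directly into memory owned by our buffer pool.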
  switch (context->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
      i420_buffer =
          decoder->ffmpeg_buffer_pool_.CreateI420Buffer(width, height);
      // Set `av_frame` members as required by FFmpeg.
      av_frame->data[kYPlaneIndex] = i420_buffer->MutableDataY();
      av_frame->linesize[kYPlaneIndex] = i420_buffer->StrideY();
      av_frame->data[kUPlaneIndex] = i420_buffer->MutableDataU();
      av_frame->linesize[kUPlaneIndex] = i420_buffer->StrideU();
      av_frame->data[kVPlaneIndex] = i420_buffer->MutableDataV();
      av_frame->linesize[kVPlaneIndex] = i420_buffer->StrideV();
      RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
      frame_buffer = i420_buffer;
      break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
      i444_buffer =
          decoder->ffmpeg_buffer_pool_.CreateI444Buffer(width, height);
      // Set `av_frame` members as required by FFmpeg.
      av_frame->data[kYPlaneIndex] = i444_buffer->MutableDataY();
      av_frame->linesize[kYPlaneIndex] = i444_buffer->StrideY();
      av_frame->data[kUPlaneIndex] = i444_buffer->MutableDataU();
      av_frame->linesize[kUPlaneIndex] = i444_buffer->StrideU();
      av_frame->data[kVPlaneIndex] = i444_buffer->MutableDataV();
      av_frame->linesize[kVPlaneIndex] = i444_buffer->StrideV();
      frame_buffer = i444_buffer;
      break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
      i422_buffer =
          decoder->ffmpeg_buffer_pool_.CreateI422Buffer(width, height);
      // Set `av_frame` members as required by FFmpeg.
      av_frame->data[kYPlaneIndex] = i422_buffer->MutableDataY();
      av_frame->linesize[kYPlaneIndex] = i422_buffer->StrideY();
      av_frame->data[kUPlaneIndex] = i422_buffer->MutableDataU();
      av_frame->linesize[kUPlaneIndex] = i422_buffer->StrideU();
      av_frame->data[kVPlaneIndex] = i422_buffer->MutableDataV();
      av_frame->linesize[kVPlaneIndex] = i422_buffer->StrideV();
      frame_buffer = i422_buffer;
      break;
    case AV_PIX_FMT_YUV420P10LE:
      i010_buffer =
          decoder->ffmpeg_buffer_pool_.CreateI010Buffer(width, height);
      // Set `av_frame` members as required by FFmpeg.
      av_frame->data[kYPlaneIndex] =
          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataY());
      av_frame->linesize[kYPlaneIndex] = i010_buffer->StrideY() * 2;
      av_frame->data[kUPlaneIndex] =
          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataU());
      av_frame->linesize[kUPlaneIndex] = i010_buffer->StrideU() * 2;
      av_frame->data[kVPlaneIndex] =
          reinterpret_cast<uint8_t*>(i010_buffer->MutableDataV());
      av_frame->linesize[kVPlaneIndex] = i010_buffer->StrideV() * 2;
      frame_buffer = i010_buffer;
      bytes_per_pixel = 2;
      break;
    case AV_PIX_FMT_YUV422P10LE:
      i210_buffer =
          decoder->ffmpeg_buffer_pool_.CreateI210Buffer(width, height);
      // Set `av_frame` members as required by FFmpeg.
      av_frame->data[kYPlaneIndex] =
          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataY());
      av_frame->linesize[kYPlaneIndex] = i210_buffer->StrideY() * 2;
      av_frame->data[kUPlaneIndex] =
          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataU());
      av_frame->linesize[kUPlaneIndex] = i210_buffer->StrideU() * 2;
      av_frame->data[kVPlaneIndex] =
          reinterpret_cast<uint8_t*>(i210_buffer->MutableDataV());
      av_frame->linesize[kVPlaneIndex] = i210_buffer->StrideV() * 2;
      frame_buffer = i210_buffer;
      bytes_per_pixel = 2;
      break;
    default:
      RTC_LOG(LS_ERROR) << "Unsupported buffer type " << context->pix_fmt
                        << ". Check supported pixel formats!";
      decoder->ReportError();
      return -1;
  }

  int y_size = width * height * bytes_per_pixel;
  int uv_size = frame_buffer->ChromaWidth() * frame_buffer->ChromaHeight() *
                bytes_per_pixel;
  // DCHECK that we have a continuous buffer as is required.
  RTC_DCHECK_EQ(av_frame->data[kUPlaneIndex],
                av_frame->data[kYPlaneIndex] + y_size);
  RTC_DCHECK_EQ(av_frame->data[kVPlaneIndex],
                av_frame->data[kUPlaneIndex] + uv_size);
  int total_size = y_size + 2 * uv_size;

  av_frame->format = context->pix_fmt;
  av_frame->reordered_opaque = context->reordered_opaque;

  // Create a VideoFrame object, to keep a reference to the buffer.
  // TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
  // Refactor to not use a VideoFrame object at all.
  av_frame->buf[0] = av_buffer_create(
      av_frame->data[kYPlaneIndex], total_size, AVFreeBuffer2,
      static_cast<void*>(
          std::make_unique<VideoFrame>(VideoFrame::Builder()
                                           .set_video_frame_buffer(frame_buffer)
                                           .set_rotation(kVideoRotation_0)
                                           .set_timestamp_us(0)
                                           .build())
              .release()),
      0);
  RTC_CHECK(av_frame->buf[0]);
  return 0;
}

void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
  // The buffer pool recycles the buffer used by `video_frame` when there are
  // no more references to it. `video_frame` is a thin buffer holder and is
  // not recycled.
  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
  delete video_frame;
}

H264DecoderImpl::H264DecoderImpl()
    : ffmpeg_buffer_pool_(true),
      decoded_image_callback_(nullptr),
      has_reported_init_(false),
      has_reported_error_(false) {}

H264DecoderImpl::~H264DecoderImpl() {
  Release();
}

bool H264DecoderImpl::Configure(const Settings& settings) {
  ReportInit();
  if (settings.codec_type() != kVideoCodecH264) {
    ReportError();
    return false;
  }

  // Release necessary in case of re-initializing.
  int32_t ret = Release();
  if (ret != WEBRTC_VIDEO_CODEC_OK) {
    ReportError();
    return false;
  }
  RTC_DCHECK(!av_context_);

  // Initialize AVCodecContext.
  av_context_.reset(avcodec_alloc_context3(nullptr));

  av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
  av_context_->codec_id = AV_CODEC_ID_H264;
  const RenderResolution& resolution = settings.max_render_resolution();
  if (resolution.Valid()) {
    av_context_->coded_width = resolution.Width();
    av_context_->coded_height = resolution.Height();
  }
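  // No out-of-band extradata (e.g. an avcC box with SPS/PPS); the parameter
  // sets are expected in-band in the Annex B bitstream.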
  av_context_->extradata = nullptr;
  av_context_->extradata_size = 0;

  // If this is ever increased, look at `av_context_->thread_safe_callbacks`
  // and make it possible to disable the thread checker in the frame buffer
  // pool.
  av_context_->thread_count = 1;
  av_context_->thread_type = FF_THREAD_SLICE;

  // Function used by FFmpeg to get buffers to store decoded frames in.
  av_context_->get_buffer2 = AVGetBuffer2;
  // `get_buffer2` is called with the context, where `opaque` can be used to
  // get a pointer to `this`.
  av_context_->opaque = this;

  const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
  if (!codec) {
    // This is an indication that FFmpeg has not been initialized or it has
    // not been compiled/initialized with the correct set of codecs.
    RTC_LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
    Release();
    ReportError();
    return false;
  }
  int res = avcodec_open2(av_context_.get(), codec, nullptr);
  if (res < 0) {
    RTC_LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    Release();
    ReportError();
    return false;
  }

  av_frame_.reset(av_frame_alloc());

  if (absl::optional<int> buffer_pool_size = settings.buffer_pool_size()) {
    if (!ffmpeg_buffer_pool_.Resize(*buffer_pool_size)) {
      return false;
    }
  }
  return true;
}

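// A minimal usage sketch (`callback` and `encoded_image` are assumed to be a
// DecodedImageCallback implementation and an Annex B H.264 key frame):
//
//   H264DecoderImpl decoder;
//   VideoDecoder::Settings settings;
//   settings.set_codec_type(kVideoCodecH264);
//   decoder.Configure(settings);
//   decoder.RegisterDecodeCompleteCallback(&callback);
//   decoder.Decode(encoded_image, /*missing_frames=*/false,
//                  /*render_time_ms=*/0);
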
int32_t H264DecoderImpl::Release() {
  av_context_.reset();
  av_frame_.reset();
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decoded_image_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
                                bool /*missing_frames*/,
                                int64_t /*render_time_ms*/) {
  if (!IsInitialized()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!decoded_image_callback_) {
    RTC_LOG(LS_WARNING)
        << "Configure() has been called, but a callback function "
           "has not been set with RegisterDecodeCompleteCallback()";
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!input_image.data() || !input_image.size()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  ScopedAVPacket packet = MakeScopedAVPacket();
  if (!packet) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // packet.data has a non-const type, but isn't modified by
  // avcodec_send_packet.
  packet->data = const_cast<uint8_t*>(input_image.data());
  if (input_image.size() >
      static_cast<size_t>(std::numeric_limits<int>::max())) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  packet->size = static_cast<int>(input_image.size());
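  // `reordered_opaque` travels with the frame through the decoder, which lets
  // us match the decoded frame back to this input's timestamp below.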
  int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs
  av_context_->reordered_opaque = frame_timestamp_us;

  int result = avcodec_send_packet(av_context_.get(), packet.get());

  if (result < 0) {
    RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

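  // Exactly one decoded frame is expected per input frame. Any
  // `avcodec_receive_frame` error, including AVERROR(EAGAIN) (no frame ready
  // yet), is treated as a decode failure.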
  result = avcodec_receive_frame(av_context_.get(), av_frame_.get());
  if (result < 0) {
    RTC_LOG(LS_ERROR) << "avcodec_receive_frame error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // We don't expect reordering. Decoded frame timestamp should match
  // the input one.
  RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us);

  // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
  h264_bitstream_parser_.ParseBitstream(input_image);
  absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();

  // Obtain the `video_frame` containing the decoded image.
  VideoFrame* input_frame =
      static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
  RTC_DCHECK(input_frame);
  rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
      input_frame->video_frame_buffer();

  // View the decoded buffer through the planar YUV interface matching its
  // video frame buffer type.
  const webrtc::PlanarYuvBuffer* planar_yuv_buffer = nullptr;
  const webrtc::PlanarYuv8Buffer* planar_yuv8_buffer = nullptr;
  const webrtc::PlanarYuv16BBuffer* planar_yuv16_buffer = nullptr;
  VideoFrameBuffer::Type video_frame_buffer_type = frame_buffer->type();
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
      planar_yuv_buffer = frame_buffer->GetI420();
      planar_yuv8_buffer =
          reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI444:
      planar_yuv_buffer = frame_buffer->GetI444();
      planar_yuv8_buffer =
          reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI422:
      planar_yuv_buffer = frame_buffer->GetI422();
      planar_yuv8_buffer =
          reinterpret_cast<const webrtc::PlanarYuv8Buffer*>(planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI010:
      planar_yuv_buffer = frame_buffer->GetI010();
      planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
          planar_yuv_buffer);
      break;
    case VideoFrameBuffer::Type::kI210:
      planar_yuv_buffer = frame_buffer->GetI210();
      planar_yuv16_buffer = reinterpret_cast<const webrtc::PlanarYuv16BBuffer*>(
          planar_yuv_buffer);
      break;
    default:
      // If this code is changed to allow other video frame buffer types, make
      // sure that the code below which wraps the I420/I422/I444 buffer, and
      // the code which converts to NV12, is updated to work with the new
      // buffer type.
      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // When needed, FFmpeg applies cropping by moving plane pointers and
  // adjusting frame width/height. Ensure that cropped buffers lie within the
  // allocated memory.
  RTC_DCHECK_LE(av_frame_->width, planar_yuv_buffer->width());
  RTC_DCHECK_LE(av_frame_->height, planar_yuv_buffer->height());
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
    case VideoFrameBuffer::Type::kI444:
    case VideoFrameBuffer::Type::kI422: {
      RTC_DCHECK_GE(av_frame_->data[kYPlaneIndex], planar_yuv8_buffer->DataY());
      RTC_DCHECK_LE(
          av_frame_->data[kYPlaneIndex] +
              av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
          planar_yuv8_buffer->DataY() +
              planar_yuv8_buffer->StrideY() * planar_yuv8_buffer->height());
      RTC_DCHECK_GE(av_frame_->data[kUPlaneIndex], planar_yuv8_buffer->DataU());
      RTC_DCHECK_LE(
          av_frame_->data[kUPlaneIndex] +
              av_frame_->linesize[kUPlaneIndex] *
                  planar_yuv8_buffer->ChromaHeight(),
          planar_yuv8_buffer->DataU() + planar_yuv8_buffer->StrideU() *
                                            planar_yuv8_buffer->ChromaHeight());
      RTC_DCHECK_GE(av_frame_->data[kVPlaneIndex], planar_yuv8_buffer->DataV());
      RTC_DCHECK_LE(
          av_frame_->data[kVPlaneIndex] +
              av_frame_->linesize[kVPlaneIndex] *
                  planar_yuv8_buffer->ChromaHeight(),
          planar_yuv8_buffer->DataV() + planar_yuv8_buffer->StrideV() *
                                            planar_yuv8_buffer->ChromaHeight());
      break;
    }
    case VideoFrameBuffer::Type::kI010:
    case VideoFrameBuffer::Type::kI210: {
      RTC_DCHECK_GE(
          av_frame_->data[kYPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()));
      RTC_DCHECK_LE(
          av_frame_->data[kYPlaneIndex] +
              av_frame_->linesize[kYPlaneIndex] * av_frame_->height,
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataY()) +
              planar_yuv16_buffer->StrideY() * 2 *
                  planar_yuv16_buffer->height());
      RTC_DCHECK_GE(
          av_frame_->data[kUPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()));
      RTC_DCHECK_LE(
          av_frame_->data[kUPlaneIndex] +
              av_frame_->linesize[kUPlaneIndex] *
                  planar_yuv16_buffer->ChromaHeight(),
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataU()) +
              planar_yuv16_buffer->StrideU() * 2 *
                  planar_yuv16_buffer->ChromaHeight());
      RTC_DCHECK_GE(
          av_frame_->data[kVPlaneIndex],
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()));
      RTC_DCHECK_LE(
          av_frame_->data[kVPlaneIndex] +
              av_frame_->linesize[kVPlaneIndex] *
                  planar_yuv16_buffer->ChromaHeight(),
          reinterpret_cast<const uint8_t*>(planar_yuv16_buffer->DataV()) +
              planar_yuv16_buffer->StrideV() * 2 *
                  planar_yuv16_buffer->ChromaHeight());
      break;
    }
    default:
      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

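  // Wrap the (possibly cropped) planes in a buffer that shares the decoded
  // memory; the capture-by-value lambda keeps `frame_buffer`, and thus the
  // pool allocation, alive for as long as the wrapped buffer is referenced.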
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> cropped_buffer;
  switch (video_frame_buffer_type) {
    case VideoFrameBuffer::Type::kI420:
      cropped_buffer = WrapI420Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI444:
      cropped_buffer = WrapI444Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI422:
      cropped_buffer = WrapI422Buffer(
          av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex],
          av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex],
          av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex],
          av_frame_->linesize[kVPlaneIndex],
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI010:
      cropped_buffer = WrapI010Buffer(
          av_frame_->width, av_frame_->height,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
          av_frame_->linesize[kYPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
          av_frame_->linesize[kUPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
          av_frame_->linesize[kVPlaneIndex] / 2,
          // To keep reference alive.
          [frame_buffer] {});
      break;
    case VideoFrameBuffer::Type::kI210:
      cropped_buffer = WrapI210Buffer(
          av_frame_->width, av_frame_->height,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kYPlaneIndex]),
          av_frame_->linesize[kYPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kUPlaneIndex]),
          av_frame_->linesize[kUPlaneIndex] / 2,
          reinterpret_cast<const uint16_t*>(av_frame_->data[kVPlaneIndex]),
          av_frame_->linesize[kVPlaneIndex] / 2,
          // To keep reference alive.
          [frame_buffer] {});
      break;
    default:
      RTC_LOG(LS_ERROR) << "frame_buffer type: "
                        << static_cast<int32_t>(video_frame_buffer_type)
                        << " is not supported!";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
  }

  // Pass on color space from input frame if explicitly specified.
  const ColorSpace& color_space =
      input_image.ColorSpace() ? *input_image.ColorSpace()
                               : ExtractH264ColorSpace(av_context_.get());

  VideoFrame decoded_frame = VideoFrame::Builder()
                                 .set_video_frame_buffer(cropped_buffer)
                                 .set_timestamp_rtp(input_image.Timestamp())
                                 .set_color_space(color_space)
                                 .build();

  // Return decoded frame.
  // TODO(nisse): Timestamp and rotation are all zero here. Change decoder
  // interface to pass a VideoFrameBuffer instead of a VideoFrame?
  decoded_image_callback_->Decoded(decoded_frame, absl::nullopt, qp);

  // Stop referencing it, possibly freeing `input_frame`.
  av_frame_unref(av_frame_.get());
  input_frame = nullptr;

  return WEBRTC_VIDEO_CODEC_OK;
}

const char* H264DecoderImpl::ImplementationName() const {
  return "FFmpeg";
}

bool H264DecoderImpl::IsInitialized() const {
  return av_context_ != nullptr;
}

void H264DecoderImpl::ReportInit() {
  if (has_reported_init_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventInit, kH264DecoderEventMax);
  has_reported_init_ = true;
}

void H264DecoderImpl::ReportError() {
  if (has_reported_error_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventError, kH264DecoderEventMax);
  has_reported_error_ = true;
}

}  // namespace webrtc

#endif  // WEBRTC_USE_H264