// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_VIDEO
#define LOG_TAG "V4L2Decoder"

#include <v4l2_codec2/v4l2/V4L2Decoder.h>

#include <stdint.h>

#include <algorithm>
#include <vector>

#include <base/bind.h>
#include <base/files/scoped_file.h>
#include <base/memory/ptr_util.h>
#include <log/log.h>
#include <utils/Trace.h>

#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/Fourcc.h>
#include <v4l2_codec2/common/H264NalParser.h>
#include <v4l2_codec2/common/HEVCNalParser.h>
#include <v4l2_codec2/plugin_store/DmabufHelpers.h>

namespace android {
namespace {

// Extra buffers for transmitting in the whole video pipeline.
constexpr size_t kNumExtraOutputBuffers = 4;

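// Scans the input bitstream buffer for a keyframe: an IDR NAL unit for H.264/HEVC, or the key
// frame flag in the uncompressed header for VP8/VP9. The decoder uses the result to decide
// whether a source change (DRC) event can still be expected from the driver before a drain
// completes.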
bool waitForDRC(const C2ConstLinearBlock& input, std::optional<VideoCodec> codec) {
    C2ReadView view = input.map().get();
    const uint8_t* pos = view.data();
    // The frame type flag occupies bit 2 of the first byte of the VP9 uncompressed header.
    const uint8_t kVP9FrameTypeMask = 0x4;
    // The frame type flag occupies bit 0 of the first byte of the VP8 uncompressed header.
    const uint8_t kVP8FrameTypeMask = 0x1;

    switch (*codec) {
    case VideoCodec::H264: {
        H264NalParser parser(view.data(), view.capacity());
        return parser.locateIDR();
    }
    case VideoCodec::HEVC: {
        HEVCNalParser parser(view.data(), view.capacity());
        return parser.locateIDR();
    }
    // For VP8 and VP9 it is assumed that the input buffer contains a single
    // frame that is not fragmented.
    case VideoCodec::VP9:
        // 0 - key frame; 1 - inter frame
        return ((pos[0] & kVP9FrameTypeMask) == 0);
    case VideoCodec::VP8:
        // 0 - key frame; 1 - inter frame
        return ((pos[0] & kVP8FrameTypeMask) == 0);
    }

    return false;
}

}  // namespace

// static
std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
        uint32_t debugStreamId, const VideoCodec& codec, const size_t inputBufferSize,
        const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb, ErrorCB errorCb,
        scoped_refptr<::base::SequencedTaskRunner> taskRunner, bool isSecure) {
    std::unique_ptr<V4L2Decoder> decoder =
            ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(debugStreamId, taskRunner));
    if (!decoder->start(codec, inputBufferSize, minNumOutputBuffers, std::move(getPoolCb),
                        std::move(outputCb), std::move(errorCb), isSecure)) {
        return nullptr;
    }
    return decoder;
}

V4L2Decoder::V4L2Decoder(uint32_t debugStreamId,
                         scoped_refptr<::base::SequencedTaskRunner> taskRunner)
      : mDebugStreamId(debugStreamId), mTaskRunner(std::move(taskRunner)) {
    ALOGV("%s()", __func__);

    mWeakThis = mWeakThisFactory.GetWeakPtr();
}

V4L2Decoder::~V4L2Decoder() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mWeakThisFactory.InvalidateWeakPtrs();

    // Streamoff input and output queue.
    if (mOutputQueue) {
        mOutputQueue->streamoff();
        mOutputQueue->deallocateBuffers();
        mOutputQueue = nullptr;
    }
    if (mInputQueue) {
        mInputQueue->streamoff();
        mInputQueue->deallocateBuffers();
        mInputQueue = nullptr;
    }
    if (mDevice) {
        mDevice->stopPolling();
        mDevice = nullptr;
    }
    if (mInitialEosBuffer) {
        mInitialEosBuffer = nullptr;
    }
}

bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize,
                        const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb,
                        ErrorCB errorCb, bool isSecure) {
    ATRACE_CALL();
    ALOGV("%s(codec=%s, inputBufferSize=%zu, minNumOutputBuffers=%zu)", __func__,
          VideoCodecToString(codec), inputBufferSize, minNumOutputBuffers);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mMinNumOutputBuffers = minNumOutputBuffers;
    mGetPoolCb = std::move(getPoolCb);
    mOutputCb = std::move(outputCb);
    mErrorCb = std::move(errorCb);
    mCodec = codec;
    mIsSecure = isSecure;

    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return false;
    }

    mDevice = V4L2Device::create(mDebugStreamId);

    const uint32_t inputPixelFormat = V4L2Device::videoCodecToPixFmt(codec);
    if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
        ALOGE("Failed to open device for %s", VideoCodecToString(codec));
        return false;
    }

    if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
        ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
        return false;
    }

    if (!sendV4L2DecoderCmd(false)) {
        ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
        return false;
    }

    // Subscribe to the resolution change event.
    struct v4l2_event_subscription sub;
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
        ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
        return false;
    }

    // Create Input/Output V4L2Queue, and setup input queue.
    mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!mInputQueue || !mOutputQueue) {
        ALOGE("Failed to create V4L2 queue.");
        return false;
    }
    if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
        ALOGE("Failed to setup input format.");
        return false;
    }
    if (!setupInitialOutput()) {
        ALOGE("Unable to setup initial output");
        return false;
    }

    if (!mDevice->startPolling(mTaskRunner,
                               ::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        return false;
    }

    setState(State::Idle);
    return true;
}

bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
    ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
          inputBufferSize);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Check if the format is supported.
    std::vector<uint32_t> formats =
            mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
        ALOGE("Input codec is not supported by device.");
        return false;
    }

    // Setup the input format.
    auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
    if (!format) {
        ALOGE("Failed to call IOCTL to set input format.");
        return false;
    }
    ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);

    if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
        ALOGE("Failed to allocate input buffer.");
        return false;
    }
    if (!mInputQueue->streamon()) {
        ALOGE("Failed to streamon input queue.");
        return false;
    }
    return true;
}

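// Sets up a minimal output (CAPTURE) configuration before the coded size is known: picks the
// smallest supported resolution, allocates a single CAPTURE buffer backed by a locally created
// GraphicBuffer (mInitialEosBuffer) and queues it. This placeholder lets the driver return an
// EOS (V4L2_BUF_FLAG_LAST) buffer if a drain is requested before the first resolution change
// event has been handled.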
bool V4L2Decoder::setupInitialOutput() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!setupMinimalOutputFormat()) {
        ALOGE("Failed to set minimal resolution for initial output buffers");
        return false;
    }

    if (!startOutputQueue(1, V4L2_MEMORY_DMABUF)) {
        ALOGE("Failed to start initial output queue");
        return false;
    }

    std::optional<V4L2WritableBufferRef> eosBuffer = mOutputQueue->getFreeBuffer();
    if (!eosBuffer) {
        ALOGE("Failed to acquire initial EOS buffer");
        return false;
    }

    mInitialEosBuffer =
            new GraphicBuffer(mCodedSize.getWidth(), mCodedSize.getHeight(),
                              static_cast<PixelFormat>(HalPixelFormat::YCBCR_420_888),
                              GraphicBuffer::USAGE_HW_VIDEO_ENCODER, "V4L2DecodeComponent");

    if (mInitialEosBuffer->initCheck() != NO_ERROR) {
        return false;
    }

    std::vector<int> fds;
    for (size_t i = 0; i < mInitialEosBuffer->handle->numFds; i++) {
        fds.push_back(mInitialEosBuffer->handle->data[i]);
    }

    if (!std::move(*eosBuffer).queueDMABuf(fds)) {
        ALOGE("Failed to queue initial EOS buffer");
        return false;
    }

    return true;
}

bool V4L2Decoder::setupMinimalOutputFormat() {
    ui::Size minResolution, maxResolution;

    for (const uint32_t& pixfmt :
         mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
        if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
            kSupportedOutputFourccs.end()) {
            ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
            continue;
        }

        mDevice->getSupportedResolution(pixfmt, &minResolution, &maxResolution);
        if (minResolution.isEmpty()) {
            minResolution.set(128, 128);
        }

        if (mOutputQueue->setFormat(pixfmt, minResolution, 0) != std::nullopt) {
            return true;
        }
    }

    ALOGE("Failed to find supported pixel format");
    return false;
}

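// (Re)starts the CAPTURE queue: reads the format and minimum buffer count advertised by the
// driver, applies the output format, then reallocates at least minOutputBuffersCount buffers
// and streams the queue on again. Any frames still tracked as being at the device are dropped
// from the bookkeeping maps first.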
bool V4L2Decoder::startOutputQueue(size_t minOutputBuffersCount, enum v4l2_memory memory) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    const std::optional<struct v4l2_format> format = getFormatInfo();
    std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
    if (!format || !numOutputBuffers) {
        return false;
    }
    *numOutputBuffers = std::max(*numOutputBuffers, minOutputBuffersCount);

    const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
    if (!setupOutputFormat(codedSize)) {
        return false;
    }

    const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
    if (!adjustedFormat) {
        return false;
    }
    mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
    mVisibleRect = getVisibleRect(mCodedSize);

    ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
          toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
    if (isEmpty(mCodedSize)) {
        ALOGE("Failed to get resolution from V4L2 driver.");
        return false;
    }

    if (mOutputQueue->isStreaming()) {
        mOutputQueue->streamoff();
    }
    if (mOutputQueue->allocatedBuffersCount() > 0) {
        mOutputQueue->deallocateBuffers();
    }

    mFrameAtDevice.clear();
    mBlockIdToV4L2Id.clear();
    while (!mReuseFrameQueue.empty()) {
        mReuseFrameQueue.pop();
    }

    const size_t adjustedNumOutputBuffers =
            mOutputQueue->allocateBuffers(*numOutputBuffers, memory);
    if (adjustedNumOutputBuffers == 0) {
        ALOGE("Failed to allocate output buffer.");
        return false;
    }

    ALOGV("Allocated %zu output buffers.", adjustedNumOutputBuffers);
    if (!mOutputQueue->streamon()) {
        ALOGE("Failed to streamon output queue.");
        return false;
    }

    return true;
}

void V4L2Decoder::decode(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb) {
    ATRACE_CALL();
    ALOGV("%s(id=%d)", __func__, buffer->id);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
                                                          VideoDecoder::DecodeStatus::kError));
        return;
    }

    if (mState == State::Idle) {
        setState(State::Decoding);
    }

    // Determining whether a DRC is pending requires access to the frame data. The frame cannot
    // be accessed directly for secure playback, so this check must be skipped. b/279834186
    if (!mIsSecure && mInitialEosBuffer && !mPendingDRC)
        mPendingDRC = waitForDRC(buffer->dmabuf, mCodec);

    mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
    pumpDecodeRequest();
}

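// Queues a drain request behind any pending decode requests. The actual V4L2_DEC_CMD_STOP is
// issued from pumpDecodeRequest() once all previously queued input buffers have been dequeued.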
void V4L2Decoder::drain(DecodeCB drainCb) {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    switch (mState) {
    case State::Idle:
        ALOGV("Nothing to drain, ignore.");
        mTaskRunner->PostTask(
                FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
        return;

    case State::Decoding:
        mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
        pumpDecodeRequest();
        return;

    case State::Draining:
    case State::Error:
        ALOGE("Ignore due to wrong state: %s", StateToString(mState));
        mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
                                                          VideoDecoder::DecodeStatus::kError));
        return;
    }
}

void V4L2Decoder::pumpDecodeRequest() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState != State::Decoding) return;

    while (!mDecodeRequests.empty()) {
        // Drain the decoder.
        if (mDecodeRequests.front().buffer == nullptr) {
            ALOGV("Got drain request.");
            // Send the flush command after all input buffers are dequeued. This makes
            // sure all previous resolution changes have been handled because the
            // driver must hold the input buffer that triggers resolution change. The
            // driver cannot decode data in it without new output buffers. If we send
            // the flush now and a queued input buffer triggers resolution change
            // later, the driver will send an output buffer that has
            // V4L2_BUF_FLAG_LAST. But some queued input buffers have not been decoded
            // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
            // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
            // to the decoder.
            if (mInputQueue->queuedBuffersCount() > 0) {
                ALOGV("Wait for all input buffers dequeued.");
                return;
            }

            // If the output queue is not streaming, then the device is unable to notify
            // when draining is finished (the EOS frame cannot be dequeued).
            // This is likely to happen when the first resolution change event wasn't
            // dequeued before the drain request.
            if (!mOutputQueue->isStreaming()) {
                ALOGV("Wait for output queue to start streaming");
                return;
            }

            auto request = std::move(mDecodeRequests.front());
            mDecodeRequests.pop();

            // There is one more case in which the EOS frame cannot be dequeued because
            // the first resolution change event wasn't dequeued yet: the output queues
            // on the host are not streaming, but ARCVM has no knowledge of it. Check
            // whether the first resolution change event was received and whether no
            // non-empty frame (other than SPS/PPS/EOS) was previously sent that could
            // trigger a configuration from the host side.
            // The drain can only be finished early if we are sure there was no stream,
            // i.e. not a single frame was submitted.
            if (mInitialEosBuffer && !mPendingDRC) {
                ALOGV("Terminate drain, because there was no stream");
                mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(request.decodeCb),
                                                                  VideoDecoder::DecodeStatus::kOk));
                return;
            }

            if (!sendV4L2DecoderCmd(false)) {
                std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
                onError();
                return;
            }
            mDrainCb = std::move(request.decodeCb);
            setState(State::Draining);
            return;
        }

        auto dma_buf_id = getDmabufId(mDecodeRequests.front().buffer->dmabuf.handle()->data[0]);
        if (!dma_buf_id) {
            ALOGE("Failed to get dmabuf id");
            onError();
            return;
        }

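        // Note: reusing the same input buffer slot for the same client dmabuf presumably lets
        // the driver keep its existing mapping of that dmabuf instead of importing it again;
        // this rationale is an assumption, the selection logic below is what the code enforces.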
        std::optional<V4L2WritableBufferRef> inputBuffer;
        size_t targetIndex = 0;

        // If there's an existing input buffer for this dma buffer, use it.
        for (; targetIndex < mNextInputBufferId; targetIndex++) {
            if (mLastDmaBufferId[targetIndex] == dma_buf_id) {
                break;
            }
        }

        if (targetIndex < kNumInputBuffers) {
            // If we didn't find a buffer and there is an unused buffer, use that one.
            if (targetIndex == mNextInputBufferId) {
                mNextInputBufferId++;
            }

            inputBuffer = mInputQueue->getFreeBuffer(targetIndex);
        }

        // If we didn't find a reusable/unused input buffer, clobber a free one.
        if (!inputBuffer) {
            inputBuffer = mInputQueue->getFreeBuffer();
        }

        // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
        if (!inputBuffer) {
            ALOGV("There is no free input buffer.");
            return;
        }

        mLastDmaBufferId[inputBuffer->bufferId()] = *dma_buf_id;

        auto request = std::move(mDecodeRequests.front());
        mDecodeRequests.pop();

        const int32_t bitstreamId = request.buffer->id;
        ALOGV("QBUF to input queue, bitstreamId=%d", bitstreamId);
        inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
        size_t planeSize = inputBuffer->getPlaneSize(0);
        if (request.buffer->size > planeSize) {
            ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
                  request.buffer->size);
            onError();
            return;
        }

        ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
              request.buffer->offset);
        inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
        inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
        std::vector<int> fds;
        fds.push_back(std::move(request.buffer->dmabuf.handle()->data[0]));
        if (!std::move(*inputBuffer).queueDMABuf(fds)) {
            ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
            onError();
            return;
        }

        mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
    }
}

void V4L2Decoder::flush() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Idle) {
        ALOGV("Nothing to flush, ignore.");
        return;
    }
    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return;
    }

    // Call all pending callbacks.
    for (auto& item : mPendingDecodeCbs) {
        std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
    }
    mPendingDecodeCbs.clear();
    if (mDrainCb) {
        std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
    }

    // Streamoff both V4L2 queues to drop input and output buffers.
    const bool isOutputStreaming = mOutputQueue->isStreaming();
    mDevice->stopPolling();
    mOutputQueue->streamoff();

    // Extract the currently enqueued output picture buffers so that they can be queued again
    // first later. See b/270003218 and b/297228544.
    for (auto& [v4l2Id, frame] : mFrameAtDevice) {
        // Find the corresponding mapping from block ID (DMABUF ID) to V4L2 buffer ID.
        // The buffer was enqueued to the device, therefore such a mapping has to exist.
        auto blockIdIter =
                std::find_if(mBlockIdToV4L2Id.begin(), mBlockIdToV4L2Id.end(),
                             [v4l2Id = v4l2Id](const auto& el) { return el.second == v4l2Id; });

        ALOG_ASSERT(blockIdIter != mBlockIdToV4L2Id.end());
        size_t blockId = blockIdIter->first;
        mReuseFrameQueue.push(std::make_pair(blockId, std::move(frame)));
    }
    mFrameAtDevice.clear();

    mInputQueue->streamoff();

    // Streamon both V4L2 queues.
    mInputQueue->streamon();
    if (isOutputStreaming) {
        mOutputQueue->streamon();
    }

    // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggered after
    // a buffer is DQBUF from the output queue. Now that all the buffers are dropped from
    // mOutputQueue, we have to trigger tryFetchVideoFrame() here.
    if (mVideoFramePool) {
        tryFetchVideoFrame();
    }

    if (!mDevice->startPolling(mTaskRunner,
                               ::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        onError();
        return;
    }

    setState(State::Idle);
}

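// Invoked by the V4L2 device poller whenever buffers or events are ready. Dequeues finished
// OUTPUT (bitstream) buffers and runs their decode callbacks, dequeues CAPTURE (picture)
// buffers and hands them to the client, completes a pending drain when the LAST buffer shows
// up, and handles resolution change events. `event` is true when a V4L2 event is pending.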
void V4L2Decoder::serviceDeviceTask(bool event) {
    ATRACE_CALL();
    ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
          event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
          mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
          mInputQueue->allocatedBuffersCount(),
          (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
          mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
          mOutputQueue->allocatedBuffersCount());

    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Error) return;

    // Dequeue output and input queue.
    bool inputDequeued = false;
    while (mInputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from input queue.");
            onError();
            return;
        }
        if (!dequeuedBuffer) break;

        inputDequeued = true;

        // Run the corresponding decode callback.
        int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
        ALOGV("DQBUF from input queue, bitstreamId=%d", id);
        auto it = mPendingDecodeCbs.find(id);
        if (it == mPendingDecodeCbs.end()) {
            ALOGW("Callback is already abandoned.");
            continue;
        }
        std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
        mPendingDecodeCbs.erase(it);
    }

    bool outputDequeued = false;
    while (mOutputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from output queue.");
            onError();
            return;
        }
        if (!dequeuedBuffer) break;

        outputDequeued = true;

        const size_t bufferId = dequeuedBuffer->bufferId();
        const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
        const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
        const bool isLast = dequeuedBuffer->isLast();
        ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
              bufferId, bitstreamId, bytesUsed, isLast);

        // Get the corresponding VideoFrame of the dequeued buffer.
        auto it = mFrameAtDevice.find(bufferId);
        ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
                    bufferId);
        auto frame = std::move(it->second);
        mFrameAtDevice.erase(it);

        if (bytesUsed > 0) {
            ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
            frame->setBitstreamId(bitstreamId);
            frame->setVisibleRect(mVisibleRect);
            mOutputCb.Run(std::move(frame));
        } else {
            // Workaround(b/168750131): If the buffer is not enqueued before the next drain is
            // done, then the driver will fail to notify EOS. So we recycle the buffer
            // immediately.
            ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
            dequeuedBuffer.reset();
            auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
            ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);

            if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
                ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
                onError();
                return;
            }
            mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
        }

        if (mDrainCb && isLast) {
            ALOGV("All buffers are drained.");
            sendV4L2DecoderCmd(true);
            std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
            setState(State::Idle);
        }
    }

    // Handle resolution change event.
    if (event && dequeueResolutionChangeEvent()) {
        if (!changeResolution()) {
            onError();
            return;
        }
    }

    // We freed some input buffers, continue handling decode requests.
    if (inputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
    }
    // We freed some output buffers, try to get a VideoFrame.
    if (outputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
    }
}

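// Drains all pending V4L2 events and reports whether any of them announced a resolution
// (source) change.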
bool V4L2Decoder::dequeueResolutionChangeEvent() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_event ev;
    memset(&ev, 0, sizeof(ev));
    while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
        if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
            ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
            return true;
        }
    }
    return false;
}

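// Handles a dynamic resolution change: drops the initial EOS placeholder buffer, restarts the
// CAPTURE queue with the new coded size, and requests a fresh VideoFramePool sized for the new
// buffer count from the client.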
bool V4L2Decoder::changeResolution() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mInitialEosBuffer) {
        mInitialEosBuffer = nullptr;
    }

    if (!startOutputQueue(mMinNumOutputBuffers, V4L2_MEMORY_DMABUF)) {
        ALOGE("Failed to start output queue during DRC.");
        return false;
    }

    // If a drain request is pending, then the previous call to pumpDecodeRequest() stalled the
    // request because there was no way of notifying the component that the drain has finished.
    // Send the drain request to the device now.
    if (!mDecodeRequests.empty() && mDecodeRequests.front().buffer == nullptr) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
    }

    // Release the previous VideoFramePool before getting a new one to guarantee only one pool
    // exists at the same time.
    mVideoFramePool.reset();
    // Always use the flexible 4:2:0 pixel format YCBCR_420_888 in Android.
    mVideoFramePool = mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888,
                                     mOutputQueue->allocatedBuffersCount());
    if (!mVideoFramePool) {
        ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
        return false;
    }

    tryFetchVideoFrame();
    return true;
}

bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
    for (const uint32_t& pixfmt :
         mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
        if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
            kSupportedOutputFourccs.end()) {
            ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
            continue;
        }

        if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
            return true;
        }
    }

    ALOGE("Failed to find supported pixel format");
    return false;
}

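// Tries to attach a client picture buffer to a free CAPTURE slot. Frames parked in
// mReuseFrameQueue (saved across a flush) are preferred; otherwise a new frame is requested
// asynchronously from the VideoFramePool and delivered through onVideoFrameReady().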
void V4L2Decoder::tryFetchVideoFrame() {
    ATRACE_CALL();
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mVideoFramePool) {
        ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
        onError();
        return;
    }

    if (mOutputQueue->freeBuffersCount() == 0) {
        ALOGV("No free V4L2 output buffers, ignore.");
        return;
    }

    if (mReuseFrameQueue.empty()) {
        if (!mVideoFramePool->getVideoFrame(
                    ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
            ALOGV("%s(): Previous callback is running, ignore.", __func__);
        }

        return;
    }

    // Reuse output picture buffers that were abandoned after STREAMOFF first.
    // NOTE(b/270003218 and b/297228544): This avoids issues with lack of
    // ability to return all picture buffers on STREAMOFF from VDA and
    // saves on IPC with BufferQueue increasing overall responsiveness.
    uint32_t blockId = mReuseFrameQueue.front().first;
    std::unique_ptr<VideoFrame> frame = std::move(mReuseFrameQueue.front().second);
    mReuseFrameQueue.pop();

    // Avoid recursive calls.
    mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis,
                                                      std::make_pair(std::move(frame), blockId)));
}

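// Receives a picture buffer from the VideoFramePool (or from the reuse queue). Each graphic
// block id is pinned to a fixed V4L2 CAPTURE buffer index via mBlockIdToV4L2Id so that the
// same block always lands in the same V4L2 slot; the frame is then queued to the driver and
// tracked in mFrameAtDevice until it is dequeued again.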
void V4L2Decoder::onVideoFrameReady(
        std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!frameWithBlockId) {
        ALOGE("Got nullptr VideoFrame.");
        onError();
        return;
    }

    // Unwrap our arguments.
    std::unique_ptr<VideoFrame> frame;
    uint32_t blockId;
    std::tie(frame, blockId) = std::move(*frameWithBlockId);

    std::optional<V4L2WritableBufferRef> outputBuffer;
    // Find the V4L2 buffer that is associated with this block.
    auto iter = mBlockIdToV4L2Id.find(blockId);
    if (iter != mBlockIdToV4L2Id.end()) {
        // If we have met this block in the past, reuse the same V4L2 buffer.
        outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
        if (!outputBuffer) {
            // NOTE(b/281477122): There is a bug in C2BufferQueueBlock. Its buffer queue slots
            // cache is inconsistent when MediaSync is used and a buffer with the same dmabuf id
            // can be returned twice despite being already in use by V4L2Decoder. We drop the
            // buffer here in order to prevent unwanted errors. It is safe, because its
            // allocation will be kept alive by the C2GraphicBlock instance.
            ALOGW("%s(): The frame has been supplied again, despite already being enqueued",
                  __func__);
            tryFetchVideoFrame();
            return;
        }
    } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
        // If this is the first time we see this block, give it the next
        // available V4L2 buffer.
        const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
        mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
        outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
        ALOG_ASSERT(v4l2BufferId == outputBuffer->bufferId());
    } else {
        // If this happens, this is a bug in VideoFramePool. It should never
        // provide more blocks than we have V4L2 buffers.
        ALOGE("Got more different blocks than we have V4L2 buffers for.");
    }

    if (!outputBuffer) {
        ALOGE("V4L2 buffer not available. blockId=%u", blockId);
        onError();
        return;
    }

    uint32_t v4l2Id = outputBuffer->bufferId();
    ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);

    if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
        ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
              v4l2Id);
        onError();
        return;
    }
    if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
        ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
        onError();
        return;
    }
    mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));

    tryFetchVideoFrame();
}

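// Asks the driver for the minimum number of CAPTURE buffers it requires
// (V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) and adds kNumExtraOutputBuffers of headroom for buffers
// in flight elsewhere in the pipeline.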
std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_control ctrl;
    memset(&ctrl, 0, sizeof(ctrl));
    ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
    if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
        ALOGE("ioctl() failed: VIDIOC_G_CTRL");
        return std::nullopt;
    }
    ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);

    return ctrl.value + kNumExtraOutputBuffers;
}

std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_format format;
    memset(&format, 0, sizeof(format));
    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
        ALOGE("ioctl() failed: VIDIOC_G_FMT");
        return std::nullopt;
    }

    return format;
}

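// Queries the visible (composition) rectangle of the decoded picture via VIDIOC_G_SELECTION,
// falling back to VIDIOC_G_CROP on older drivers. If the reported rectangle is empty or does
// not fit inside the coded size, the full coded size is returned instead.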
Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_rect* visible_rect = nullptr;
    struct v4l2_selection selection_arg;
    memset(&selection_arg, 0, sizeof(selection_arg));
    selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    selection_arg.target = V4L2_SEL_TGT_COMPOSE;
    // Declared at function scope so that visible_rect remains valid after the fallback branch.
    struct v4l2_crop crop_arg;
    memset(&crop_arg, 0, sizeof(crop_arg));
    crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

    if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
        ALOGV("VIDIOC_G_SELECTION is supported");
        visible_rect = &selection_arg.r;
    } else {
        ALOGV("Fallback to VIDIOC_G_CROP");
        if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
            ALOGW("ioctl() VIDIOC_G_CROP failed");
            return Rect(codedSize.width, codedSize.height);
        }
        visible_rect = &crop_arg.c;
    }

    Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
              visible_rect->top + visible_rect->height);
    ALOGV("visible rectangle is %s", toString(rect).c_str());
    if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
        ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
              toString(codedSize).c_str());
        return Rect(codedSize.width, codedSize.height);
    }
    if (rect.isEmpty()) {
        ALOGW("visible size is empty");
        return Rect(codedSize.width, codedSize.height);
    }

    return rect;
}

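// Sends V4L2_DEC_CMD_START or V4L2_DEC_CMD_STOP to the driver. STOP initiates a drain (flush);
// it is also issued once from start() purely to probe whether the driver supports the command.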
bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
    ALOGV("%s(start=%d)", __func__, start);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_decoder_cmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
    if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
        ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
        return false;
    }

    return true;
}

void V4L2Decoder::onError() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    setState(State::Error);
    mErrorCb.Run();
}

void V4L2Decoder::setState(State newState) {
    ALOGV("%s(%s)", __func__, StateToString(newState));
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == newState) return;
    if (mState == State::Error) {
        ALOGV("Already in Error state.");
        return;
    }

    switch (newState) {
    case State::Idle:
        break;
    case State::Decoding:
        break;
    case State::Draining:
        if (mState != State::Decoding) newState = State::Error;
        break;
    case State::Error:
        break;
    }

    ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
    mState = newState;
}

// static
const char* V4L2Decoder::StateToString(State state) {
    switch (state) {
    case State::Idle:
        return "Idle";
    case State::Decoding:
        return "Decoding";
    case State::Draining:
        return "Draining";
    case State::Error:
        return "Error";
    }
}

}  // namespace android