1 // Copyright 2021 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "V4L2Encoder"
7 
8 #include <v4l2_codec2/v4l2/V4L2Encoder.h>
9 
10 #include <stdint.h>
11 #include <optional>
12 #include <vector>
13 
14 #include <base/bind.h>
15 #include <base/files/scoped_file.h>
16 #include <base/memory/ptr_util.h>
17 #include <log/log.h>
18 #include <ui/Rect.h>
19 
20 #include <v4l2_codec2/common/EncodeHelpers.h>
21 #include <v4l2_codec2/common/Fourcc.h>
22 #include <v4l2_codec2/components/BitstreamBuffer.h>
23 #include <v4l2_codec2/v4l2/V4L2Device.h>
24 
25 namespace android {
26 
27 namespace {
28 
29 // The maximum size for an output buffer, which is chosen empirically for a 1080p video.
30 constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024;  // 2MB
31 // The frame size for 1080p (FHD) video in pixels.
32 constexpr int k1080PSizeInPixels = 1920 * 1080;
33 // The frame size for 1440p (QHD) video in pixels.
34 constexpr int k1440PSizeInPixels = 2560 * 1440;
35 
36 // Use quadruple size of kMaxBitstreamBufferSizeInBytes when the input frame size is larger than
37 // 1440p, double if larger than 1080p. This is chosen empirically for some 4k encoding use cases and
38 // the Android CTS VideoEncoderTest (crbug.com/927284).
39 size_t GetMaxOutputBufferSize(const ui::Size& size) {
40     if (getArea(size) > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
41     if (getArea(size) > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
42     return kMaxBitstreamBufferSizeInBytes;
43 }
44 
45 // Define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control code if not present in header files.
46 #ifndef V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR
47 #define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
48 #endif
49 
50 }  // namespace
51 
52 // static
53 std::unique_ptr<VideoEncoder> V4L2Encoder::create(
54         C2Config::profile_t outputProfile, std::optional<uint8_t> level,
55         const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
56         C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate, std::optional<uint32_t> peakBitrate,
57         FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
58         OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb,
59         scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
60     ALOGV("%s()", __func__);
61 
62     std::unique_ptr<V4L2Encoder> encoder = ::base::WrapUnique<V4L2Encoder>(new V4L2Encoder(
63             std::move(taskRunner), std::move(fetchOutputBufferCb), std::move(inputBufferDoneCb),
64             std::move(outputBufferDoneCb), std::move(drainDoneCb), std::move(errorCb)));
65     if (!encoder->initialize(outputProfile, level, visibleSize, stride, keyFramePeriod, bitrateMode,
66                              bitrate, peakBitrate)) {
67         return nullptr;
68     }
69     return encoder;
70 }
71 
72 V4L2Encoder::V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
73                          FetchOutputBufferCB fetchOutputBufferCb,
74                          InputBufferDoneCB inputBufferDoneCb, OutputBufferDoneCB outputBufferDoneCb,
75                          DrainDoneCB drainDoneCb, ErrorCB errorCb)
76       : mFetchOutputBufferCb(fetchOutputBufferCb),
77         mInputBufferDoneCb(inputBufferDoneCb),
78         mOutputBufferDoneCb(outputBufferDoneCb),
79         mDrainDoneCb(std::move(drainDoneCb)),
80         mErrorCb(std::move(errorCb)),
81         mTaskRunner(std::move(taskRunner)) {
82     ALOGV("%s()", __func__);
83 
84     mWeakThis = mWeakThisFactory.GetWeakPtr();
85 }
86 
87 V4L2Encoder::~V4L2Encoder() {
88     ALOGV("%s()", __func__);
89     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
90 
91     mWeakThisFactory.InvalidateWeakPtrs();
92 
93     // Flushing the encoder will stop polling and streaming on the V4L2 device queues.
94     flush();
95 
96     // Deallocate all V4L2 device input and output buffers.
97     destroyInputBuffers();
98     destroyOutputBuffers();
99 }
100 
101 bool V4L2Encoder::encode(std::unique_ptr<InputFrame> frame) {
102     ALOGV("%s()", __func__);
103     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
104     ALOG_ASSERT(mState != State::UNINITIALIZED);
105 
106     // If we're in the error state we can immediately return, freeing the input buffer.
107     if (mState == State::ERROR) {
108         return false;
109     }
110 
111     if (!frame) {
112         ALOGW("Empty encode request scheduled");
113         return false;
114     }
115 
116     mEncodeRequests.push(EncodeRequest(std::move(frame)));
117 
118     // If we were waiting for encode requests, start encoding again.
119     if (mState == State::WAITING_FOR_INPUT_FRAME) {
120         setState(State::ENCODING);
121         mTaskRunner->PostTask(FROM_HERE,
122                               ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
123     }
124 
125     return true;
126 }
127 
128 void V4L2Encoder::drain() {
129     ALOGV("%s()", __func__);
130     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
131 
132     // We can only start draining if all the requests in our input queue have been queued on the V4L2
133     // device input queue, so we mark the last item in the input queue as EOS.
134     if (!mEncodeRequests.empty()) {
135         ALOGV("Marking last item (index: %" PRIu64 ") in encode request queue as EOS",
136               mEncodeRequests.back().video_frame->index());
137         mEncodeRequests.back().end_of_stream = true;
138         return;
139     }
140 
141     // Start a drain operation on the device. If no buffers are currently queued the device will
142     // return an empty buffer with the V4L2_BUF_FLAG_LAST flag set.
143     handleDrainRequest();
144 }
145 
146 void V4L2Encoder::flush() {
147     ALOGV("%s()", __func__);
148     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
149 
150     handleFlushRequest();
151 }
152 
153 bool V4L2Encoder::setBitrate(uint32_t bitrate) {
154     ALOGV("%s()", __func__);
155     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
156 
157     if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
158                               {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
159         ALOGE("Setting bitrate to %u failed", bitrate);
160         return false;
161     }
162     return true;
163 }
164 
165 bool V4L2Encoder::setPeakBitrate(uint32_t peakBitrate) {
166     ALOGV("%s()", __func__);
167     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
168 
169     if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
170                               {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, peakBitrate)})) {
171         // TODO(b/190336806): Our stack doesn't support dynamic peak bitrate changes yet, ignore
172         // errors for now.
173         ALOGW("Setting peak bitrate to %u failed", peakBitrate);
174     }
175     return true;
176 }
177 
178 bool V4L2Encoder::setFramerate(uint32_t framerate) {
179     ALOGV("%s()", __func__);
180     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
181 
182     if (framerate == 0) {
183         ALOGE("Requesting invalid framerate 0");
184         return false;
185     }
186 
187     struct v4l2_streamparm parms;
188     memset(&parms, 0, sizeof(v4l2_streamparm));
189     parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
190     parms.parm.output.timeperframe.numerator = 1;
191     parms.parm.output.timeperframe.denominator = framerate;
192     if (mDevice->ioctl(VIDIOC_S_PARM, &parms) != 0) {
193         ALOGE("Setting framerate to %u failed", framerate);
194         return false;
195     }
196     return true;
197 }
198 
199 void V4L2Encoder::requestKeyframe() {
200     ALOGV("%s()", __func__);
201     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
202 
203     mKeyFrameCounter = 0;
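    // Resetting the counter makes handleEncodeRequest() set V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME
    // before the next input frame is queued.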
204 }
205 
206 VideoPixelFormat V4L2Encoder::inputFormat() const {
207     return mInputLayout ? mInputLayout.value().mFormat : VideoPixelFormat::UNKNOWN;
208 }
209 
210 bool V4L2Encoder::initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
211                              const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
212                              C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate,
213                              std::optional<uint32_t> peakBitrate) {
214     ALOGV("%s()", __func__);
215     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
216     ALOG_ASSERT(keyFramePeriod > 0);
217 
218     mVisibleSize = visibleSize;
219     mKeyFramePeriod = keyFramePeriod;
220     mKeyFrameCounter = 0;
221 
222     // Open the V4L2 device for encoding to the requested output format.
223     // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
224     uint32_t outputPixelFormat = V4L2Device::c2ProfileToV4L2PixFmt(outputProfile, false);
225     if (!outputPixelFormat) {
226         ALOGE("Invalid output profile %s", profileToString(outputProfile));
227         return false;
228     }
229 
230     mDevice = V4L2Device::create();
231     if (!mDevice) {
232         ALOGE("Failed to create V4L2 device");
233         return false;
234     }
235 
236     if (!mDevice->open(V4L2Device::Type::kEncoder, outputPixelFormat)) {
237         ALOGE("Failed to open device for profile %s (%s)", profileToString(outputProfile),
238               fourccToString(outputPixelFormat).c_str());
239         return false;
240     }
241 
242     // Make sure the device has all required capabilities (multi-planar Memory-To-Memory and
243     // streaming I/O), and check whether flushing is supported.
244     if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
245         ALOGE("Device doesn't have the required capabilities");
246         return false;
247     }
248     if (!mDevice->isCommandSupported(V4L2_ENC_CMD_STOP)) {
249         ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
250         return false;
251     }
252 
253     // Get input/output queues so we can send encode requests to the device and get back the results.
254     mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
255     mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
256     if (!mInputQueue || !mOutputQueue) {
257         ALOGE("Failed to get V4L2 device queues");
258         return false;
259     }
260 
261     // Configure the requested bitrate mode and bitrate on the device.
262     if (!configureBitrateMode(bitrateMode) || !setBitrate(bitrate)) return false;
263 
264     // If the bitrate mode is VBR we also need to configure the peak bitrate on the device.
265     if ((bitrateMode == C2Config::BITRATE_VARIABLE) && !setPeakBitrate(*peakBitrate)) return false;
266 
267     // First try to configure the specified output format, as changing the output format can affect
268     // the configured input format.
269     if (!configureOutputFormat(outputProfile)) return false;
270 
271     // Configure the input format. If the device doesn't support the specified format we'll use one
272     // of the device's preferred formats in combination with an input format convertor.
273     if (!configureInputFormat(kInputPixelFormat, stride)) return false;
274 
275     // Create input and output buffers.
276     if (!createInputBuffers() || !createOutputBuffers()) return false;
277 
278     // Configure the device, setting all required controls.
279     if (!configureDevice(outputProfile, level)) return false;
280 
281     // We're ready to start encoding now.
282     setState(State::WAITING_FOR_INPUT_FRAME);
283     return true;
284 }
285 
286 void V4L2Encoder::handleEncodeRequest() {
287     ALOGV("%s()", __func__);
288     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
289     ALOG_ASSERT(mState == State::ENCODING || mState == State::ERROR);
290 
291     // If we're in the error state we can immediately return.
292     if (mState == State::ERROR) {
293         return;
294     }
295 
296     // It's possible we flushed the encoder since this function was scheduled.
297     if (mEncodeRequests.empty()) {
298         return;
299     }
300 
301     // Get the next encode request from the queue.
302     EncodeRequest& encodeRequest = mEncodeRequests.front();
303 
304     // Check if the device has free input buffers available. If not we'll switch to the
305     // WAITING_FOR_INPUT_BUFFERS state, and resume encoding once we've dequeued an input buffer.
306     // Note: The input buffers are not copied into the device's input buffers, but rather a memory
307     // pointer is imported. We still have to throttle the number of buffers queued simultaneously
308     // on the device, however.
309     if (mInputQueue->freeBuffersCount() == 0) {
310         ALOGV("Waiting for device to return input buffers");
311         setState(State::WAITING_FOR_V4L2_BUFFER);
312         return;
313     }
314 
315     // Request the next frame to be a key frame each time the counter reaches 0.
316     if (mKeyFrameCounter == 0) {
317         if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
318                                   {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
319             ALOGE("Failed requesting key frame");
320             onError();
321             return;
322         }
323     }
324     mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
325 
326     // Enqueue the input frame in the V4L2 device.
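    // Copy the frame's metadata first, as the frame itself is std::move()'d into the input queue.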
327     uint64_t index = encodeRequest.video_frame->index();
328     uint64_t timestamp = encodeRequest.video_frame->timestamp();
329     bool end_of_stream = encodeRequest.end_of_stream;
330     if (!enqueueInputBuffer(std::move(encodeRequest.video_frame))) {
331         ALOGE("Failed to enqueue input frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
332               timestamp);
333         onError();
334         return;
335     }
336     mEncodeRequests.pop();
337 
338     // Start streaming and polling on the input and output queue if required.
339     if (!mInputQueue->isStreaming()) {
340         ALOG_ASSERT(!mOutputQueue->isStreaming());
341         if (!mOutputQueue->streamon() || !mInputQueue->streamon()) {
342             ALOGE("Failed to start streaming on input and output queue");
343             onError();
344             return;
345         }
346         startDevicePoll();
347     }
348 
349     // Queue buffers on output queue. These buffers will be used to store the encoded bitstream.
350     while (mOutputQueue->freeBuffersCount() > 0) {
351         if (!enqueueOutputBuffer()) return;
352     }
353 
354     // Drain the encoder if requested.
355     if (end_of_stream) {
356         handleDrainRequest();
357         return;
358     }
359 
360     if (mEncodeRequests.empty()) {
361         setState(State::WAITING_FOR_INPUT_FRAME);
362         return;
363     }
364 
365     // Schedule the next buffer to be encoded.
366     mTaskRunner->PostTask(FROM_HERE,
367                           ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
368 }
369 
370 void V4L2Encoder::handleFlushRequest() {
371     ALOGV("%s()", __func__);
372     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
373 
374     // Stop the device poll thread.
375     stopDevicePoll();
376 
377     // Stop streaming on the V4L2 device, which stops all currently queued encode operations and
378     // releases all buffers currently in use by the device.
379     for (auto& queue : {mInputQueue, mOutputQueue}) {
380         if (queue && queue->isStreaming() && !queue->streamoff()) {
381             ALOGE("Failed to stop streaming on the device queue");
382             onError();
383         }
384     }
385 
386     // Clear all outstanding encode requests and references to input and output queue buffers.
387     while (!mEncodeRequests.empty()) {
388         mEncodeRequests.pop();
389     }
390     for (auto& buf : mInputBuffers) {
391         buf = nullptr;
392     }
393     for (auto& buf : mOutputBuffers) {
394         buf = nullptr;
395     }
396 
397     // Streaming and polling on the V4L2 device input and output queues will be resumed once new
398     // encode work is queued.
399     if (mState != State::ERROR) {
400         setState(State::WAITING_FOR_INPUT_FRAME);
401     }
402 }
403 
404 void V4L2Encoder::handleDrainRequest() {
405     ALOGV("%s()", __func__);
406     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
407 
408     if (mState == State::DRAINING || mState == State::ERROR) {
409         return;
410     }
411 
412     setState(State::DRAINING);
413 
414     // If we're not streaming we can consider the request completed immediately.
415     if (!mInputQueue->isStreaming()) {
416         onDrainDone(true);
417         return;
418     }
419 
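    // Send the V4L2_ENC_CMD_STOP command: the driver finishes the buffers that are already queued
    // and marks the last capture buffer with V4L2_BUF_FLAG_LAST (handled in dequeueOutputBuffer()).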
420     struct v4l2_encoder_cmd cmd;
421     memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
422     cmd.cmd = V4L2_ENC_CMD_STOP;
423     if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
424         ALOGE("Failed to stop encoder");
425         onDrainDone(false);
426         return;
427     }
428     ALOGV("%s(): Sent STOP command to encoder", __func__);
429 }
430 
431 void V4L2Encoder::onDrainDone(bool done) {
432     ALOGV("%s()", __func__);
433     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
434     ALOG_ASSERT(mState == State::DRAINING || mState == State::ERROR);
435 
436     if (mState == State::ERROR) {
437         return;
438     }
439 
440     if (!done) {
441         ALOGE("draining the encoder failed");
442         mDrainDoneCb.Run(false);
443         onError();
444         return;
445     }
446 
447     ALOGV("Draining done");
448     mDrainDoneCb.Run(true);
449 
450     // Draining the encoder is done, we can now start encoding again.
451     if (!mEncodeRequests.empty()) {
452         setState(State::ENCODING);
453         mTaskRunner->PostTask(FROM_HERE,
454                               ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
455     } else {
456         setState(State::WAITING_FOR_INPUT_FRAME);
457     }
458 }
459 
460 bool V4L2Encoder::configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride) {
461     ALOGV("%s()", __func__);
462     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
463     ALOG_ASSERT(mState == State::UNINITIALIZED);
464     ALOG_ASSERT(!mInputQueue->isStreaming());
465     ALOG_ASSERT(!isEmpty(mVisibleSize));
466 
467     // First try to use the requested pixel format directly.
468     std::optional<struct v4l2_format> format;
469     auto fourcc = Fourcc::fromVideoPixelFormat(inputFormat, false);
470     if (fourcc) {
471         format = mInputQueue->setFormat(fourcc->toV4L2PixFmt(), mVisibleSize, 0, stride);
472     }
473 
474     // If the device doesn't support the requested input format we'll try the device's preferred
475     // input pixel formats and use a format convertor. We need to try all formats as some formats
476     // might not be supported for the configured output format.
477     if (!format) {
478         std::vector<uint32_t> preferredFormats =
479                 mDevice->preferredInputFormat(V4L2Device::Type::kEncoder);
480         for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
481             format = mInputQueue->setFormat(preferredFormats[i], mVisibleSize, 0, stride);
482         }
483     }
484 
485     if (!format) {
486         ALOGE("Failed to set input format to %s", videoPixelFormatToString(inputFormat).c_str());
487         return false;
488     }
489 
490     // Check whether the negotiated input format is valid. The coded size might be adjusted to match
491     // encoder minimums, maximums and alignment requirements of the currently selected formats.
492     auto layout = V4L2Device::v4L2FormatToVideoFrameLayout(*format);
493     if (!layout) {
494         ALOGE("Invalid input layout");
495         return false;
496     }
497 
498     mInputLayout = layout.value();
499     if (!contains(Rect(mInputLayout->mCodedSize.width, mInputLayout->mCodedSize.height),
500                   Rect(mVisibleSize.width, mVisibleSize.height))) {
501         ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
502               toString(mVisibleSize).c_str(), toString(mInputLayout->mCodedSize).c_str());
503         return false;
504     }
505 
506     // Calculate the input coded size from the format.
507     // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
508     mInputCodedSize = V4L2Device::allocatedSizeFromV4L2Format(*format);
509 
510     // Configuring the input format might cause the output buffer size to change.
511     auto outputFormat = mOutputQueue->getFormat();
512     if (!outputFormat.first) {
513         ALOGE("Failed to get output format (errno: %i)", outputFormat.second);
514         return false;
515     }
516     uint32_t adjustedOutputBufferSize = outputFormat.first->fmt.pix_mp.plane_fmt[0].sizeimage;
517     if (mOutputBufferSize != adjustedOutputBufferSize) {
518         mOutputBufferSize = adjustedOutputBufferSize;
519         ALOGV("Output buffer size adjusted to: %u", mOutputBufferSize);
520     }
521 
522     // The coded input size might be different from the visible size due to alignment requirements,
523     // so we need to specify the visible rectangle. Note that this rectangle might still be adjusted
524     // due to hardware limitations.
525     Rect visibleRectangle(mVisibleSize.width, mVisibleSize.height);
526 
527     struct v4l2_rect rect;
528     memset(&rect, 0, sizeof(rect));
529     rect.left = visibleRectangle.left;
530     rect.top = visibleRectangle.top;
531     rect.width = visibleRectangle.width();
532     rect.height = visibleRectangle.height();
533 
534     // Try to adjust the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
535     // supported we'll try to use the VIDIOC_S_CROP command instead. The visible rectangle might be
536     // adjusted to conform to hardware limitations (e.g. round to closest horizontal and vertical
537     // offsets, width and height).
538     struct v4l2_selection selection_arg;
539     memset(&selection_arg, 0, sizeof(selection_arg));
540     selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
541     selection_arg.target = V4L2_SEL_TGT_CROP;
542     selection_arg.r = rect;
543     if (mDevice->ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
544         visibleRectangle = Rect(selection_arg.r.left, selection_arg.r.top,
545                                 selection_arg.r.left + selection_arg.r.width,
546                                 selection_arg.r.top + selection_arg.r.height);
547     } else {
548         struct v4l2_crop crop;
549         memset(&crop, 0, sizeof(v4l2_crop));
550         crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
551         crop.c = rect;
552         if (mDevice->ioctl(VIDIOC_S_CROP, &crop) != 0 ||
553             mDevice->ioctl(VIDIOC_G_CROP, &crop) != 0) {
554             ALOGE("Failed to crop to specified visible rectangle");
555             return false;
556         }
557         visibleRectangle = Rect(crop.c.left, crop.c.top, crop.c.left + crop.c.width,
558                                 crop.c.top + crop.c.height);
559     }
560 
561     ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
562           videoPixelFormatToString(mInputLayout->mFormat).c_str(), toString(mVisibleSize).c_str(),
563           visibleRectangle.width(), visibleRectangle.height(), toString(mInputCodedSize).c_str());
564 
565     mVisibleSize.set(visibleRectangle.width(), visibleRectangle.height());
566     return true;
567 }
568 
569 bool V4L2Encoder::configureOutputFormat(C2Config::profile_t outputProfile) {
570     ALOGV("%s()", __func__);
571     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
572     ALOG_ASSERT(mState == State::UNINITIALIZED);
573     ALOG_ASSERT(!mOutputQueue->isStreaming());
574     ALOG_ASSERT(!isEmpty(mVisibleSize));
575 
576     auto format = mOutputQueue->setFormat(V4L2Device::c2ProfileToV4L2PixFmt(outputProfile, false),
577                                           mVisibleSize, GetMaxOutputBufferSize(mVisibleSize));
578     if (!format) {
579         ALOGE("Failed to set output format to %s", profileToString(outputProfile));
580         return false;
581     }
582 
583     // The device might adjust the requested output buffer size to match hardware requirements.
584     mOutputBufferSize = format->fmt.pix_mp.plane_fmt[0].sizeimage;
585 
586     ALOGV("Output format set to %s (buffer size: %u)", profileToString(outputProfile),
587           mOutputBufferSize);
588     return true;
589 }
590 
591 bool V4L2Encoder::configureDevice(C2Config::profile_t outputProfile,
592                                   std::optional<const uint8_t> outputH264Level) {
593     ALOGV("%s()", __func__);
594     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
595 
596     // Enable frame-level bitrate control. This is the only mandatory general control.
597     if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
598                               {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
599         ALOGW("Failed enabling bitrate control");
600         // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is currently not supported yet,
601         // assume the operation was successful for now.
602     }
603 
604     // Additional optional controls:
605     // - Enable macroblock-level bitrate control.
606     // - Set GOP length to 0 to disable periodic key frames.
607     mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
608                                                 V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
609 
610     // All controls below are H.264-specific, so we can return here if the profile is not H.264.
611     if (outputProfile >= C2Config::PROFILE_AVC_BASELINE &&
612         outputProfile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
613         return configureH264(outputProfile, outputH264Level);
614     }
615 
616     return true;
617 }
618 
619 bool V4L2Encoder::configureH264(C2Config::profile_t outputProfile,
620                                 std::optional<const uint8_t> outputH264Level) {
621     // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
622     // devices support this through the V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control.
623     // Otherwise we have to cache the latest SPS and PPS and inject these manually.
624     if (mDevice->isCtrlExposed(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR)) {
625         if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
626                                   {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR, 1)})) {
627             ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
628             return false;
629         }
630         mInjectParamsBeforeIDR = false;
631         ALOGV("Device supports prepending SPS and PPS to each IDR");
632     } else {
633         mInjectParamsBeforeIDR = true;
634         ALOGV("Device doesn't support prepending SPS and PPS to IDR, injecting manually.");
635     }
636 
637     // Set the H.264 profile.
638     const int32_t profile = V4L2Device::c2ProfileToV4L2H264Profile(outputProfile);
639     if (profile < 0) {
640         ALOGE("Trying to set invalid H.264 profile");
641         return false;
642     }
643     if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
644                               {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile)})) {
645         ALOGE("Failed setting H.264 profile to %u", outputProfile);
646         return false;
647     }
648 
649     std::vector<V4L2ExtCtrl> h264Ctrls;
650 
651     // No B-frames, for lowest decoding latency.
652     h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
653     // Quantization parameter maximum value (for variable bitrate control).
654     h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);
655 
656     // Set H.264 output level. Use Level 4.0 as fallback default.
657     int32_t h264Level =
658             static_cast<int32_t>(outputH264Level.value_or(V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
659     h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL, h264Level);
660 
661     // Ask not to put SPS and PPS into separate bitstream buffers.
662     h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
663                            V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
664 
665     // Ignore return value as these controls are optional.
666     mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));
667 
668     return true;
669 }
670 
671 bool V4L2Encoder::configureBitrateMode(C2Config::bitrate_mode_t bitrateMode) {
672     ALOGV("%s()", __func__);
673     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
674 
675     v4l2_mpeg_video_bitrate_mode v4l2BitrateMode =
676             V4L2Device::c2BitrateModeToV4L2BitrateMode(bitrateMode);
677     if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
678                               {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_MODE, v4l2BitrateMode)})) {
679         // TODO(b/190336806): Our stack doesn't support bitrate mode changes yet. We default to CBR
680         // which is currently the only supported mode so we can safely ignore this for now.
681         ALOGW("Setting bitrate mode to %u failed", v4l2BitrateMode);
682     }
683     return true;
684 }
685 
686 bool V4L2Encoder::startDevicePoll() {
687     ALOGV("%s()", __func__);
688     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
689 
690     if (!mDevice->startPolling(mTaskRunner,
691                                ::base::BindRepeating(&V4L2Encoder::serviceDeviceTask, mWeakThis),
692                                ::base::BindRepeating(&V4L2Encoder::onPollError, mWeakThis))) {
693         ALOGE("Device poll thread failed to start");
694         onError();
695         return false;
696     }
697 
698     ALOGV("Device poll started");
699     return true;
700 }
701 
702 bool V4L2Encoder::stopDevicePoll() {
703     ALOGV("%s()", __func__);
704     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
705 
706     if (!mDevice->stopPolling()) {
707         ALOGE("Failed to stop polling on the device");
708         onError();
709         return false;
710     }
711 
712     ALOGV("Device poll stopped");
713     return true;
714 }
715 
716 void V4L2Encoder::onPollError() {
717     ALOGV("%s()", __func__);
718     onError();
719 }
720 
721 void V4L2Encoder::serviceDeviceTask(bool /*event*/) {
722     ALOGV("%s()", __func__);
723     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
724     ALOG_ASSERT(mState != State::UNINITIALIZED);
725 
726     if (mState == State::ERROR) {
727         return;
728     }
729 
730     // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
731     while (mInputQueue->queuedBuffersCount() > 0) {
732         if (!dequeueInputBuffer()) break;
733     }
734 
735     // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
736     while (mOutputQueue->queuedBuffersCount() > 0) {
737         if (!dequeueOutputBuffer()) break;
738     }
739 
740     ALOGV("%s() - done", __func__);
741 }
742 
743 bool V4L2Encoder::enqueueInputBuffer(std::unique_ptr<InputFrame> frame) {
744     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
745     ALOG_ASSERT(mInputQueue->freeBuffersCount() > 0);
746     ALOG_ASSERT(mState == State::ENCODING);
747     ALOG_ASSERT(frame);
748     ALOG_ASSERT(mInputLayout->mFormat == frame->pixelFormat());
749     ALOG_ASSERT(mInputLayout->mPlanes.size() == frame->planes().size());
750 
751     auto format = frame->pixelFormat();
752     auto& planes = frame->planes();
753     auto index = frame->index();
754     auto timestamp = frame->timestamp();
755 
756     ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);
757 
758     auto buffer = mInputQueue->getFreeBuffer();
759     if (!buffer) {
760         ALOGE("Failed to get free buffer from device input queue");
761         return false;
762     }
763 
764     // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
765     buffer->setTimeStamp(
766             {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
767              .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
768     size_t bufferId = buffer->bufferId();
769 
770     std::vector<int> fds = frame->fds();
771     if (mInputLayout->mMultiPlanar) {
772         // If the input format is multi-planar, then we need to submit one memory plane per color
773         // plane of our input frames.
774         for (size_t i = 0; i < planes.size(); ++i) {
775             size_t bytesUsed = ::base::checked_cast<size_t>(
776                     getArea(planeSize(format, i, mInputLayout->mCodedSize)).value());
777 
778             // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
779             // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
780             // right interface, including any necessary validation and potential alignment.
781             buffer->setPlaneDataOffset(i, planes[i].mOffset);
782             bytesUsed += planes[i].mOffset;
783             // Workaround: filling length should not be needed. This is a bug in the videobuf2 library.
784             buffer->setPlaneSize(i, mInputLayout->mPlanes[i].mSize + planes[i].mOffset);
785             buffer->setPlaneBytesUsed(i, bytesUsed);
786         }
787     } else {
788         ALOG_ASSERT(!planes.empty());
789         // If the input format is single-planar, then we only submit one buffer which contains
790         // all the color planes.
791         size_t bytesUsed = allocationSize(format, mInputLayout->mCodedSize);
792 
793         // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
794         // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
795         // right interface, including any necessary validation and potential alignment.
796         buffer->setPlaneDataOffset(0, planes[0].mOffset);
797         bytesUsed += planes[0].mOffset;
798         // Workaround: filling length should not be needed. This is a bug in the videobuf2 library.
799         buffer->setPlaneSize(0, bytesUsed);
800         buffer->setPlaneBytesUsed(0, bytesUsed);
801         // We only have one memory plane so we shall submit only one FD. The others are duplicates
802         // of the first one anyway.
803         fds.resize(1);
804     }
805 
806     if (!std::move(*buffer).queueDMABuf(fds)) {
807         ALOGE("Failed to queue input buffer using QueueDMABuf");
808         onError();
809         return false;
810     }
811 
812     ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
813           ", bufferId: %zu)",
814           index, timestamp, bufferId);
815 
816     ALOG_ASSERT(!mInputBuffers[bufferId]);
817     mInputBuffers[bufferId] = std::move(frame);
818 
819     return true;
820 }
821 
822 bool V4L2Encoder::enqueueOutputBuffer() {
823     ALOGV("%s()", __func__);
824     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
825     ALOG_ASSERT(mOutputQueue->freeBuffersCount() > 0);
826 
827     auto buffer = mOutputQueue->getFreeBuffer();
828     if (!buffer) {
829         ALOGE("Failed to get free buffer from device output queue");
830         onError();
831         return false;
832     }
833 
834     std::unique_ptr<BitstreamBuffer> bitstreamBuffer;
835     mFetchOutputBufferCb.Run(mOutputBufferSize, &bitstreamBuffer);
836     if (!bitstreamBuffer) {
837         ALOGE("Failed to fetch output block");
838         onError();
839         return false;
840     }
841 
842     size_t bufferId = buffer->bufferId();
843 
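    // Queue the DMA-buf file descriptor backing the bitstream buffer on the capture queue.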
844     std::vector<int> fds;
845     fds.push_back(bitstreamBuffer->dmabuf->handle()->data[0]);
846     if (!std::move(*buffer).queueDMABuf(fds)) {
847         ALOGE("Failed to queue output buffer using QueueDMABuf");
848         onError();
849         return false;
850     }
851 
852     ALOG_ASSERT(!mOutputBuffers[bufferId]);
853     mOutputBuffers[bufferId] = std::move(bitstreamBuffer);
854     ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
855     return true;
856 }
857 
858 bool V4L2Encoder::dequeueInputBuffer() {
859     ALOGV("%s()", __func__);
860     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
861     ALOG_ASSERT(mState != State::UNINITIALIZED);
862     ALOG_ASSERT(mInputQueue->queuedBuffersCount() > 0);
863 
864     if (mState == State::ERROR) {
865         return false;
866     }
867 
868     bool success;
869     V4L2ReadableBufferRef buffer;
870     std::tie(success, buffer) = mInputQueue->dequeueBuffer();
871     if (!success) {
872         ALOGE("Failed to dequeue buffer from input queue");
873         onError();
874         return false;
875     }
876     if (!buffer) {
877         // No more buffers ready to be dequeued in input queue.
878         return false;
879     }
880 
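    // Recover the frame index and the timestamp (in microseconds) attached when the buffer was queued.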
881     uint64_t index = mInputBuffers[buffer->bufferId()]->index();
882     int64_t timestamp = buffer->getTimeStamp().tv_usec +
883                         buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
884     ALOGV("Dequeued buffer from input queue (index: %" PRId64 ", timestamp: %" PRId64
885           ", bufferId: %zu)",
886           index, timestamp, buffer->bufferId());
887 
888     mInputBuffers[buffer->bufferId()] = nullptr;
889 
890     mInputBufferDoneCb.Run(index);
891 
892     // If we previously used up all input queue buffers we can start encoding again now.
893     if ((mState == State::WAITING_FOR_V4L2_BUFFER) && !mEncodeRequests.empty()) {
894         setState(State::ENCODING);
895         mTaskRunner->PostTask(FROM_HERE,
896                               ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
897     }
898 
899     return true;
900 }
901 
902 bool V4L2Encoder::dequeueOutputBuffer() {
903     ALOGV("%s()", __func__);
904     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
905     ALOG_ASSERT(mState != State::UNINITIALIZED);
906     ALOG_ASSERT(mOutputQueue->queuedBuffersCount() > 0);
907 
908     if (mState == State::ERROR) {
909         return false;
910     }
911 
912     bool success;
913     V4L2ReadableBufferRef buffer;
914     std::tie(success, buffer) = mOutputQueue->dequeueBuffer();
915     if (!success) {
916         ALOGE("Failed to dequeue buffer from output queue");
917         onError();
918         return false;
919     }
920     if (!buffer) {
921         // No more buffers ready to be dequeued in output queue.
922         return false;
923     }
924 
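    // The encoded payload size is the bytes used minus the driver-reported offset of the first plane.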
925     size_t encodedDataSize = buffer->getPlaneBytesUsed(0) - buffer->getPlaneDataOffset(0);
926     ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
927             buffer->getTimeStamp().tv_usec +
928             buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);
929 
930     ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
931           ", bufferId: %zu, data size: %zu, EOS: %d)",
932           timestamp.InMicroseconds(), buffer->bufferId(), encodedDataSize, buffer->isLast());
933 
934     if (!mOutputBuffers[buffer->bufferId()]) {
935         ALOGE("Failed to find output block associated with output buffer");
936         onError();
937         return false;
938     }
939 
940     std::unique_ptr<BitstreamBuffer> bitstreamBuffer =
941             std::move(mOutputBuffers[buffer->bufferId()]);
942     if (encodedDataSize > 0) {
943         if (!mInjectParamsBeforeIDR) {
944             // No need to inject SPS or PPS before IDR frames, we can just return the buffer as-is.
945             mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
946                                     buffer->isKeyframe(), std::move(bitstreamBuffer));
947         } else if (!buffer->isKeyframe()) {
948             // We need to inject SPS and PPS before IDR frames, but this frame is not a key frame.
949             // We can return the buffer as-is, but need to update our SPS and PPS cache if required.
950             C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
951                     bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
952             C2ReadView readView = constBlock.map().get();
953             extractSPSPPS(readView.data(), encodedDataSize, &mCachedSPS, &mCachedPPS);
954             mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
955                                     buffer->isKeyframe(), std::move(bitstreamBuffer));
956         } else {
957             // We need to inject our cached SPS and PPS NAL units to the IDR frame. It's possible
958             // this frame already has SPS and PPS NAL units attached, in which case we only need to
959             // update our cached SPS and PPS.
960             C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
961                     bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
962             C2ReadView readView = constBlock.map().get();
963 
964             // Allocate a new buffer to copy the data with prepended SPS and PPS into.
965             std::unique_ptr<BitstreamBuffer> prependedBitstreamBuffer;
966             mFetchOutputBufferCb.Run(mOutputBufferSize, &prependedBitstreamBuffer);
967             if (!prependedBitstreamBuffer) {
968                 ALOGE("Failed to fetch output block");
969                 onError();
970                 return false;
971             }
972             C2WriteView writeView = prependedBitstreamBuffer->dmabuf->map().get();
973 
974             // If there is not enough space in the output buffer just return the original buffer.
975             size_t newSize = prependSPSPPSToIDR(readView.data(), encodedDataSize, writeView.data(),
976                                                 writeView.size(), &mCachedSPS, &mCachedPPS);
977             if (newSize > 0) {
978                 mOutputBufferDoneCb.Run(newSize, timestamp.InMicroseconds(), buffer->isKeyframe(),
979                                         std::move(prependedBitstreamBuffer));
980             } else {
981                 mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
982                                         buffer->isKeyframe(), std::move(bitstreamBuffer));
983             }
984         }
985     }
986 
987     // If the buffer is marked as last and we were draining the encoder, draining is now done.
988     if ((mState == State::DRAINING) && buffer->isLast()) {
989         onDrainDone(true);
990         // Start the encoder again.
991         struct v4l2_encoder_cmd cmd;
992         memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
993         cmd.cmd = V4L2_ENC_CMD_START;
994         if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
995             ALOGE("Failed to restart encoder after draining (V4L2_ENC_CMD_START)");
996             onError();
997             return false;
998         }
999     }
1000 
1001     // Queue a new output buffer to replace the one we dequeued.
1002     buffer = nullptr;
1003     enqueueOutputBuffer();
1004 
1005     return true;
1006 }
1007 
1008 bool V4L2Encoder::createInputBuffers() {
1009     ALOGV("%s()", __func__);
1010     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1011     ALOG_ASSERT(!mInputQueue->isStreaming());
1012     ALOG_ASSERT(mInputBuffers.empty());
1013 
1014     // No memory is allocated here; we just generate a list of buffers on the input queue, which
1015     // will hold memory handles to the real buffers.
1016     if (mInputQueue->allocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
1017         ALOGE("Failed to create V4L2 input buffers.");
1018         return false;
1019     }
1020 
1021     mInputBuffers.resize(mInputQueue->allocatedBuffersCount());
1022     return true;
1023 }
1024 
1025 bool V4L2Encoder::createOutputBuffers() {
1026     ALOGV("%s()", __func__);
1027     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1028     ALOG_ASSERT(!mOutputQueue->isStreaming());
1029     ALOG_ASSERT(mOutputBuffers.empty());
1030 
1031     // No memory is allocated here, we just generate a list of buffers on the output queue, which
1032     // will hold memory handles to the real buffers.
1033     if (mOutputQueue->allocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
1034         kOutputBufferCount) {
1035         ALOGE("Failed to create V4L2 output buffers.");
1036         return false;
1037     }
1038 
1039     mOutputBuffers.resize(mOutputQueue->allocatedBuffersCount());
1040     return true;
1041 }
1042 
1043 void V4L2Encoder::destroyInputBuffers() {
1044     ALOGV("%s()", __func__);
1045     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1046     ALOG_ASSERT(!mInputQueue->isStreaming());
1047 
1048     if (!mInputQueue || mInputQueue->allocatedBuffersCount() == 0) return;
1049     mInputQueue->deallocateBuffers();
1050     mInputBuffers.clear();
1051 }
1052 
1053 void V4L2Encoder::destroyOutputBuffers() {
1054     ALOGV("%s()", __func__);
1055     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1056     ALOG_ASSERT(!mOutputQueue->isStreaming());
1057 
1058     if (!mOutputQueue || mOutputQueue->allocatedBuffersCount() == 0) return;
1059     mOutputQueue->deallocateBuffers();
1060     mOutputBuffers.clear();
1061 }
1062 
1063 void V4L2Encoder::onError() {
1064     ALOGV("%s()", __func__);
1065     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1066 
1067     if (mState != State::ERROR) {
1068         setState(State::ERROR);
1069         mErrorCb.Run();
1070     }
1071 }
1072 
1073 void V4L2Encoder::setState(State state) {
1074     ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
1075 
1076     // Check whether the state change is valid.
1077     switch (state) {
1078     case State::UNINITIALIZED:
1079         break;
1080     case State::WAITING_FOR_INPUT_FRAME:
1081         ALOG_ASSERT(mState != State::ERROR);
1082         break;
1083     case State::WAITING_FOR_V4L2_BUFFER:
1084         ALOG_ASSERT(mState == State::ENCODING);
1085         break;
1086     case State::ENCODING:
1087         ALOG_ASSERT(mState == State::WAITING_FOR_INPUT_FRAME ||
1088                     mState == State::WAITING_FOR_V4L2_BUFFER || mState == State::DRAINING);
1089         break;
1090     case State::DRAINING:
1091         ALOG_ASSERT(mState == State::ENCODING || mState == State::WAITING_FOR_INPUT_FRAME);
1092         break;
1093     case State::ERROR:
1094         break;
1095     }
1096 
1097     ALOGV("Changed encoder state from %s to %s", stateToString(mState), stateToString(state));
1098     mState = state;
1099 }
1100 
1101 const char* V4L2Encoder::stateToString(State state) {
1102     switch (state) {
1103     case State::UNINITIALIZED:
1104         return "UNINITIALIZED";
1105     case State::WAITING_FOR_INPUT_FRAME:
1106         return "WAITING_FOR_INPUT_FRAME";
1107     case State::WAITING_FOR_V4L2_BUFFER:
1108         return "WAITING_FOR_V4L2_BUFFER";
1109     case State::ENCODING:
1110         return "ENCODING";
1111     case State::DRAINING:
1112         return "DRAINING";
1113     case State::ERROR:
1114         return "ERROR";
1115     }
1116 }
1117 
1118 }  // namespace android
1119