// xref: /aosp_15_r20/frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.cpp (revision ec779b8e0859a360c3d303172224686826e6e0e1)
/*
 * Copyright (C) 2013-2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Camera3-OutputStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0

#include <algorithm>
#include <ctime>
#include <fstream>

#include <aidl/android/hardware/camera/device/CameraBlob.h>
#include <aidl/android/hardware/camera/device/CameraBlobId.h>
#include "aidl/android/hardware/graphics/common/Dataspace.h"

#include <android/data_space.h>
#include <android-base/unique_fd.h>
#include <com_android_internal_camera_flags.h>
#include <cutils/properties.h>
#include <ui/GraphicBuffer.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <camera/StringUtils.h>

#include <common/CameraDeviceBase.h>
#include "api1/client2/JpegProcessor.h"
#include "Camera3OutputStream.h"
#include "utils/TraceHFR.h"

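// container_of (defined below when the platform headers don't already provide
// it) recovers a pointer to an enclosing struct from a pointer to one of its
// members; returnBufferCheckedLocked() uses it to get back the
// ANativeWindowBuffer that owns a given buffer handle.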
#ifndef container_of
#define container_of(ptr, type, member) \
    (type *)((char*)(ptr) - offsetof(type, member))
#endif

namespace flags = com::android::internal::camera::flags;

namespace android {

namespace camera3 {

using aidl::android::hardware::camera::device::CameraBlob;
using aidl::android::hardware::camera::device::CameraBlobId;

Camera3OutputStream::Camera3OutputStream(int id,
        sp<Surface> consumer,
        uint32_t width, uint32_t height, int format,
        android_dataspace dataSpace, camera_stream_rotation_t rotation,
        nsecs_t timestampOffset, const std::string& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                            /*maxSize*/0, format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mConsumer(consumer),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(0),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    if (mConsumer == NULL) {
        ALOGE("%s: Consumer is NULL!", __FUNCTION__);
        mState = STATE_ERROR;
    }

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id,
        sp<Surface> consumer,
        uint32_t width, uint32_t height, size_t maxSize, int format,
        android_dataspace dataSpace, camera_stream_rotation_t rotation,
        nsecs_t timestampOffset, const std::string& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
                            format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
                            setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
                            deviceTimeBaseIsRealtime, timestampBase, colorSpace),
        mConsumer(consumer),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(0),
        mDropBuffers(false),
        mMirrorMode(mirrorMode),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
        ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
                format);
        mState = STATE_ERROR;
    }

    if (mConsumer == NULL) {
        ALOGE("%s: Consumer is NULL!", __FUNCTION__);
        mState = STATE_ERROR;
    }

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id,
        uint32_t width, uint32_t height, int format,
        uint64_t consumerUsage, android_dataspace dataSpace,
        camera_stream_rotation_t rotation, nsecs_t timestampOffset,
        const std::string& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
        int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
        int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
        int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
                            /*maxSize*/0, format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mConsumer(nullptr),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(consumerUsage),
        mDropBuffers(false),
        mMirrorMode(OutputConfiguration::MIRROR_MODE_AUTO),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {
    // Deferred consumers only support the preview surface format for now.
    if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
        ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
                __FUNCTION__);
        mState = STATE_ERROR;
    }

    // Validation check for the consumer usage flag.
    if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
            (consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
        ALOGE("%s: Deferred consumer usage flag is illegal %" PRIu64 "!",
              __FUNCTION__, consumerUsage);
        mState = STATE_ERROR;
    }

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}

Camera3OutputStream::Camera3OutputStream(int id, camera_stream_type_t type,
                                         uint32_t width, uint32_t height,
                                         int format,
                                         android_dataspace dataSpace,
                                         camera_stream_rotation_t rotation,
                                         const std::string& physicalCameraId,
                                         const std::unordered_set<int32_t> &sensorPixelModesUsed,
                                         IPCTransport transport,
                                         uint64_t consumerUsage, nsecs_t timestampOffset,
                                         int setId, bool isMultiResolution,
                                         int64_t dynamicRangeProfile, int64_t streamUseCase,
                                         bool deviceTimeBaseIsRealtime, int timestampBase,
                                         int32_t colorSpace, bool useReadoutTimestamp) :
        Camera3IOStreamBase(id, type, width, height,
                            /*maxSize*/0,
                            format, dataSpace, rotation,
                            physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
                            dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
                            timestampBase, colorSpace),
        mTransform(0),
        mTraceFirstBuffer(true),
        mUseBufferManager(false),
        mTimestampOffset(timestampOffset),
        mUseReadoutTime(useReadoutTimestamp),
        mConsumerUsage(consumerUsage),
        mDropBuffers(false),
        mMirrorMode(OutputConfiguration::MIRROR_MODE_AUTO),
        mDequeueBufferLatency(kDequeueLatencyBinSize),
        mIPCTransport(transport) {

    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);

    // Subclasses are expected to initialize mConsumer themselves.
}


Camera3OutputStream::~Camera3OutputStream() {
    disconnectLocked();
}

status_t Camera3OutputStream::getBufferLocked(camera_stream_buffer *buffer,
        const std::vector<size_t>&) {
    ATRACE_HFR_CALL();

    ANativeWindowBuffer* anb;
    int fenceFd = -1;

    status_t res;
    res = getBufferLockedCommon(&anb, &fenceFd);
    if (res != OK) {
        return res;
    }

    /**
     * The fence FD is now owned by the HAL, except in case of error,
     * in which case we reassign it to acquire_fence
     */
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);

    return OK;
}

status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
            ANativeWindowBuffer* buffer, int anwReleaseFence,
            const std::vector<size_t>&) {
    return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
}

status_t Camera3OutputStream::returnBufferLocked(
        const camera_stream_buffer &buffer,
        nsecs_t timestamp, nsecs_t readoutTimestamp,
        int32_t transform, const std::vector<size_t>& surface_ids) {
    ATRACE_HFR_CALL();

    if (mHandoutTotalBufferCount == 1) {
        returnPrefetchedBuffersLocked();
    }

    status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
                                         /*output*/true, transform, surface_ids);

    if (res != OK) {
        return res;
    }

    mLastTimestamp = timestamp;
    mFrameCount++;

    return OK;
}

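// A HIDL HAL writes a camera_jpeg_blob_t trailer (uint16_t id + uint32_t size)
// at the end of a BLOB buffer, while readers on the AIDL stack expect the AIDL
// CameraBlob layout (two int32 fields) there. Because the two trailers have
// different sizes, the HIDL-written trailer lands at the wrong offset for AIDL
// readers, so this helper rewrites it in place at the AIDL offset. (This
// summary is inferred from the code below; see http://b/229688810 for the
// original type discrepancy.)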
status_t Camera3OutputStream::fixUpHidlJpegBlobHeader(ANativeWindowBuffer* anwBuffer, int fence) {
    // Lock the JPEG buffer for CPU read
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
    void* mapped = nullptr;
    base::unique_fd fenceFd(dup(fence));
    // Use USAGE_SW_WRITE_RARELY since we're going to re-write the CameraBlob
    // header.
    GraphicBufferLocker gbLocker(graphicBuffer);
    status_t res =
            gbLocker.lockAsync(
                    GraphicBuffer::USAGE_SW_READ_OFTEN | GraphicBuffer::USAGE_SW_WRITE_RARELY,
                    &mapped, fenceFd.release());
    if (res != OK) {
        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
        return res;
    }

    uint8_t *hidlHeaderStart =
            static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t);
    // Check that the jpeg buffer is big enough to contain HIDL camera blob
    if (hidlHeaderStart < static_cast<uint8_t *>(mapped)) {
        ALOGE("%s, jpeg buffer not large enough to fit HIDL camera blob %" PRIu32, __FUNCTION__,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }
    camera_jpeg_blob_t *hidlBlobHeader = reinterpret_cast<camera_jpeg_blob_t *>(hidlHeaderStart);

    // Check that the blob is indeed the jpeg blob id.
    if (hidlBlobHeader->jpeg_blob_id != CAMERA_JPEG_BLOB_ID) {
        ALOGE("%s, jpeg blob id %d is not correct", __FUNCTION__, hidlBlobHeader->jpeg_blob_id);
        return BAD_VALUE;
    }

    // Retrieve id and blob size
    CameraBlobId blobId = static_cast<CameraBlobId>(hidlBlobHeader->jpeg_blob_id);
    uint32_t blobSizeBytes = hidlBlobHeader->jpeg_size;

    if (blobSizeBytes > (graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t))) {
        ALOGE("%s, blobSize in HIDL jpeg blob : %d is corrupt, buffer size %" PRIu32, __FUNCTION__,
                  blobSizeBytes, graphicBuffer->getWidth());
    }

    uint8_t *aidlHeaderStart =
            static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(CameraBlob);

    // Check that the jpeg buffer is big enough to contain AIDL camera blob
    if (aidlHeaderStart < static_cast<uint8_t *>(mapped)) {
        ALOGE("%s, jpeg buffer not large enough to fit AIDL camera blob %" PRIu32, __FUNCTION__,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }

    if (static_cast<uint8_t*>(mapped) + blobSizeBytes > aidlHeaderStart) {
        ALOGE("%s, jpeg blob with size %d , buffer size %" PRIu32 " not large enough to fit"
                " AIDL camera blob without corrupting jpeg", __FUNCTION__, blobSizeBytes,
                graphicBuffer->getWidth());
        return BAD_VALUE;
    }

    // Fill in JPEG header
    CameraBlob aidlHeader = {
            .blobId = blobId,
            .blobSizeBytes = static_cast<int32_t>(blobSizeBytes)
    };
    memcpy(aidlHeaderStart, &aidlHeader, sizeof(CameraBlob));
    graphicBuffer->unlock();
    return OK;
}

status_t Camera3OutputStream::returnBufferCheckedLocked(
            const camera_stream_buffer &buffer,
            nsecs_t timestamp,
            nsecs_t readoutTimestamp,
            [[maybe_unused]] bool output,
            int32_t transform,
            const std::vector<size_t>& surface_ids,
            /*out*/
            sp<Fence> *releaseFenceOut) {

    ALOG_ASSERT(output, "Expected output to be true");

    status_t res;

    // Fence management - always honor release fence from HAL
    sp<Fence> releaseFence = new Fence(buffer.release_fence);
    int anwReleaseFence = releaseFence->dup();

    /**
     * Release the lock briefly to avoid deadlock with
     * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
     * thread will go into StreamingProcessor::onFrameAvailable) during
     * queueBuffer
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    StreamState state = mState;
    mLock.unlock();

    ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
    bool bufferDeferred = false;
    /**
     * Return buffer back to ANativeWindow
     */
    if (buffer.status == CAMERA_BUFFER_STATUS_ERROR || mDropBuffers || timestamp == 0) {
        // Cancel buffer
        if (mDropBuffers) {
            ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
        } else if (buffer.status == CAMERA_BUFFER_STATUS_ERROR) {
            ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
        } else {
            ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
        }

        res = currentConsumer->cancelBuffer(currentConsumer.get(),
                anwBuffer,
                anwReleaseFence);
        if (shouldLogError(res, state)) {
            ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }

        notifyBufferReleased(anwBuffer);
        if (mUseBufferManager) {
            // Return this buffer back to buffer manager.
            mBufferProducerListener->onBufferReleased();
        }
    } else {
        if (mTraceFirstBuffer && (stream_type == CAMERA_STREAM_OUTPUT)) {
            {
                char traceLog[48];
                snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
                ATRACE_NAME(traceLog);
            }
            mTraceFirstBuffer = false;
        }
        // Fix CameraBlob id type discrepancy between HIDL and AIDL, details: http://b/229688810
        if (getFormat() == HAL_PIXEL_FORMAT_BLOB && (getDataSpace() == HAL_DATASPACE_V0_JFIF ||
                    (getDataSpace() ==
                     static_cast<android_dataspace_t>(
                         aidl::android::hardware::graphics::common::Dataspace::HEIF_ULTRAHDR)) ||
                    (getDataSpace() ==
                     static_cast<android_dataspace_t>(
                         aidl::android::hardware::graphics::common::Dataspace::JPEG_R)))) {
            if (mIPCTransport == IPCTransport::HIDL) {
                fixUpHidlJpegBlobHeader(anwBuffer, anwReleaseFence);
            }
            // If this is a JPEG output, and image dump mask is set, save image to
            // disk.
            if (mImageDumpMask) {
                dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
            }
        }

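        // Select the timestamp to attach to this buffer: prefer the sensor
        // readout time when readout timestamps are in use or the stream is
        // display-synced, falling back to the start-of-exposure timestamp when
        // no readout time is available, then subtract mTimestampOffset to move
        // the value into the stream's configured timestamp base.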
        nsecs_t captureTime = ((mUseReadoutTime || mSyncToDisplay) && readoutTimestamp != 0 ?
                readoutTimestamp : timestamp) - mTimestampOffset;
        if (mPreviewFrameSpacer != nullptr) {
            nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
                    - mTimestampOffset;
            res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
                    transform, anwBuffer, anwReleaseFence);
            if (res != OK) {
                ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
                return res;
            }
            bufferDeferred = true;
        } else {
            nsecs_t presentTime = mSyncToDisplay ?
                    syncTimestampToDisplayLocked(captureTime, releaseFence) : captureTime;

            setTransform(transform, true/*mayChangeMirror*/);
            res = native_window_set_buffers_timestamp(mConsumer.get(), presentTime);
            if (res != OK) {
                ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                      __FUNCTION__, mId, strerror(-res), res);
                return res;
            }

            queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);

            res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
            if (shouldLogError(res, state)) {
                ALOGE("%s: Stream %d: Error queueing buffer to native window:"
                      " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
            }
        }
    }
    mLock.lock();

    if (bufferDeferred) {
        mCachedOutputBufferCount++;
    }

    // Once a valid buffer has been returned to the queue, we can no longer
    // dequeue all buffers for preallocation.
    if (buffer.status != CAMERA_BUFFER_STATUS_ERROR) {
        mStreamUnpreparable = true;
    }

    *releaseFenceOut = releaseFence;

    return res;
}

void Camera3OutputStream::dump(int fd, [[maybe_unused]] const Vector<String16> &args) {
    std::string lines;
    lines += fmt::sprintf("    Stream[%d]: Output\n", mId);
    lines += fmt::sprintf("      Consumer name: %s\n", (mConsumer.get() != nullptr) ?
            mConsumer->getConsumerName() : "Deferred");
    write(fd, lines.c_str(), lines.size());

    Camera3IOStreamBase::dump(fd, args);

    mDequeueBufferLatency.dump(fd,
        "      DequeueBuffer latency histogram:");
}

status_t Camera3OutputStream::setTransform(int transform, bool mayChangeMirror, int surfaceId) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);

    if (mMirrorMode != OutputConfiguration::MIRROR_MODE_AUTO && mayChangeMirror) {
        // If the mirroring mode is not AUTO, do not allow transform update
        // which may change mirror.
        return OK;
    }

    status_t res = OK;

    if (surfaceId != 0) {
        ALOGE("%s: Invalid surfaceId %d", __FUNCTION__, surfaceId);
        return BAD_VALUE;
    }

    if (transform == -1) return res;

    if (mState == STATE_ERROR) {
        ALOGE("%s: Stream in error state", __FUNCTION__);
        return INVALID_OPERATION;
    }

    mTransform = transform;
    if (mState == STATE_CONFIGURED) {
        res = native_window_set_buffers_transform(mConsumer.get(),
                transform);
        if (res != OK) {
            ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                    __FUNCTION__, transform, strerror(-res), res);
        }
    }
    return res;
}

status_t Camera3OutputStream::configureQueueLocked() {
    status_t res;

    mTraceFirstBuffer = true;
    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
        return res;
    }

    if ((res = configureConsumerQueueLocked(true /*allowPreviewRespace*/)) != OK) {
        return res;
    }

    if ((res = native_window_set_buffers_transform(mConsumer.get(), mTransform)) != OK) {
        ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
                __FUNCTION__, mTransform, strerror(-res), res);
        return res;
    }

    // Set the dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw
    // texture. We need to skip these cases, as a timeout would disable the non-blocking
    // (async) mode.
    if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
        if (mUseBufferManager) {
            // When the buffer manager is handling the buffer, we should have available buffers
            // in the buffer queue before we call dequeueBuffer, because the buffer manager is
            // tracking free buffers.
            // There are however some consumer-side features (ImageReader::discardFreeBuffers)
            // that can discard free buffers without notifying the buffer manager. We want the
            // timeout to happen immediately here so the buffer manager can try to update its
            // internal state and allocate a buffer instead of waiting.
            mConsumer->setDequeueTimeout(0);
        } else {
            mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
        }
    }

    return OK;
}

status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewRespace) {
    status_t res;

    mTraceFirstBuffer = true;

    ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");

    // Configure consumer-side ANativeWindow interface. The listener may be used
    // to notify the buffer manager (if it is used) of the returned buffers.
    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
            /*listener*/mBufferProducerListener,
            /*reportBufferRemoval*/true);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mId);
        return res;
    }

    res = native_window_set_usage(mConsumer.get(), mUsage);
    if (res != OK) {
        ALOGE("%s: Unable to configure usage %" PRIu64 " for stream %d",
                __FUNCTION__, mUsage, mId);
        return res;
    }

    res = native_window_set_scaling_mode(mConsumer.get(),
            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream scaling: %s (%d)",
                __FUNCTION__, strerror(-res), res);
        return res;
    }

    if (mMaxSize == 0) {
        // For buffers of known size
        res = native_window_set_buffers_dimensions(mConsumer.get(),
                camera_stream::width, camera_stream::height);
    } else {
        // For buffers with bounded size
        res = native_window_set_buffers_dimensions(mConsumer.get(),
                mMaxSize, 1);
    }
    if (res != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %d x %d (maxSize %zu) for stream %d",
                __FUNCTION__, camera_stream::width, camera_stream::height,
                mMaxSize, mId);
        return res;
    }
    res = native_window_set_buffers_format(mConsumer.get(),
            camera_stream::format);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream buffer format %#x for stream %d",
                __FUNCTION__, camera_stream::format, mId);
        return res;
    }

    res = native_window_set_buffers_data_space(mConsumer.get(),
            camera_stream::data_space);
    if (res != OK) {
        ALOGE("%s: Unable to configure stream dataspace %#x for stream %d",
                __FUNCTION__, camera_stream::data_space, mId);
        return res;
    }

    int maxConsumerBuffers = 0;
    res = static_cast<ANativeWindow*>(mConsumer.get())->query(
            mConsumer.get(),
            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
    if (res != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mId);
        return res;
    }

    ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
            maxConsumerBuffers, camera_stream::max_buffers);
    if (camera_stream::max_buffers == 0) {
        ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
                __FUNCTION__, camera_stream::max_buffers);
        return INVALID_OPERATION;
    }

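    // Size the queue to let the HAL hold max_buffers in flight while the
    // consumer keeps its minimum number of undequeued buffers.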
    mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;

    int timestampBase = getTimestampBase();
    bool isDefaultTimeBase = (timestampBase ==
            OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
    if (allowPreviewRespace) {
        bool forceChoreographer = (timestampBase ==
                OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED);
        bool defaultToChoreographer = (isDefaultTimeBase &&
                isConsumedByHWComposer());
        bool defaultToSpacer = (isDefaultTimeBase &&
                isConsumedByHWTexture() &&
                !isConsumedByCPU() &&
                !isVideoStream());
        if (forceChoreographer || defaultToChoreographer) {
            mSyncToDisplay = true;
            // For a choreographer-synced stream, extra buffers aren't kept by
            // the camera service, so there is no need to update mMaxCachedBufferCount.
            mTotalBufferCount += kDisplaySyncExtraBuffer;
        } else if (defaultToSpacer) {
            mPreviewFrameSpacer = new PreviewFrameSpacer(this, mConsumer);
            // For the preview frame spacer, the extra buffer is kept by the camera
            // service, so update mMaxCachedBufferCount.
            mMaxCachedBufferCount = 1;
            mTotalBufferCount += mMaxCachedBufferCount;
            res = mPreviewFrameSpacer->run((std::string("PreviewSpacer-")
                    + std::to_string(mId)).c_str());
            if (res != OK) {
                ALOGE("%s: Unable to start preview spacer: %s (%d)", __FUNCTION__,
                        strerror(-res), res);
                return res;
            }
        }
    }
    mHandoutTotalBufferCount = 0;
    mFrameCount = 0;
    mLastTimestamp = 0;

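    // Adjust mTimestampOffset so that queued buffer timestamps end up in the
    // time base the client requested; as the comments below note, the offset
    // passed to the constructor is nominally bootTime - monotonicTime, and an
    // offset of 0 leaves timestamps in the device's native time base.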
    if (isDeviceTimeBaseRealtime()) {
        if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
            // Default time base, but not hardware composer or video encoder
            mTimestampOffset = 0;
        } else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
                timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
            mTimestampOffset = 0;
        }
        // If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
        // timestamp offset as bootTime - monotonicTime.
    } else {
        if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME) {
            // Reverse offset for monotonicTime -> bootTime
            mTimestampOffset = -mTimestampOffset;
        } else {
            // If timestampBase is DEFAULT, MONOTONIC, SENSOR or
            // CHOREOGRAPHER_SYNCED, timestamp offset is 0.
            mTimestampOffset = 0;
        }
    }

    res = mConsumer->setMaxDequeuedBufferCount(mTotalBufferCount - maxConsumerBuffers);
    if (res != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d",
                __FUNCTION__, mId);
        return res;
    }

    /**
     * The Camera3 buffer manager is only supported by HAL3.3 onwards, as older HALs require
     * buffers to be statically allocated for internal static buffer registration, while the
     * buffers provided by the buffer manager are dynamically allocated. Camera3Device only
     * sets mBufferManager if the device version is > HAL3.2, which guarantees that the buffer
     * manager setup is skipped in the code below. Note that HAL3.2 is also excluded here, as
     * some HAL3.2 devices may not support dynamic buffer registration.
     * Also, Camera3BufferManager does not support display/texture streams as they have their
     * own buffer management logic.
     */
    if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID &&
            !(isConsumedByHWComposer() || isConsumedByHWTexture())) {
        uint64_t consumerUsage = 0;
        getEndpointUsage(&consumerUsage);
        uint32_t width = (mMaxSize == 0) ? getWidth() : mMaxSize;
        uint32_t height = (mMaxSize == 0) ? getHeight() : 1;
        StreamInfo streamInfo(
                getId(), getStreamSetId(), width, height, getFormat(), getDataSpace(),
                mUsage | consumerUsage, mTotalBufferCount,
                /*isConfigured*/true, isMultiResolution());
        wp<Camera3OutputStream> weakThis(this);
        res = mBufferManager->registerStream(weakThis,
                streamInfo);
        if (res == OK) {
            // Disable buffer allocation for this BufferQueue, buffer manager will take over
            // the buffer allocation responsibility.
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
            mConsumer->allowAllocation(false);
#else
            mConsumer->getIGraphicBufferProducer()->allowAllocation(false);
#endif
            mUseBufferManager = true;
        } else {
            ALOGE("%s: Unable to register stream %d to camera3 buffer manager, "
                  "(error %d %s), fall back to BufferQueue for buffer management!",
                  __FUNCTION__, mId, res, strerror(-res));
        }
    }

    return OK;
}

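// Common buffer-acquisition path: when the buffer manager owns allocation,
// first try to obtain a manager-provided buffer and attach it to the surface;
// otherwise (or when enough buffers are already attached) dequeue directly
// from the surface, optionally in batches. On a dequeue timeout with the
// buffer manager active, ask the manager again without freeing buffers.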
status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
    ATRACE_HFR_CALL();
    status_t res;

    if ((res = getBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    bool gotBufferFromManager = false;

    if (mUseBufferManager) {
        sp<GraphicBuffer> gb;
        res = mBufferManager->getBufferForStream(getId(), getStreamSetId(),
                isMultiResolution(), &gb, fenceFd);
        if (res == OK) {
            // Attach this buffer to the bufferQueue: the buffer will be in the dequeued state
            // after a successful return.
            *anb = gb.get();
            res = mConsumer->attachBuffer(*anb);
            if (shouldLogError(res, mState)) {
                ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            if (res != OK) {
                checkRetAndSetAbandonedLocked(res);
                return res;
            }
            gotBufferFromManager = true;
            ALOGV("Stream %d: Attached new buffer", getId());
        } else if (res == ALREADY_EXISTS) {
            // Have sufficient free buffers already attached, can just
            // dequeue from the buffer queue
            ALOGV("Stream %d: Reusing attached buffer", getId());
            gotBufferFromManager = false;
        } else if (res != OK) {
            ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
                    __FUNCTION__, mId, strerror(-res), res);
            return res;
        }
    }
    if (!gotBufferFromManager) {
        /**
         * Release the lock briefly to avoid deadlock in the scenario below:
         * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
         * This thread acquired the StreamingProcessor lock and tries to lock the Camera3Stream
         * lock.
         * Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
         * This thread acquired the Camera3Stream lock and bufferQueue lock, and tries to lock
         * the StreamingProcessor lock.
         * Thread 3: Camera3Stream::getBuffer(). This thread acquired the Camera3Stream lock
         * and tries to lock the bufferQueue lock.
         * That is a circular locking dependency.
         */
        sp<Surface> consumer = mConsumer;
        size_t remainingBuffers = (mState == STATE_PREPARING ? mTotalBufferCount :
                                   camera_stream::max_buffers) - mHandoutTotalBufferCount;
        mLock.unlock();

        nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);

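        // Batched mode (mBatchSize > 1, typically used for high-frame-rate
        // sessions): dequeue a whole batch of buffers from the surface at once
        // and hand them out one at a time until mBatchedBuffers is drained.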
        size_t batchSize = mBatchSize.load();
        if (batchSize == 1) {
            sp<ANativeWindow> anw = consumer;
            res = anw->dequeueBuffer(anw.get(), anb, fenceFd);
        } else {
            std::unique_lock<std::mutex> batchLock(mBatchLock);
            res = OK;
            if (mBatchedBuffers.size() == 0) {
                if (remainingBuffers == 0) {
                    ALOGE("%s: cannot get buffer while all buffers are handed out", __FUNCTION__);
                    return INVALID_OPERATION;
                }
                if (batchSize > remainingBuffers) {
                    batchSize = remainingBuffers;
                }
                batchLock.unlock();
                // Refill batched buffers
                std::vector<Surface::BatchBuffer> batchedBuffers;
                batchedBuffers.resize(batchSize);
                res = consumer->dequeueBuffers(&batchedBuffers);
                batchLock.lock();
                if (res != OK) {
                    ALOGE("%s: batch dequeueBuffers call failed! %s (%d)",
                            __FUNCTION__, strerror(-res), res);
                } else {
                    mBatchedBuffers = std::move(batchedBuffers);
                }
            }

            if (res == OK) {
                // Dispatch batch buffers
                *anb = mBatchedBuffers.back().buffer;
                *fenceFd = mBatchedBuffers.back().fenceFd;
                mBatchedBuffers.pop_back();
            }
        }

        nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
        mDequeueBufferLatency.add(dequeueStart, dequeueEnd);

        mLock.lock();

        if (mUseBufferManager && res == TIMED_OUT) {
            checkRemovedBuffersLocked();

            sp<GraphicBuffer> gb;
            res = mBufferManager->getBufferForStream(
                    getId(), getStreamSetId(), isMultiResolution(),
                    &gb, fenceFd, /*noFreeBuffer*/true);

            if (res == OK) {
                // Attach this buffer to the bufferQueue: the buffer will be in the dequeued
                // state after a successful return.
                *anb = gb.get();
                res = mConsumer->attachBuffer(*anb);
                gotBufferFromManager = true;
                ALOGV("Stream %d: Attached new buffer", getId());

                if (res != OK) {
                    if (shouldLogError(res, mState)) {
                        ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
                                " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
                    }
                    checkRetAndSetAbandonedLocked(res);
                    return res;
                }
            } else {
                ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager:"
                        " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
                return res;
            }
        } else if (res != OK) {
            if (shouldLogError(res, mState)) {
                ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            checkRetAndSetAbandonedLocked(res);
            return res;
        }
    }

    if (res == OK) {
        checkRemovedBuffersLocked();
    }

    return res;
}

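// Drain the list of buffers the consumer side freed behind our back (e.g., via
// ImageReader::discardFreeBuffers), notify the buffer-freed listener, and
// optionally tell the buffer manager so its allocation accounting stays
// correct.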
void Camera3OutputStream::checkRemovedBuffersLocked(bool notifyBufferManager) {
    std::vector<sp<GraphicBuffer>> removedBuffers;
    status_t res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
    if (res == OK) {
        onBuffersRemovedLocked(removedBuffers);

        if (notifyBufferManager && mUseBufferManager && removedBuffers.size() > 0) {
            mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), isMultiResolution(),
                    removedBuffers.size());
        }
    }
}

void Camera3OutputStream::checkRetAndSetAbandonedLocked(status_t res) {
    // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is
    // STATE_PREPARING, let prepareNextBuffer handle the error.)
    if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
        mState = STATE_ABANDONED;
    }
}

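// DEAD_OBJECT/NO_INIT from a surface that has already been abandoned is an
// expected, benign race (the consumer went away), so don't log an error for
// it.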
bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
    if (res == OK) {
        return false;
    }
    if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
        return false;
    }
    return true;
}

void Camera3OutputStream::onCachedBufferQueued() {
    Mutex::Autolock l(mLock);
    mCachedOutputBufferCount--;
    // Signal whoever is waiting for the buffer to be returned to the buffer
    // queue.
    mOutputBufferReturnedSignal.signal();
}

status_t Camera3OutputStream::disconnectLocked() {
    status_t res;

    if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
        return res;
    }

    // Stream configuration was not finished (we can only be in STATE_IN_CONFIG or
    // STATE_CONSTRUCTED state), so there is no need to change the stream state; return OK.
    if (mConsumer == nullptr) {
        return OK;
    }

    returnPrefetchedBuffersLocked();

    if (mPreviewFrameSpacer != nullptr) {
        mPreviewFrameSpacer->requestExit();
    }

    ALOGV("%s: disconnecting stream %d from native window", __FUNCTION__, getId());

    res = native_window_api_disconnect(mConsumer.get(),
                                       NATIVE_WINDOW_API_CAMERA);
    /**
     * This is not an error. If the client's calling process dies, the window
     * also dies and all calls to it return DEAD_OBJECT, thus it's already
     * "disconnected"
     */
    if (res == DEAD_OBJECT) {
        ALOGW("%s: While disconnecting stream %d from native window, the"
                " native window died from under us", __FUNCTION__, mId);
    }
    else if (res != OK) {
        ALOGE("%s: Unable to disconnect stream %d from native window "
              "(error %d %s)",
              __FUNCTION__, mId, res, strerror(-res));
        mState = STATE_ERROR;
        return res;
    }

    // Since the device is already idle, there is no getBuffer call to the buffer manager, so
    // unregistering the stream at this point should be safe.
    if (mUseBufferManager) {
        res = mBufferManager->unregisterStream(getId(), getStreamSetId(), isMultiResolution());
        if (res != OK) {
            ALOGE("%s: Unable to unregister stream %d from buffer manager "
                    "(error %d %s)", __FUNCTION__, mId, res, strerror(-res));
            mState = STATE_ERROR;
            return res;
        }
        // Note that, to make the prepare/teardown case work, we must not call
        // mBufferManager.clear(), as the stream is still in a usable state after this call.
        mUseBufferManager = false;
    }

    mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
                                           : STATE_CONSTRUCTED;

    mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
    mDequeueBufferLatency.reset();
    return OK;
}

status_t Camera3OutputStream::getEndpointUsage(uint64_t *usage) {

    status_t res;

    if (mConsumer == nullptr) {
        // mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
        *usage = mConsumerUsage;
        return OK;
    }

    res = getEndpointUsageForSurface(usage, mConsumer);

    return res;
}

void Camera3OutputStream::applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/) {
    if (consumerUsage == nullptr) {
        return;
    }

    // If an opaque output stream's endpoint is ImageReader, add
    // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
    // for the ZSL use case.
    // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set:
    //     1. GRALLOC_USAGE_HW_TEXTURE
    //     2. GRALLOC_USAGE_HW_RENDER
    //     3. GRALLOC_USAGE_HW_COMPOSER
    //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
            (*consumerUsage & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
        *consumerUsage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
    }
}

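// Query the consumer's usage flags for a surface, applying the ZSL quirk
// above. For the stream's own consumer the result is cached after the first
// query, presumably to avoid repeated queries while the surface stays
// connected.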
status_t Camera3OutputStream::getEndpointUsageForSurface(uint64_t *usage,
        const sp<Surface>& surface) {
    bool internalConsumer = (mConsumer.get() != nullptr) && (mConsumer == surface);
    if (mConsumerUsageCachedValue.has_value() && internalConsumer) {
        *usage = mConsumerUsageCachedValue.value();
        return OK;
    }

    status_t res;

    res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(surface.get()), usage);
    applyZSLUsageQuirk(camera_stream::format, usage);
    if (internalConsumer) {
        mConsumerUsageCachedValue = *usage;
    }
    return res;
}

bool Camera3OutputStream::isVideoStream() {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0;
}

status_t Camera3OutputStream::setBufferManager(sp<Camera3BufferManager> bufferManager) {
    Mutex::Autolock l(mLock);
    if (mState != STATE_CONSTRUCTED) {
        ALOGE("%s: this method can only be called when the stream is in the CONSTRUCTED state.",
                __FUNCTION__);
        return INVALID_OPERATION;
    }
    mBufferManager = bufferManager;

    return OK;
}

status_t Camera3OutputStream::updateStream(const std::vector<SurfaceHolder> &/*outputSurfaces*/,
            const std::vector<OutputStreamInfo> &/*outputInfo*/,
            const std::vector<size_t> &/*removedSurfaceIds*/,
            KeyedVector<sp<Surface>, size_t> * /*outputMap*/) {
    ALOGE("%s: this method is not supported!", __FUNCTION__);
    return INVALID_OPERATION;
}

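// BufferProducerListener bridges BufferQueue producer-side callbacks back to
// the stream: onBufferReleased feeds the buffer manager's bookkeeping (and
// lets it free a buffer when it decides one should be reclaimed), while
// onBuffersDiscarded forwards consumer-side discards.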
void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
    sp<Camera3OutputStream> stream = mParent.promote();
    if (stream == nullptr) {
        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
        return;
    }

    Mutex::Autolock l(stream->mLock);
    if (!(stream->mUseBufferManager)) {
        return;
    }

    ALOGV("Stream %d: Buffer released", stream->getId());
    bool shouldFreeBuffer = false;
    status_t res = stream->mBufferManager->onBufferReleased(
        stream->getId(), stream->getStreamSetId(), stream->isMultiResolution(),
        &shouldFreeBuffer);
    if (res != OK) {
        ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
                strerror(-res), res);
        stream->mState = STATE_ERROR;
    }

    if (shouldFreeBuffer) {
        sp<GraphicBuffer> buffer;
        // Detach and free a buffer (when buffer goes out of scope)
        stream->detachBufferLocked(&buffer, /*fenceFd*/ nullptr);
        if (buffer.get() != nullptr) {
            stream->mBufferManager->notifyBufferRemoved(
                    stream->getId(), stream->getStreamSetId(), stream->isMultiResolution());
        }
    }
}

void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
        const std::vector<sp<GraphicBuffer>>& buffers) {
    sp<Camera3OutputStream> stream = mParent.promote();
    if (stream == nullptr) {
        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
        return;
    }

    if (buffers.size() > 0) {
        Mutex::Autolock l(stream->mLock);
        stream->onBuffersRemovedLocked(buffers);
        if (stream->mUseBufferManager) {
            stream->mBufferManager->onBuffersRemoved(stream->getId(),
                    stream->getStreamSetId(), stream->isMultiResolution(), buffers.size());
        }
        ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
    }
}

void Camera3OutputStream::onBuffersRemovedLocked(
        const std::vector<sp<GraphicBuffer>>& removedBuffers) {
    sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
    if (callback != nullptr) {
        for (const auto& gb : removedBuffers) {
            callback->onBufferFreed(mId, gb->handle);
        }
    }
}

status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
    Mutex::Autolock l(mLock);
    return detachBufferLocked(buffer, fenceFd);
}

status_t Camera3OutputStream::detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd) {
    ALOGV("Stream %d: detachBuffer", getId());
    if (buffer == nullptr) {
        return BAD_VALUE;
    }

    sp<Fence> fence;
    status_t res = mConsumer->detachNextBuffer(buffer, &fence);
    if (res == NO_MEMORY) {
        // This may rarely happen, and indicates that the released buffer was freed by another
        // call (e.g., attachBuffer, dequeueBuffer etc.) before reaching here. We should notify
        // the buffer manager that this buffer has been freed. It's not fatal, but should be
        // avoided, therefore log a warning.
        *buffer = 0;
        ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
    } else if (res != OK) {
        // Treat other errors as abandonment
        if (shouldLogError(res, mState)) {
            ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        }
        mState = STATE_ABANDONED;
        return res;
    }

    if (fenceFd != nullptr) {
        if (fence != 0 && fence->isValid()) {
            *fenceFd = fence->dup();
        } else {
            *fenceFd = -1;
        }
    }

    // Here we assume detachBuffer is called by the buffer manager, so it doesn't need to be
    // notified
    checkRemovedBuffersLocked(/*notifyBufferManager*/false);
    return res;
}

status_t Camera3OutputStream::dropBuffers(bool dropping) {
    Mutex::Autolock l(mLock);
    mDropBuffers = dropping;
    return OK;
}

const std::string& Camera3OutputStream::getPhysicalCameraId() const {
    Mutex::Autolock l(mLock);
    return physicalCameraId();
}

status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
    return OK;
}

bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
    Mutex::Autolock l(mLock);

    if (surface_id != 0) {
        ALOGE("%s: surface_id %zu for Camera3OutputStream should be 0!", __FUNCTION__, surface_id);
    }
    return mConsumer == nullptr;
}

status_t Camera3OutputStream::setConsumers(const std::vector<SurfaceHolder>& consumers) {
    Mutex::Autolock l(mLock);
    if (consumers.size() != 1) {
        ALOGE("%s: it's illegal to set %zu consumer surfaces!",
                  __FUNCTION__, consumers.size());
        return INVALID_OPERATION;
    }
    if (consumers[0].mSurface == nullptr) {
        ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (mConsumer != nullptr) {
        ALOGE("%s: consumer surface was already set!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    mConsumer = consumers[0].mSurface;
    mMirrorMode = consumers[0].mMirrorMode;
    return OK;
}

bool Camera3OutputStream::isConsumedByHWComposer() {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_COMPOSER) != 0;
}

bool Camera3OutputStream::isConsumedByHWTexture() {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_HW_TEXTURE) != 0;
}

bool Camera3OutputStream::isConsumedByCPU() {
    uint64_t usage = 0;
    status_t res = getEndpointUsage(&usage);
    if (res != OK) {
        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
        return false;
    }

    return (usage & GRALLOC_USAGE_SW_READ_MASK) != 0;
}

void Camera3OutputStream::dumpImageToDisk(nsecs_t timestamp,
        ANativeWindowBuffer* anwBuffer, int fence) {
    // Derive the output file name
    std::string fileExtension = "jpg";
    char imageFileName[64];
    time_t now = time(0);
    tm *localTime = localtime(&now);
    snprintf(imageFileName, sizeof(imageFileName), "IMG_%4d%02d%02d_%02d%02d%02d_%" PRId64 ".%s",
            1900 + localTime->tm_year, localTime->tm_mon + 1, localTime->tm_mday,
            localTime->tm_hour, localTime->tm_min, localTime->tm_sec,
            timestamp, fileExtension.c_str());

    // Lock the image for CPU read
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
    void* mapped = nullptr;
    base::unique_fd fenceFd(dup(fence));
    status_t res = graphicBuffer->lockAsync(GraphicBuffer::USAGE_SW_READ_OFTEN, &mapped,
            fenceFd.release());
    if (res != OK) {
        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
        return;
    }

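    // In a BLOB-format stream the JPEG payload lives in a buffer padded out to mMaxSize, so
    // findJpegSize() parses the JPEG markers to locate the end of the compressed data; if the
    // size cannot be determined, the entire padded buffer is written out below.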
    // Figure out actual file size
    auto actualJpegSize = android::camera2::JpegProcessor::findJpegSize((uint8_t*)mapped, mMaxSize);
    if (actualJpegSize == 0) {
        actualJpegSize = mMaxSize;
    }

    // Output image data to file
    std::string filePath = "/data/misc/cameraserver/";
    filePath += imageFileName;
    std::ofstream imageFile(filePath, std::ofstream::binary);
    if (!imageFile.is_open()) {
        ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
        graphicBuffer->unlock();
        return;
    }
    imageFile.write((const char*)mapped, actualJpegSize);

    graphicBuffer->unlock();
}

status_t Camera3OutputStream::setBatchSize(size_t batchSize) {
    Mutex::Autolock l(mLock);
    if (batchSize == 0) {
        ALOGE("%s: invalid batch size 0", __FUNCTION__);
        return BAD_VALUE;
    }

    if (mUseBufferManager) {
        ALOGE("%s: batch operation is not supported with buffer manager", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (!isVideoStream()) {
        ALOGE("%s: batch operation is not supported with non-video stream", __FUNCTION__);
        return INVALID_OPERATION;
    }

    if (camera_stream::max_buffers < batchSize) {
        ALOGW("%s: batch size is capped by max_buffers %d", __FUNCTION__,
                camera_stream::max_buffers);
        batchSize = camera_stream::max_buffers;
    }

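    // Atomically move mBatchSize from its default (1) to the requested value. If the exchange
    // fails, a non-default batch size is already in effect; compare_exchange_strong then loads
    // the current value into defaultBatchSize for the error message below.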
    size_t defaultBatchSize = 1;
    if (!mBatchSize.compare_exchange_strong(defaultBatchSize, batchSize)) {
        ALOGE("%s: changing batch size from %zu to %zu dynamically is not supported",
                __FUNCTION__, defaultBatchSize, batchSize);
        return INVALID_OPERATION;
    }

    return OK;
}

void Camera3OutputStream::onMinDurationChanged(nsecs_t duration, bool fixedFps) {
    Mutex::Autolock l(mLock);
    mMinExpectedDuration = duration;
    mFixedFps = fixedFps;
}

void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
    Mutex::Autolock l(mLock);
    camera_stream::use_case = streamUseCase;
}

void Camera3OutputStream::returnPrefetchedBuffersLocked() {
    std::vector<Surface::BatchBuffer> batchedBuffers;

    {
        std::lock_guard<std::mutex> batchLock(mBatchLock);
        if (mBatchedBuffers.size() != 0) {
            ALOGW("%s: %zu extra prefetched buffers detected. Returning",
                   __FUNCTION__, mBatchedBuffers.size());
            batchedBuffers = std::move(mBatchedBuffers);
        }
    }

    if (batchedBuffers.size() > 0) {
        mConsumer->cancelBuffers(batchedBuffers);
    }
}

nsecs_t Camera3OutputStream::syncTimestampToDisplayLocked(nsecs_t t, sp<Fence> releaseFence) {
    nsecs_t currentTime = systemTime();
    if (!mFixedFps) {
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    ParcelableVsyncEventData parcelableVsyncEventData;
    auto res = mDisplayEventReceiver.getLatestVsyncEventData(&parcelableVsyncEventData);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error getting latest vsync event data: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        mLastCaptureTime = t;
        mLastPresentTime = currentTime;
        return t;
    }

    const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
    nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;

    // Find the best presentation time without worrying about the previous frame's
    // presentation time if the capture interval is more than kSpacingResetIntervalNs.
    //
    // When consecutive captures are more than 50 ms apart (3 vsyncs at a 60hz refresh rate),
    // there is little risk in starting over and finding the earliest vsync to latch onto.
    // - Update the captureToPresentTime offset to be used for later frames.
    // - Example use cases:
    //   - the frame rate drops below 20 fps, or
    //   - a new streaming session starts (stopPreview followed by
    //     startPreview)
    //
    nsecs_t captureInterval = t - mLastCaptureTime;
    if (captureInterval > kSpacingResetIntervalNs) {
        for (size_t i = 0; i < vsyncEventData.frameTimelinesLength; i++) {
            const auto& timeline = vsyncEventData.frameTimelines[i];
            if (timeline.deadlineTimestamp >= currentTime &&
                    timeline.expectedPresentationTime > minPresentT) {
                nsecs_t presentT = vsyncEventData.frameTimelines[i].expectedPresentationTime;
                mCaptureToPresentOffset = presentT - t;
                mLastCaptureTime = t;
                mLastPresentTime = presentT;

                // If releaseFence is available, store the fence to check its signal
                // time later.
                mRefVsyncData = vsyncEventData;
                mReferenceCaptureTime = t;
                mReferenceArrivalTime = currentTime;
                if (releaseFence->isValid()) {
                    mReferenceFrameFence = new Fence(releaseFence->dup());
                } else {
                    mFenceSignalOffset = 0;
                }

                // Move the expected presentation time back by 1/3 of the frame interval to
                // mitigate time drift. Due to time drift, if we directly used the
                // expected presentation time, two expected presentation times would often
                // fall into the same VSYNC interval.
                return presentT - vsyncEventData.frameInterval/3;
            }
        }
    }

    // If there is a reference frame release fence, get the signal time and
    // update the captureToPresentOffset.
    if (mReferenceFrameFence != nullptr) {
        mFenceSignalOffset = 0;
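        // Fence::getSignalTime() returns INT64_MAX (SIGNAL_TIME_PENDING) while the fence has
        // not yet signaled, so the recalculation below is deferred to a later frame if the
        // reference frame is still in flight.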
        nsecs_t signalTime = mReferenceFrameFence->getSignalTime();
        // Now that the fence has signaled, recalculate the offsets based on
        // the timeline which was actually latched
        if (signalTime != INT64_MAX) {
            for (size_t i = 0; i < mRefVsyncData.frameTimelinesLength; i++) {
                const auto& timeline = mRefVsyncData.frameTimelines[i];
                if (timeline.deadlineTimestamp >= signalTime) {
                    nsecs_t originalOffset = mCaptureToPresentOffset;
                    mCaptureToPresentOffset = timeline.expectedPresentationTime
                            - mReferenceCaptureTime;
                    mLastPresentTime = timeline.expectedPresentationTime;
                    mFenceSignalOffset = signalTime > mReferenceArrivalTime ?
                            signalTime - mReferenceArrivalTime : 0;

                    ALOGV("%s: Last deadline %" PRId64 " signalTime %" PRId64
                            " original offset %" PRId64 " new offset %" PRId64
                            " fence signal offset %" PRId64, __FUNCTION__,
                            timeline.deadlineTimestamp, signalTime, originalOffset,
                            mCaptureToPresentOffset, mFenceSignalOffset);
                    break;
                }
            }
            mReferenceFrameFence.clear();
        }
    }

    nsecs_t idealPresentT = t + mCaptureToPresentOffset;
    nsecs_t expectedPresentT = mLastPresentTime;
    nsecs_t minDiff = INT64_MAX;

    // In the fixed FPS case, when frame durations are close to multiples of the display
    // refresh rate, derive the minimum interval between presentation times based on the
    // minimal expected duration. The minimum number of Vsyncs is:
    // - 0 if minFrameDuration in (0, 1.5] * vSyncInterval,
    // - 1 if minFrameDuration in (1.5, 2.5] * vSyncInterval,
    // - and so on.
    //
    // This spaces out the displaying of the frames so that the frame
    // presentations are roughly in sync with the frame captures.
    int minVsyncs = (mMinExpectedDuration - vsyncEventData.frameInterval / 2) /
            vsyncEventData.frameInterval;
    if (minVsyncs < 0) minVsyncs = 0;
    nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval;
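    // Worked example (30fps camera on a 60hz display, values approximate):
    //   mMinExpectedDuration = 33.3ms, frameInterval = 16.7ms
    //   minVsyncs = (33.3 - 8.35) / 16.7 = 1 (integer division), minInterval = 16.7ms
    // i.e. consecutive presentation times are kept at least one vsync apart, matching the
    // "(1.5, 2.5] * vSyncInterval" bucket above since 33.3 / 16.7 = 2.0.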

    // In the fixed FPS case, if the frame duration deviates from multiples of
    // the display refresh rate, find the closest Vsync without requiring a minimum
    // number of Vsyncs.
    //
    // Example: (24fps camera, 60hz refresh):
    //   capture readout:  |  t1  |  t1  | .. |  t1  | .. |  t1  | .. |  t1  |
    //   display VSYNC:      | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
    //   |  : 1 frame
    //   t1 : 41.67ms
    //   t2 : 16.67ms
    //   t1/t2 = 2.5
    //
    //   24fps is a commonly used video frame rate. Because the capture
    //   interval is 2.5 times the display refresh interval, the minVsyncs
    //   calculation falls directly on the boundary condition. In this case,
    //   we should fall back to the basic logic of finding the closest vsync
    //   timestamp without worrying about minVsyncs.
    float captureToVsyncIntervalRatio = 1.0f * mMinExpectedDuration / vsyncEventData.frameInterval;
    float ratioDeviation = std::fabs(
            captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
    bool captureDeviateFromVsync = ratioDeviation >= kMaxIntervalRatioDeviation;
    bool cameraDisplayInSync = (mFixedFps && !captureDeviateFromVsync);
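    // With the 24fps example above: captureToVsyncIntervalRatio = 41.67 / 16.67 = 2.5, so
    // ratioDeviation = |2.5 - round(2.5)| = 0.5 >= kMaxIntervalRatioDeviation, making
    // captureDeviateFromVsync true and cameraDisplayInSync false.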

    // Find the best timestamp in the vsync timelines:
    // - Only use at most kMaxTimelines timelines to avoid long latency
    // - Add an extra timeline if a display fence is used
    // - closest to the ideal presentation time,
    // - deadline timestamp is greater than the current time, and
    // - For fixed FPS, if the capture interval doesn't deviate too much from the refresh
    //   interval, the candidate presentation time is at least minInterval in the future
    //   compared to the last presentation time.
    // - For variable FPS, or if the capture interval deviates from the refresh
    //   interval by more than 5%, find a presentation time closest to
    //   (lastPresentationTime + captureToPresentOffset) instead.
    int fenceAdjustment = (mFenceSignalOffset > 0) ? 1 : 0;
    int maxTimelines = std::min(kMaxTimelines + fenceAdjustment,
            (int)vsyncEventData.frameTimelinesLength);
    float biasForShortDelay = 1.0f;
    for (int i = 0; i < maxTimelines; i++) {
        const auto& vsyncTime = vsyncEventData.frameTimelines[i];
        if (minVsyncs > 0) {
            // Bias towards using a smaller timeline index:
            //   i = 0:                bias = 1
            //   i = maxTimelines-1:   bias = -1
            biasForShortDelay = 1.0 - 2.0 * i / (maxTimelines - 1);
        }
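        // e.g., with maxTimelines = 3 the bias steps through 1, 0, -1 for i = 0, 1, 2,
        // shrinking (and eventually reversing) the extra threshold applied to later,
        // higher-latency timelines in the comparison below.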
        if (std::abs(vsyncTime.expectedPresentationTime - idealPresentT) < minDiff &&
                vsyncTime.deadlineTimestamp >= currentTime + mFenceSignalOffset &&
                ((!cameraDisplayInSync && vsyncTime.expectedPresentationTime > minPresentT) ||
                 (cameraDisplayInSync && vsyncTime.expectedPresentationTime >
                mLastPresentTime + minInterval +
                    static_cast<nsecs_t>(biasForShortDelay * kTimelineThresholdNs)))) {
            expectedPresentT = vsyncTime.expectedPresentationTime;
            minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
        }
    }

    if (expectedPresentT == mLastPresentTime && expectedPresentT <
            vsyncEventData.frameTimelines[maxTimelines-1].expectedPresentationTime) {
        // Couldn't find a reasonable presentation time. Using the last frame's
        // presentation time would cause a frame drop. The best option now
        // is to use the next VSync as long as the last presentation time
        // doesn't already have the maximum latency, in which case dropping the
        // buffer is preferable to increasing latency further.
        //
        // Example: (60fps camera, 59.9hz refresh):
        //   capture readout:  | t1 | t1 | .. | t1 | .. | t1 | .. | t1 |
        //                      \    \    \     \    \    \    \     \   \
        //   queue to BQ:       |    |    |     |    |    |    |      |    |
        //                      \    \    \     \    \     \    \      \    \
        //   display VSYNC:      | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
        //
        //   |: 1 frame
        //   t1 : 16.67ms
        //   t2 : 16.69ms
        //
        // It takes 833 frames for the capture readout count and the display VSYNC count
        // to be off by 1.
        //  - At frames [0, 832], presentationTime is set to timeline[0]
        //  - At frames [833, 833*2-1], presentationTime is set to timeline[1]
        //  - At frames [833*2, 833*3-1], presentationTime is set to timeline[2]
        //  - At frame 833*3, no presentation time is found because we only
        //    search timeline[0..2].
        //  - Dropping one buffer is better than further extending the presentation
        //    time.
        //
        // However, if frame 833*2 arrives 16.67ms early (right after frame
        // 833*2-1), no presentation time can be found because
        // getLatestVsyncEventData is called early. In that case, it's better to
        // set the presentation time by offsetting the last presentation time.
        expectedPresentT += vsyncEventData.frameInterval;
    }

    mLastCaptureTime = t;
    mLastPresentTime = expectedPresentT;

    // Move the expected presentation time back by 1/3 of the frame interval to
    // mitigate time drift. Due to time drift, if we directly used the
    // expected presentation time, two expected presentation times would often
    // fall into the same VSYNC interval.
    return expectedPresentT - vsyncEventData.frameInterval/3;
}

bool Camera3OutputStream::shouldLogError(status_t res) {
    Mutex::Autolock l(mLock);
    return shouldLogError(res, mState);
}

}; // namespace camera3

}; // namespace android