xref: /aosp_15_r20/frameworks/av/services/camera/libcameraservice/api2/DepthCompositeStream.cpp (revision ec779b8e0859a360c3d303172224686826e6e0e1)
1 /*
2  * Copyright (C) 2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "Camera3-DepthCompositeStream"
18 #define ATRACE_TAG ATRACE_TAG_CAMERA
19 //#define LOG_NDEBUG 0
20 
21 #include <aidl/android/hardware/camera/device/CameraBlob.h>
22 #include <aidl/android/hardware/camera/device/CameraBlobId.h>
23 #include <camera/StringUtils.h>
24 
25 #include <com_android_graphics_libgui_flags.h>
26 #include <gui/Surface.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29 
30 #include "api1/client2/JpegProcessor.h"
31 #include "common/CameraProviderManager.h"
32 #include "utils/SessionConfigurationUtils.h"
33 
34 #include "DepthCompositeStream.h"
35 
36 namespace android {
37 namespace camera3 {
38 
39 using aidl::android::hardware::camera::device::CameraBlob;
40 using aidl::android::hardware::camera::device::CameraBlobId;
41 
/**
 * Composite stream that merges a main jpeg (blob) stream and a depth map
 * stream into a single dynamic-depth photo output.
 *
 * The constructor caches the static camera characteristics that are needed
 * later during depth photo processing: maximum jpeg buffer sizes, lens
 * intrinsic calibration, lens distortion coefficients, whether the device is
 * a logical multi-camera, and the list of supported depth map sizes.
 */
DepthCompositeStream::DepthCompositeStream(sp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mStreamSurfaceListener(new StreamSurfaceListener()),
        mMaxJpegBufferSize(-1),
        mUHRMaxJpegBufferSize(-1),
        mIsLogicalCamera(false) {
    if (device != nullptr) {
        CameraMetadata staticInfo = device->info();
        // Maximum jpeg buffer size advertised by the camera device.
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegBufferSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        // Max jpeg resolutions for ultra-high-resolution and default sensor
        // pixel modes; used per-frame to pick the right jpeg buffer size.
        mUHRMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*ultraHighResolution*/true);
        mDefaultMaxJpegSize =
                SessionConfigurationUtils::getMaxJpegResolution(staticInfo,
                        /*isUltraHighResolution*/false);

        mUHRMaxJpegBufferSize =
            SessionConfigurationUtils::getUHRMaxJpegBufferSize(mUHRMaxJpegSize, mDefaultMaxJpegSize,
                    mMaxJpegBufferSize);

        // Intrinsic calibration layout (5 floats):
        // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        // Lens distortion correction coefficients (5 floats).
        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        // Logical multi-camera capability is recorded in the depth photo so
        // the processing library knows the jpeg may come from multiple
        // physical sensors.
        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        // Supported depth map sizes for the default sensor pixel mode and,
        // when available, for the maximum resolution sensor pixel mode.
        getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
        if (SessionConfigurationUtils::supportsUltraHighResolutionCapture(staticInfo)) {
            getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
        }
    }
}
109 
~DepthCompositeStream()110 DepthCompositeStream::~DepthCompositeStream() {
111     mBlobConsumer.clear(),
112     mBlobSurface.clear(),
113     mBlobStreamId = -1;
114     mBlobSurfaceId = -1;
115     mDepthConsumer.clear();
116     mDepthSurface.clear();
117     mDepthConsumer = nullptr;
118     mDepthSurface = nullptr;
119 }
120 
/**
 * Drains all pending per-frame inputs (jpeg buffers, depth buffers, capture
 * results, frame numbers and buffer errors) into 'mPendingInputFrames',
 * keyed by sensor timestamp. Must be called with 'mMutex' held.
 *
 * At most one jpeg and one depth buffer are kept CPU-locked at a time
 * (tracked by mBlobBufferAcquired / mDepthBufferAcquired).
 */
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Lock pending jpeg buffers and attach them to their frame entry.
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // If the frame is already marked failed, return the buffer instead of
        // keeping it locked.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same drain loop for depth map buffers.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Attach collected capture results (frame number + metadata) to frames.
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Record the frame number for each timestamp from the shutter map.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Propagate buffer errors (reported by frame number) to the matching
    // pending frame. Unmatched errors are retried on the next call, since the
    // corresponding frame entry may not exist yet.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
219 
getNextReadyInputLocked(int64_t * currentTs)220 bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
221     if (currentTs == nullptr) {
222         return false;
223     }
224 
225     bool newInputAvailable = false;
226     for (const auto& it : mPendingInputFrames) {
227         if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
228                 (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
229             *currentTs = it.first;
230             newInputAvailable = true;
231         }
232     }
233 
234     return newInputAvailable;
235 }
236 
getNextFailingInputLocked(int64_t * currentTs)237 int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
238     int64_t ret = -1;
239     if (currentTs == nullptr) {
240         return ret;
241     }
242 
243     for (const auto& it : mPendingInputFrames) {
244         if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
245             *currentTs = it.first;
246             ret = it.second.frameNumber;
247         }
248     }
249 
250     return ret;
251 }
252 
processInputFrame(nsecs_t ts,const InputFrame & inputFrame)253 status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
254     status_t res;
255     sp<ANativeWindow> outputANW = mOutputSurface;
256     ANativeWindowBuffer *anb;
257     int fenceFd;
258     void *dstBuffer;
259 
260     auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
261             inputFrame.jpegBuffer.width);
262     if (jpegSize == 0) {
263         ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
264         jpegSize = inputFrame.jpegBuffer.width;
265     }
266 
267     size_t maxDepthJpegBufferSize = 0;
268     if (mMaxJpegBufferSize > 0) {
269         // If this is an ultra high resolution sensor and the input frames size
270         // is > default res jpeg.
271         if (mUHRMaxJpegSize.width != 0 &&
272                 inputFrame.jpegBuffer.width * inputFrame.jpegBuffer.height >
273                 mDefaultMaxJpegSize.width * mDefaultMaxJpegSize.height) {
274             maxDepthJpegBufferSize = mUHRMaxJpegBufferSize;
275         } else {
276             maxDepthJpegBufferSize = mMaxJpegBufferSize;
277         }
278     } else {
279         maxDepthJpegBufferSize = std::max<size_t> (jpegSize,
280                 inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
281     }
282 
283     uint8_t jpegQuality = 100;
284     auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
285     if (entry.count > 0) {
286         jpegQuality = entry.data.u8[0];
287     }
288 
289     // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
290     // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
291     // max jpeg size.
292     size_t finalJpegBufferSize = maxDepthJpegBufferSize * 3;
293 
294     if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
295             != OK) {
296         ALOGE("%s: Unable to configure stream buffer dimensions"
297                 " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
298         return res;
299     }
300 
301     res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
302     if (res != OK) {
303         ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
304                 res);
305         return res;
306     }
307 
308     sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
309     GraphicBufferLocker gbLocker(gb);
310     res = gbLocker.lockAsync(&dstBuffer, fenceFd);
311     if (res != OK) {
312         ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
313                 strerror(-res), res);
314         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
315         return res;
316     }
317 
318     if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
319         ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
320                 gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
321         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
322         return BAD_VALUE;
323     }
324 
325     DepthPhotoInputFrame depthPhoto;
326     depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
327     depthPhoto.mMainJpegWidth = mBlobWidth;
328     depthPhoto.mMainJpegHeight = mBlobHeight;
329     depthPhoto.mMainJpegSize = jpegSize;
330     depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
331     depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
332     depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
333     depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
334     depthPhoto.mJpegQuality = jpegQuality;
335     depthPhoto.mIsLogical = mIsLogicalCamera;
336     depthPhoto.mMaxJpegSize = maxDepthJpegBufferSize;
337     // The camera intrinsic calibration layout is as follows:
338     // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
339     if (mIntrinsicCalibration.size() == 5) {
340         memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
341                 sizeof(depthPhoto.mIntrinsicCalibration));
342         depthPhoto.mIsIntrinsicCalibrationValid = 1;
343     } else {
344         depthPhoto.mIsIntrinsicCalibrationValid = 0;
345     }
346     // The camera lens distortion contains the following lens correction coefficients.
347     // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
348     if (mLensDistortion.size() == 5) {
349         memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
350                 sizeof(depthPhoto.mLensDistortion));
351         depthPhoto.mIsLensDistortionValid = 1;
352     } else {
353         depthPhoto.mIsLensDistortionValid = 0;
354     }
355     entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
356     if (entry.count > 0) {
357         // The camera jpeg orientation values must be within [0, 90, 180, 270].
358         switch (entry.data.i32[0]) {
359             case 0:
360             case 90:
361             case 180:
362             case 270:
363                 depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
364                 break;
365             default:
366                 ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
367                         __FUNCTION__, entry.data.i32[0]);
368         }
369     }
370 
371     size_t actualJpegSize = 0;
372     res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
373     if (res != 0) {
374         ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
375         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
376         return res;
377     }
378 
379     size_t finalJpegSize = actualJpegSize + sizeof(CameraBlob);
380     if (finalJpegSize > finalJpegBufferSize) {
381         ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
382         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
383         return NO_MEMORY;
384     }
385 
386     res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
387     if (res != OK) {
388         ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
389                 getStreamId(), strerror(-res), res);
390         return res;
391     }
392 
393     ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
394     uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
395         (gb->getWidth() - sizeof(CameraBlob));
396     CameraBlob *blob = reinterpret_cast<CameraBlob*> (header);
397     blob->blobId = CameraBlobId::JPEG;
398     blob->blobSizeBytes = actualJpegSize;
399     outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
400 
401     return res;
402 }
403 
releaseInputFrameLocked(InputFrame * inputFrame)404 void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
405     if (inputFrame == nullptr) {
406         return;
407     }
408 
409     if (inputFrame->depthBuffer.data != nullptr) {
410         mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
411         inputFrame->depthBuffer.data = nullptr;
412         mDepthBufferAcquired = false;
413     }
414 
415     if (inputFrame->jpegBuffer.data != nullptr) {
416         mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
417         inputFrame->jpegBuffer.data = nullptr;
418         mBlobBufferAcquired = false;
419     }
420 
421     if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
422         //TODO: Figure out correct requestId
423         notifyError(inputFrame->frameNumber, -1 /*requestId*/);
424         inputFrame->errorNotified = true;
425     }
426 }
427 
releaseInputFramesLocked(int64_t currentTs)428 void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
429     auto it = mPendingInputFrames.begin();
430     while (it != mPendingInputFrames.end()) {
431         if (it->first <= currentTs) {
432             releaseInputFrameLocked(&it->second);
433             it = mPendingInputFrames.erase(it);
434         } else {
435             it++;
436         }
437     }
438 }
439 
/**
 * Main processing loop. Waits (under 'mMutex') until a frame has both its
 * jpeg and depth buffers, processes it into a depth photo with the lock
 * released, then releases all frames up to and including that timestamp.
 *
 * Returns true to keep the thread running, false to terminate (error state
 * or a failed condition wait).
 */
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    // Process outside the lock; processInputFrame can block on buffer I/O.
    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        // NOTE(review): 'currentTs' is int64_t but formatted with PRIu64 —
        // harmless for valid (positive) timestamps; confirm intent.
        ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}
495 
isDepthCompositeStream(const sp<Surface> & surface)496 bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
497     ANativeWindow *anw = surface.get();
498     status_t err;
499     int format;
500     if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
501         std::string msg = fmt::sprintf("Failed to query Surface format: %s (%d)", strerror(-err),
502                 err);
503         ALOGE("%s: %s", __FUNCTION__, msg.c_str());
504         return false;
505     }
506 
507     int dataspace;
508     if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
509         std::string msg = fmt::sprintf("Failed to query Surface dataspace: %s (%d)", strerror(-err),
510                 err);
511         ALOGE("%s: %s", __FUNCTION__, msg.c_str());
512         return false;
513     }
514 
515     if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
516         return true;
517     }
518 
519     return false;
520 }
521 
isDepthCompositeStreamInfo(const OutputStreamInfo & streamInfo)522 bool DepthCompositeStream::isDepthCompositeStreamInfo(const OutputStreamInfo& streamInfo) {
523     if ((streamInfo.dataSpace == static_cast<android_dataspace_t>(HAL_DATASPACE_DYNAMIC_DEPTH)) &&
524             (streamInfo.format == HAL_PIXEL_FORMAT_BLOB)) {
525         return true;
526     }
527 
528     return false;
529 }
530 
/**
 * Returns true when 'value' is present in 'containerSet'.
 *
 * Fix: the set is now taken by const reference — the original passed it by
 * value, copying the entire container on every membership check.
 */
static bool setContains(const std::unordered_set<int32_t>& containerSet, int32_t value) {
    return containerSet.count(value) > 0;
}
534 
checkAndGetMatchingDepthSize(size_t width,size_t height,const std::vector<std::tuple<size_t,size_t>> & depthSizes,const std::vector<std::tuple<size_t,size_t>> & depthSizesMaximumResolution,const std::unordered_set<int32_t> & sensorPixelModesUsed,size_t * depthWidth,size_t * depthHeight)535 status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
536         const std::vector<std::tuple<size_t, size_t>> &depthSizes,
537         const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
538         const std::unordered_set<int32_t> &sensorPixelModesUsed,
539         size_t *depthWidth, size_t *depthHeight) {
540     if (depthWidth == nullptr || depthHeight == nullptr) {
541         return BAD_VALUE;
542     }
543     size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
544     bool hasDefaultSensorPixelMode =
545             setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
546 
547     bool hasMaximumResolutionSensorPixelMode =
548         setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
549 
550     if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
551         ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
552                 __FUNCTION__);
553         return BAD_VALUE;
554     }
555 
556     if (hasDefaultSensorPixelMode) {
557         auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
558                 &chosenDepthHeight);
559         if (ret != OK) {
560             ALOGE("%s: No matching depth stream size found", __FUNCTION__);
561             return ret;
562         }
563     }
564 
565     if (hasMaximumResolutionSensorPixelMode) {
566         size_t depthWidth = 0, depthHeight = 0;
567         auto ret = getMatchingDepthSize(width, height,
568                 depthSizesMaximumResolution, &depthWidth, &depthHeight);
569         if (ret != OK) {
570             ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
571             return ret;
572         }
573         // Both matching depth sizes should be the same.
574         if (chosenDepthWidth != 0 && chosenDepthWidth != depthWidth &&
575                 chosenDepthHeight != depthHeight) {
576             ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
577                     " have matching depth sizes", __FUNCTION__);
578             return BAD_VALUE;
579         }
580         if (chosenDepthWidth == 0) {
581             chosenDepthWidth = depthWidth;
582             chosenDepthHeight = depthHeight;
583         }
584     }
585     *depthWidth = chosenDepthWidth;
586     *depthHeight = chosenDepthHeight;
587     return OK;
588 }
589 
590 
/**
 * Creates the two internal device streams backing the composite: a jpeg
 * (blob) stream and a depth map stream sized to match the requested blob
 * dimensions, then registers this object as listener for both.
 *
 * The client-facing output surface is taken from 'consumers[0]'. The blob
 * stream id becomes the composite's public stream id; the depth stream stays
 * internal.
 *
 * @return OK on success; INVALID_OPERATION when no depth sizes are
 *         supported, NO_INIT when the device is gone, or the first failing
 *         status otherwise.
 */
status_t DepthCompositeStream::createInternalStreams(const std::vector<SurfaceHolder>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera_stream_rotation_t rotation, int *id, const std::string& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        std::vector<int> *surfaceIds,
        int /*streamSetId*/, bool /*isShared*/, int32_t /*colorSpace*/,
        int64_t /*dynamicProfile*/, int64_t /*streamUseCase*/, bool useReadoutTimestamp) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick a depth map size consistent with the blob dimensions and the
    // requested sensor pixel modes.
    size_t depthWidth, depthHeight;
    auto ret =
            checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
                    mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
                    &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Create the internal CPU consumer + surface for the jpeg stream. The
    // two branches differ only in how the buffer queue is wired (flagged
    // libgui refactor).
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    mBlobConsumer = new CpuConsumer(/*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = mBlobConsumer->getSurface();
#else
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);
#endif  // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
            camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/false, /*isMultiResolution*/false,
            /*consumerUsage*/0, ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
            ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
            OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
            OutputConfiguration::MIRROR_MODE_AUTO,
            ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
            useReadoutTimestamp);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        // The client's surface receives the final combined depth photo.
        mOutputSurface = consumers[0].mSurface;
    } else {
        return ret;
    }

    // Create the internal CPU consumer + surface for the depth map stream.
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    mDepthConsumer = new CpuConsumer(/*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = mDepthConsumer->getSurface();
#else
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
#endif  // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
            &depthSurfaceId, camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/false,
            /*isMultiResolution*/false, /*consumerUsage*/0,
            ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
            ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
            OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
            OutputConfiguration::MIRROR_MODE_AUTO,
            ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
            useReadoutTimestamp);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Listen to both internal streams so buffer/result callbacks flow into
    // this composite.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
697 
/**
 * Connects the client output surface, configures it for BLOB output, sizes
 * its buffer count from the producer/consumer undequeued-buffer minimums,
 * and starts the processing thread. No-op if the thread is already running.
 *
 * @return NO_ERROR on success, NO_INIT when no output surface was set, or
 *         the first failing status otherwise.
 */
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mStreamSurfaceListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    // Query the internal blob surface for the minimum undequeued buffer
    // count on the producer side.
    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
                    &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    // Size the output queue so neither side can starve the other.
    if ((res = native_window_set_buffer_count(
                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
750 
deleteInternalStreams()751 status_t DepthCompositeStream::deleteInternalStreams() {
752     // The 'CameraDeviceClient' parent will delete the blob stream
753     requestExit();
754 
755     auto ret = join();
756     if (ret != OK) {
757         ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
758                 strerror(-ret), ret);
759     }
760 
761     if (mDepthStreamId >= 0) {
762         // Camera devices may not be valid after switching to offline mode.
763         // In this case, all offline streams including internal composite streams
764         // are managed and released by the offline session.
765         sp<CameraDeviceBase> device = mDevice.promote();
766         if (device.get() != nullptr) {
767             ret = device->deleteStream(mDepthStreamId);
768         }
769 
770         mDepthStreamId = -1;
771     }
772 
773     if (mOutputSurface != nullptr) {
774         mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
775         mOutputSurface.clear();
776     }
777 
778     return ret;
779 }
780 
onFrameAvailable(const BufferItem & item)781 void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
782     if (item.mDataSpace == kJpegDataSpace) {
783         ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
784                 __func__, ns2ms(item.mTimestamp));
785 
786         Mutex::Autolock l(mMutex);
787         if (!mErrorState) {
788             mInputJpegBuffers.push_back(item.mTimestamp);
789             mInputReadyCondition.signal();
790         }
791     } else if (item.mDataSpace == kDepthMapDataSpace) {
792         ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
793                 ns2ms(item.mTimestamp));
794 
795         Mutex::Autolock l(mMutex);
796         if (!mErrorState) {
797             mInputDepthBuffers.push_back(item.mTimestamp);
798             mInputReadyCondition.signal();
799         }
800     } else {
801         ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
802     }
803 }
804 
insertGbp(SurfaceMap * outSurfaceMap,Vector<int32_t> * outputStreamIds,int32_t * currentStreamId)805 status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
806         Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
807     if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
808         outputStreamIds->push_back(mDepthStreamId);
809     }
810     (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
811 
812     if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
813         outputStreamIds->push_back(mBlobStreamId);
814     }
815     (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
816 
817     if (currentStreamId != nullptr) {
818         *currentStreamId = mBlobStreamId;
819     }
820 
821     return NO_ERROR;
822 }
823 
insertCompositeStreamIds(std::vector<int32_t> * compositeStreamIds)824 status_t DepthCompositeStream::insertCompositeStreamIds(
825         std::vector<int32_t>* compositeStreamIds /*out*/) {
826     if (compositeStreamIds == nullptr) {
827         return BAD_VALUE;
828     }
829 
830     compositeStreamIds->push_back(mDepthStreamId);
831     compositeStreamIds->push_back(mBlobStreamId);
832 
833     return OK;
834 }
835 
onResultError(const CaptureResultExtras & resultExtras)836 void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
837     // Processing can continue even in case of result errors.
838     // At the moment depth composite stream processing relies mainly on static camera
839     // characteristics data. The actual result data can be used for the jpeg quality but
840     // in case it is absent we can default to maximum.
841     eraseResult(resultExtras.frameNumber);
842 }
843 
onStreamBufferError(const CaptureResultExtras & resultExtras)844 bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
845     bool ret = false;
846     // Buffer errors concerning internal composite streams should not be directly visible to
847     // camera clients. They must only receive a single buffer error with the public composite
848     // stream id.
849     if ((resultExtras.errorStreamId == mDepthStreamId) ||
850             (resultExtras.errorStreamId == mBlobStreamId)) {
851         flagAnErrorFrameNumber(resultExtras.frameNumber);
852         ret = true;
853     }
854 
855     return ret;
856 }
857 
getMatchingDepthSize(size_t width,size_t height,const std::vector<std::tuple<size_t,size_t>> & supporedDepthSizes,size_t * depthWidth,size_t * depthHeight)858 status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
859         const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
860         size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
861     if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
862         return BAD_VALUE;
863     }
864 
865     float arTol = CameraProviderManager::kDepthARTolerance;
866     *depthWidth = *depthHeight = 0;
867 
868     float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
869     for (const auto& it : supporedDepthSizes) {
870         auto currentWidth = std::get<0>(it);
871         auto currentHeight = std::get<1>(it);
872         if ((currentWidth == width) && (currentHeight == height)) {
873             *depthWidth = width;
874             *depthHeight = height;
875             break;
876         } else {
877             float currentRatio = static_cast<float> (currentWidth) /
878                     static_cast<float> (currentHeight);
879             auto currentSize = currentWidth * currentHeight;
880             auto oldSize = (*depthWidth) * (*depthHeight);
881             if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
882                 *depthWidth = currentWidth;
883                 *depthHeight = currentHeight;
884             }
885         }
886     }
887 
888     return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
889 }
890 
getSupportedDepthSizes(const CameraMetadata & ch,bool maxResolution,std::vector<std::tuple<size_t,size_t>> * depthSizes)891 void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
892         std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
893     if (depthSizes == nullptr) {
894         return;
895     }
896 
897     auto entry = ch.find(
898             camera3::SessionConfigurationUtils::getAppropriateModeTag(
899                     ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
900     if (entry.count > 0) {
901         // Depth stream dimensions have four int32_t components
902         // (pixelformat, width, height, type)
903         size_t entryCount = entry.count / 4;
904         depthSizes->reserve(entryCount);
905         for (size_t i = 0; i < entry.count; i += 4) {
906             if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
907                     (entry.data.i32[i+3] ==
908                      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
909                 depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
910                             entry.data.i32[i+2]));
911             }
912         }
913     }
914 }
915 
getCompositeStreamInfo(const OutputStreamInfo & streamInfo,const CameraMetadata & ch,std::vector<OutputStreamInfo> * compositeOutput)916 status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
917             const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
918     if (compositeOutput == nullptr) {
919         return BAD_VALUE;
920     }
921 
922     std::vector<std::tuple<size_t, size_t>> depthSizes;
923     std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
924     getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
925     if (depthSizes.empty()) {
926         ALOGE("%s: No depth stream configurations present", __FUNCTION__);
927         return BAD_VALUE;
928     }
929 
930     if (SessionConfigurationUtils::supportsUltraHighResolutionCapture(ch)) {
931         getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
932         if (depthSizesMaximumResolution.empty()) {
933             ALOGE("%s: No depth stream configurations for maximum resolution present",
934                     __FUNCTION__);
935             return BAD_VALUE;
936         }
937     }
938 
939     size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
940     auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
941             depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
942             &chosenDepthHeight);
943 
944     if (ret != OK) {
945         ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
946         return ret;
947     }
948 
949     compositeOutput->clear();
950     compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
951 
952     // Sensor pixel modes should stay the same here. They're already overridden.
953     // Jpeg/Blob stream info
954     (*compositeOutput)[0].dataSpace = kJpegDataSpace;
955     (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
956 
957     // Depth stream info
958     (*compositeOutput)[1].width = chosenDepthWidth;
959     (*compositeOutput)[1].height = chosenDepthHeight;
960     (*compositeOutput)[1].format = kDepthMapPixelFormat;
961     (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
962     (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
963 
964     return NO_ERROR;
965 }
966 
967 }; // namespace camera3
968 }; // namespace android
969