/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
//#define LOG_NDEBUG 0
#define LOG_TAG "GraphicsTracker"
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <C2BlockInternal.h>
#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;

// Just some delay for HAL to receive the stop()/release() request.
static constexpr int kAllocateDirectDelayUs = 16666;

c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    std::shared_ptr<const _C2BlockPoolData> bpData = _C2BlockFactory::GetGraphicBlockPoolData(blk);
    if (!bpData || bpData->getType() != _C2BlockPoolData::TYPE_AHWBUFFER) {
        return C2_BAD_VALUE;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf;
        if (!_C2BlockFactory::GetAHardwareBuffer(bpData, &pBuf)) {
            return C2_CORRUPTED;
        }
        int ret = AHardwareBuffer_getId(pBuf, bid);
        if (ret != ::android::OK) {
            return C2_CORRUPTED;
        }
        return C2_OK;
    } else {
        return C2_OMITTED;
    }
}

} // anonymous namespace

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
        int ret = AHardwareBuffer_getId(pBuf, &mId);
        if (ret != ::android::OK) {
            return;
        }
        mUsage = buf->getUsage();
        AHardwareBuffer_acquire(pBuf);
        mBuf = pBuf;
        mFence = fence;
        mInit = true;
    }
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, AHardwareBuffer *pBuf, uint64_t usage) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{usage},
        mFence{Fence::NO_FENCE} {
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        int ret = AHardwareBuffer_getId(mBuf, &mId);
        if (ret != ::android::OK) {
            mInit = false;
            mBuf = nullptr;
            return;
        }
    }
    AHardwareBuffer_acquire(mBuf);
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}


std::shared_ptr<GraphicsTracker::BufferItem> GraphicsTracker::BufferItem::migrateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

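    // Cloning from the native handle creates a new AHardwareBuffer that shares
    // the underlying memory but carries the merged usage bits, so the buffer
    // can be re-registered against a new surface generation.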
    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                               AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                               &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    std::shared_ptr<BufferItem> newBuffer =
            std::make_shared<BufferItem>(newGeneration, newBuf, newUsage);
    AHardwareBuffer_release(newBuf);
    return newBuffer;
}

sp<GraphicBuffer> GraphicsTracker::BufferItem::getGraphicBuffer() {
    if (!mInit) {
        return nullptr;
    }
    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(mBuf);
    if (!gb) {
        return nullptr;
    }
    gb->setGenerationNumber(mGeneration);
    return gb;
}

GraphicsTracker::BufferCache::~BufferCache() {
    ALOGV("BufferCache destruction: generation(%d), igbp(%d)", mGeneration, (bool)mIgbp);
}

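// A slot is blocked while its buffer is being queued to or canceled on the
// IGBP; a concurrent dequeue that lands on the same slot waits in
// waitOnSlot() until the pending operation commits and unblocks it.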
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("block slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("unblock slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mBufferCache(new BufferCache()), mNumDequeueing{0}, mMaxDequeue{maxDequeueCount},
    mMaxDequeueCommitted{maxDequeueCount},
    mDequeueable{maxDequeueCount},
    mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
    mInConfig{false}, mStopped{false}, mStopRequested{false}, mAllocAfterStopRequested{0} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
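    // The dequeueable count is mirrored by the number of unread bytes in a
    // non-blocking pipe: one readable byte per dequeueable buffer. The read
    // end is dup()-ed out via getWaitableFd() so clients can poll() for
    // "ready to allocate"; writeIncDequeueableLocked()/drainDequeueableLocked()
    // keep the byte count in sync with mDequeueable.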
    int pipefd[2] = { -1, -1};
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    // ctor does not require lock to be held.
    writeIncDequeueableLocked(mDequeueable);

    CHECK(ret >= 0);
}

GraphicsTracker::~GraphicsTracker() {
    stop();
}

bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can't we adjust during config? not committing it may be safe?
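    // Called when a dequeued buffer is about to be returned. If a smaller
    // maxDequeue was requested, consume the returned buffer (and currently
    // dequeueable ones) toward the shrink instead of making them dequeueable
    // again. Returns true when such an adjustment happened; the caller must
    // then skip its own mDequeueable increment. *updateDequeue signals that
    // the new count still needs to be committed to the IGBP.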
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested.has_value() && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested.value();
        int drained = 0;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested.value();
            mDequeueable -= (delta - 1);
            drained = delta - 1;
        } else {
            mMaxDequeue -= adjustable;
            drained = mDequeueable;
            mDequeueable = 0;
        }
        if (drained > 0) {
            drainDequeueableLocked(drained);
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    // TODO: wait until operations on the previous IGBP are completed.
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueRequested = 0;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeueRequested.has_value()) {
            prevDequeueRequested = mMaxDequeueRequested.value();
        }
    }
    // NOTE: Switching to the same surface is blocked by MediaCodec.
    // Switching to the same surface might not work if tried, since disconnect()
    // from the old surface in MediaCodec and allocation from the new surface in
    // GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK ||
            prevCache->mGeneration == generation) {
        ALOGE("new surface configuration failed due to a wrong bqId or an unchanged generation: "
              "igbp(%d:%llu -> %llu), gen(%lu -> %lu)", (bool)igbp,
              (unsigned long long)prevCache->mBqId, (unsigned long long)bqId,
              (unsigned long)prevCache->mGeneration, (unsigned long)generation);
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        return C2_BAD_VALUE;
    }
    ALOGD("new surface in configuration: maxDequeueRequested(%d), maxDequeueCommitted(%d)",
          prevDequeueRequested, prevDequeueCommitted);
    if (prevDequeueRequested > 0 && prevDequeueRequested > prevDequeueCommitted) {
        prevDequeueCommitted = prevDequeueRequested;
    }
    if (igbp) {
        ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
        if (ret != ::android::OK) {
            ALOGE("failed to configure maxDequeuedBufferCount on the new surface");
            // TODO: sort out the error from igbp and return an error accordingly.
            std::unique_lock<std::mutex> l(mLock);
            mInConfig = false;
            return C2_CORRUPTED;
        }
    }
    ALOGD("new surface configured with id:%llu gen:%lu maxDequeue:%d",
          (unsigned long long)bqId, (unsigned long)generation, prevDequeueCommitted);
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
        // {@code dequeued} is the number of currently dequeued buffers.
        // {@code prevDequeueCommitted} is the max number of buffers that can
        // be dequeued at any moment from the new surface.
        // {@code newDequeueable} is hence the current # of dequeueable buffers
        // if no change occurs.
        int dequeued = mDequeued.size() + mNumDequeueing;
        int newDequeueable = prevDequeueCommitted - dequeued;
        if (newDequeueable < 0) {
            // This should not happen.
            // But if it does, we respect the value and try to continue.
            ALOGE("calculated new dequeueable is negative: %d max(%d),dequeued(%d)",
                  newDequeueable, prevDequeueCommitted, dequeued);
        }

        if (mMaxDequeueRequested.has_value() && mMaxDequeueRequested == prevDequeueCommitted) {
            mMaxDequeueRequested.reset();
        }
        mMaxDequeue = mMaxDequeueCommitted = prevDequeueCommitted;

        int delta = newDequeueable - mDequeueable;
        if (delta > 0) {
            writeIncDequeueableLocked(delta);
        } else if (delta < 0) {
            drainDequeueableLocked(-delta);
        }
        ALOGV("new surface dequeueable %d(delta %d), maxDequeue %d",
              newDequeueable, delta, mMaxDequeue);
        mDequeueable = newDequeueable;
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // Max dequeue count which can be committed to the IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the number of
    // currently dequeued buffers is bigger.)
    int maxDequeueToCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested.has_value()) {
            if (mMaxDequeueRequested == maxDequeueCount) {
                ALOGD("maxDequeueCount requested with %d already", maxDequeueCount);
                return C2_OK;
            }
        } else if (mMaxDequeue == maxDequeueCount) {
            ALOGD("maxDequeueCount is already %d", maxDequeueCount);
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing,
            // a delivered ready-to-allocate event may not be fulfilled.
            // Another wait via a waitable object may be necessary in that case.
            int delta = std::min(mMaxDequeue - maxDequeueCount, mDequeueable);
            maxDequeueToCommit = mMaxDequeue - delta;
            mDequeueable -= delta;
            if (delta > 0) {
                drainDequeueableLocked(delta);
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != mMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("maxDequeueCount committed to IGBP: %d", maxDequeueToCommit);
        } else {
            // This should not happen.
            ALOGE("maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }

    int oldMaxDequeue = 0;
    int requested = 0;
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        oldMaxDequeue = mMaxDequeue;
        mMaxDequeue = maxDequeueToCommit; // we already drained dequeueable
        if (committed) {
            clearCacheIfNecessaryLocked(cache, maxDequeueToCommit);
            mMaxDequeueCommitted = maxDequeueToCommit;
            if (mMaxDequeueRequested == mMaxDequeueCommitted &&
                    mMaxDequeueRequested == mMaxDequeue) {
                mMaxDequeueRequested.reset();
            }
            if (mMaxDequeueRequested.has_value()) {
                requested = mMaxDequeueRequested.value();
            }
            int delta = mMaxDequeueCommitted - oldMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                writeIncDequeueableLocked(delta);
            }
        }
    }
    ALOGD("maxDequeueCount change %d -> %d: pending: %d",
          oldMaxDequeue, maxDequeueToCommit, requested);

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    ALOGV("trying to update max dequeue count");
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (!mMaxDequeueRequested.has_value() || mMaxDequeue != mMaxDequeueRequested) {
            return;
        }
        if (mMaxDequeueCommitted == mMaxDequeueRequested) {
            // already committed; this may not happen.
            mMaxDequeueRequested.reset();
            return;
        }
        dequeueCommit = mMaxDequeue;
        mInConfig = true;
        cache = mBufferCache;
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("delayed maxDequeueCount update to IGBP: %d", dequeueCommit);
        } else {
            // This should not happen.
            ALOGE("delayed maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }
    {
        // cache == mBufferCache here, since we hold the config lock.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            clearCacheIfNecessaryLocked(cache, dequeueCommit);
            mMaxDequeueCommitted = dequeueCommit;
        }
        mMaxDequeueRequested.reset();
    }
}

void GraphicsTracker::clearCacheIfNecessaryLocked(const std::shared_ptr<BufferCache> &cache,
                                                  int maxDequeueCommitted) {
    int cleared = 0;
    size_t origCacheSize = cache->mBuffers.size();
    if (cache->mIgbp && maxDequeueCommitted < mMaxDequeueCommitted) {
        // we are shrinking # of buffers in this case, so evict the previously
        // cached buffers.
        for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
            uint64_t bid = it->second->mId;
            if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                ++cleared;
                it = cache->mBuffers.erase(it);
            } else {
                ++it;
            }
        }
    }
    ALOGD("Cache size %zu -> %zu: maybe_cleared(%d), dequeued(%zu)",
          origCacheSize, cache->mBuffers.size(), cleared, mDequeued.size());
}

int GraphicsTracker::getCurDequeueable() {
    std::unique_lock<std::mutex> l(mLock);
    return mDequeueable;
}

void GraphicsTracker::stop() {
    // TODO: wait until all operations on the current IGBP
    // have completed.
    std::unique_lock<std::mutex> l(mLock);
    if (mStopped) {
        return;
    }
    mStopped = true;
    int writeFd = mWritePipeFd.release();
    if (writeFd >= 0) {
        ::close(writeFd);
    }
}

void GraphicsTracker::onRequestStop() {
    std::unique_lock<std::mutex> l(mLock);
    if (mStopped) {
        return;
    }
    if (mStopRequested) {
        return;
    }
    mStopRequested = true;
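    // Flood the waitable fd so any pending or future waiters wake up
    // immediately; allocate() will then take the direct-allocation path
    // instead of dequeuing from the (soon to be released) surface.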
    writeIncDequeueableLocked(kMaxDequeueMax - 1);
}

void GraphicsTracker::writeIncDequeueableLocked(int inc) {
    CHECK(inc > 0 && inc < kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) { // the reading end is closed
        return;
    }
    int writeFd = mWritePipeFd.get();
    if (writeFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::write(writeFd, buf, inc);
    // Since this is non-blocking i/o, it never returns EINTR.
    //
    // ::write() to a pipe is guaranteed to succeed atomically if it writes
    // less than PIPE_BUF bytes. The buffer size of a pipe/fifo is at least 4K
    // and our total max pending buffer count is 64. So it never returns EAGAIN
    // here either. See pipe(7) for further information.
    //
    // Other errors are serious errors and we cannot synchronize mDequeueable
    // to the number of pending bytes in the pipe/fifo anymore. So it is better
    // to abort here.
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == inc);
}

void GraphicsTracker::drainDequeueableLocked(int dec) {
    CHECK(dec > 0 && dec < kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) {
        return;
    }
    int readFd = mReadPipeFd.get();
    if (readFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::read(readFd, buf, dec);
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == dec);
}

c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::requestAllocateLocked(std::shared_ptr<BufferCache> *cache) {
    if (mDequeueable > 0) {
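        // Consume one byte from the pipe so the readable byte count stays in
        // sync with mDequeueable.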
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper usage of the waitable object should not reach this,
                // but there could be alloc requests from the HAL which ignore
                // the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // writing end is closed
            ALOGE("writing end for the waitable object seems to be closed");
            return C2_BAD_STATE;
        }
        mNumDequeueing++;
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} should be read from the
// current cache. Otherwise, {@code pBuffer} should be written to the
// current cache.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                                     bool cached, int slot, const sp<Fence> &fence,
                                     std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    mNumDequeueing--;
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
            ALOGV("the allocated buffer is already cached, updated Fence");
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP.
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
            ALOGV("cached a newly allocated buffer from the current IGBP");
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        ALOGD("allocate error(%d): Dequeued(%zu), Dequeuable(%d)",
              (int)res, mDequeued.size(), mDequeueable + 1);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
}


// If a buffer is newly allocated, {@code cached} is {@code false},
// and the buffer is returned in {@code buffer};
// otherwise, {@code cached} is {@code true} and the buffer should be
// retrieved from the cache by commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                                       uint32_t width, uint32_t height, PixelFormat format,
                                       uint64_t usage,
                                       bool *cached,
                                       int *rSlotId,
                                       sp<Fence> *rFence,
                                       std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *rSlotId = -1;
        *rFence = Fence::NO_FENCE;
        *buffer = std::make_shared<BufferItem>(generation, buf, usage);
        AHardwareBuffer_release(buf); // remove an acquire count from
                                      // AHwb_allocate().
        if (!*buffer) {
            ALOGE("direct allocation of AHB successful, but failed to create BufferItem");
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("direct allocation of AHB successful, but BufferItem init failed");
            buffer->reset();
            return C2_CORRUPTED;
        }
        ALOGV("allocate: direct allocate without igbp");
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, nullptr);
    if (status < ::android::OK) {
        if (status == ::android::TIMED_OUT || status == ::android::WOULD_BLOCK) {
            ALOGW("BQ might not be ready for dequeueBuffer()");
            return C2_BLOCKING;
        }
        bool cacheExpired = false;
        {
            std::unique_lock<std::mutex> l(mLock);
            cacheExpired = (mBufferCache.get() != cache.get());
        }
        if (cacheExpired) {
            ALOGW("a new BQ is configured. dequeueBuffer() error %d", (int)status);
            return C2_BLOCKING;
        }
        ALOGE("BQ in inconsistent status. dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
                cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
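    // On success dequeueBuffer() returns a non-negative flag mask;
    // BUFFER_NEEDS_REALLOCATION means the slot's contents changed and the
    // GraphicBuffer must be re-fetched with requestBuffer().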
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            ALOGE("allocate by dequeueBuffer() successful, but requestBuffer() failed %d",
                  status);
            igbp->cancelBuffer(slotId, fence);
            // This might be due to life-cycle end and/or surface switching.
            return C2_BLOCKING;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!*buffer) {
            ALOGE("allocate by dequeueBuffer() successful, but creating BufferItem failed");
            igbp->cancelBuffer(slotId, fence);
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("allocate by dequeueBuffer() successful, but BufferItem init failed");
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
    } else {
        *cached = true;
    }
    ALOGV("allocate: a new allocated buffer from igbp cached %d, slot: %d",
          *cached, slotId);
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

c2_status_t GraphicsTracker::_allocateDirect(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    AHardwareBuffer_Desc desc;
    desc.width = width;
    desc.height = height;
    desc.layers = 1u;
    desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
    desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
    desc.rfu0 = 0;
    desc.rfu1 = 0;

    int res = AHardwareBuffer_allocate(&desc, buf);
    if (res != ::android::OK) {
        ALOGE("_allocateDirect() failed(%d)", res);
        if (res == ::android::NO_MEMORY) {
            return C2_NO_MEMORY;
        } else {
            return C2_CORRUPTED;
        }
    }

    int alloced = mAllocAfterStopRequested++;
    *rFence = Fence::NO_FENCE;
    ALOGD("_allocateDirect() allocated buffer %d after stop was requested", alloced);
    return C2_OK;
}

c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        ALOGE("cannot allocate due to being stopped");
        return C2_BAD_STATE;
    }
    c2_status_t res = C2_OK;
    std::shared_ptr<BufferCache> cache;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mStopRequested) {
            l.unlock();
            res = _allocateDirect(width, height, format, usage, buf, rFence);
            // Delay a little bit for HAL to receive the stop()/release() request.
            ::usleep(kAllocateDirectDelayUs);
            return res;
        }
        res = requestAllocateLocked(&cache);
        if (res != C2_OK) {
            return res;
        }
    }
    ALOGV("allocatable or dequeueable");

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        ALOGV("allocated a buffer width:%u height:%u pixelformat:%d usage:%llu",
              width, height, format, (unsigned long long)usage);
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should stay valid even after buffer is destructed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                               bool *completed, bool *updateDequeue,
                                               std::shared_ptr<BufferCache> *cache, int *slotId,
                                               sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // buffer is not from the current underlying Graphics.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    if (cache) {
        cache->unblockSlot(slotId);
    }
    if (adjustDequeueConfLocked(updateDequeue)) {
        return;
    }
    mDequeueable++;
    writeIncDequeueableLocked(1);
}


c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue = false;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    if (mStopped.load() == true) {
        ALOGE("cannot deallocate due to being stopped");
        return C2_BAD_STATE;
    }
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed == true) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // ignore the return value since the IGBP could already be stale.
    // cache->mIgbp is not null if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                           std::shared_ptr<BufferItem> *pBuffer,
                                           bool *fromCache,
                                           bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render was requested without a surface;
        // reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
        *fromCache = true;
    } else {
        *fromCache = false;
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(const std::shared_ptr<BufferCache> &cache,
                                   const std::shared_ptr<BufferItem> &buffer,
                                   const std::shared_ptr<BufferItem> &oldBuffer,
                                   bool bufferReplaced,
                                   bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
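    // For a migrated buffer the entries in mDequeued/mDeallocating are still
    // keyed by the old buffer's id, so account with that id.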
    uint64_t origBid = oldBuffer ? oldBuffer->mId : buffer->mId;

    if (cache) {
        cache->unblockSlot(buffer->mSlot);
        if (oldBuffer) {
            // migrated; register the new buffer to the cache.
            auto ret = cache->mBuffers.emplace(buffer->mSlot, buffer);
            if (!ret.second) {
                ret.first->second = buffer;
            }
        }
    }
    mDeallocating.erase(origBid);
    mDequeued.erase(origBid);

    if (cache.get() != mBufferCache.get() || bufferReplaced) {
        // Surface changed; no need to wait for the buffer to be released.
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return;
    }
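    // Otherwise the buffer remains accounted as queued; mDequeueable is
    // replenished later when the consumer releases it and onReleased() fires.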
}

c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                    const IGraphicBufferProducer::QueueBufferInput &input,
                                    IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
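    // Disown the block from the IGBA pool so the buffer is not returned to
    // the pool when the block is destroyed; from here on its lifecycle is
    // driven by the queueBuffer()/cancelBuffer() calls below.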
    std::shared_ptr<_C2BlockPoolData> poolData =
            _C2BlockFactory::GetGraphicBlockPoolData(blk);
    _C2BlockFactory::DisownIgbaBlock(poolData);
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    std::shared_ptr<BufferItem> oldBuffer;
    bool updateDequeue = false;
    bool fromCache = false;
    res = requestRender(bid, &cache, &buffer, &fromCache, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    int cacheSlotId = fromCache ? buffer->mSlot : -1;
    ALOGV("render prepared: igbp(%d) slot(%d)", bool(cache->mIgbp), cacheSlotId);
    if (!fromCache) {
        // The buffer does not come from the current cache.
        // The buffer needs to be migrated (attached).
        uint64_t newUsage = 0ULL;

        (void) cache->mIgbp->getConsumerUsage(&newUsage);
        std::shared_ptr<BufferItem> newBuffer =
                buffer->migrateBuffer(newUsage, cache->mGeneration);
        sp<GraphicBuffer> gb = newBuffer ? newBuffer->getGraphicBuffer() : nullptr;

        if (!gb) {
            ALOGE("render: realloc-ing a new buffer for migration failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        if (cache->mIgbp->attachBuffer(&(newBuffer->mSlot), gb) != ::android::OK) {
            ALOGE("render: attaching a new buffer to IGBP failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        cache->waitOnSlot(newBuffer->mSlot);
        cache->blockSlot(newBuffer->mSlot);
        oldBuffer = buffer;
        buffer = newBuffer;
    }
    ::android::status_t renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
    ALOGV("render done: migration(%d), render(err = %d)", !fromCache, renderRes);
    if (renderRes != ::android::OK) {
        CHECK(renderRes != ::android::BAD_VALUE);
        ALOGE("render: failed to queueBuffer() err = %d", renderRes);
        (void) cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
        commitDeallocate(cache, buffer->mSlot, bid, &updateDequeue);
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_REFUSED;
    }

    commitRender(cache, buffer, oldBuffer, output->bufferReplaced, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

void GraphicsTracker::pollForRenderedFrames(FrameEventHistoryDelta* delta) {
    sp<IGraphicBufferProducer> igbp;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache) {
            igbp = mBufferCache->mIgbp;
        }
    }
    if (igbp) {
        igbp->getFrameTimestamps(delta);
    }
}

void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
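        // Only count releases for the current surface generation; buffers
        // from a replaced surface were already re-accounted during
        // reconfiguration.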
        if (mBufferCache->mGeneration == generation) {
            if (mBufferCache->mNumAttached > 0) {
                ALOGV("one onReleased() ignored for each prior onAttached().");
                mBufferCache->mNumAttached--;
                return;
            }
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                writeIncDequeueableLocked(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

void GraphicsTracker::onAttached(uint32_t generation) {
    std::unique_lock<std::mutex> l(mLock);
    if (mBufferCache->mGeneration == generation) {
        ALOGV("buffer attached");
        mBufferCache->mNumAttached++;
    }
}

} // namespace aidl::android::hardware::media::c2::implementation