/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "Planner"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <android-base/properties.h>
#include <common/FlagManager.h>
#include <common/trace.h>
#include <compositionengine/impl/planner/Flattener.h>
#include <compositionengine/impl/planner/LayerState.h>

using time_point = std::chrono::steady_clock::time_point;
using namespace std::chrono_literals;

namespace android::compositionengine::impl::planner {

namespace {

// Returns true if the underlying layer stack is the same modulo state that is expected to differ
// between frames, such as the specific buffers; false otherwise.
bool isSameStack(const std::vector<const LayerState*>& incomingLayers,
                 const std::vector<CachedSet>& cachedSets) {
    std::vector<const LayerState*> existingLayers;
    for (auto& cachedSet : cachedSets) {
        for (auto& layer : cachedSet.getConstituentLayers()) {
            existingLayers.push_back(layer.getState());
        }
    }

    if (incomingLayers.size() != existingLayers.size()) {
        return false;
    }

    for (size_t i = 0; i < incomingLayers.size(); i++) {
        // Checking the IDs here is very strict, but we do this as otherwise we may mistakenly try
        // to access destroyed OutputLayers later on.
        if (incomingLayers[i]->getId() != existingLayers[i]->getId()) {
            return false;
        }

        // Do not unflatten if the source crop has only moved, not resized.
        if (FlagManager::getInstance().cache_when_source_crop_layer_only_moved() &&
            incomingLayers[i]->isSourceCropSizeEqual(*(existingLayers[i])) &&
            incomingLayers[i]->getDifferingFields(*(existingLayers[i])) ==
                    LayerStateField::SourceCrop) {
            continue;
        }

        if (incomingLayers[i]->getDifferingFields(*(existingLayers[i])) != LayerStateField::None) {
            return false;
        }
    }
    return true;
}
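
// Worked example (illustrative): if the cached sets hold layers [A, B][C] and the incoming stack
// is [A, B, C] where only A's buffer contents changed, the stacks compare equal, since buffers are
// not part of the compared state. If instead C was destroyed and replaced by a new layer D with
// identical geometry, the ID check above catches it and the stacks compare unequal.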

} // namespace

Flattener::Flattener(renderengine::RenderEngine& renderEngine, const Tunables& tunables)
      : mRenderEngine(renderEngine), mTunables(tunables), mTexturePool(mRenderEngine) {}
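
// A minimal construction sketch (hedged: the values below are illustrative, and in practice the
// tunables are populated from system properties by the planner's setup code rather than
// hard-coded):
//
//   Flattener::Tunables tunables;
//   tunables.mActiveLayerTimeout = 150ms;  // idle time before a layer is considered cacheable
//   tunables.mEnableHolePunch = true;      // allow punch-through for PiP-style layers
//   Flattener flattener(renderEngine, tunables);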

NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
                                       NonBufferHash hash, time_point now) {
    SFTRACE_CALL();
    const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
    mUnflattenedDisplayCost += unflattenedDisplayCost;

    // We invalidate the layer cache if:
    // 1. We're not tracking any layers, or
    // 2. The last seen hashed geometry changed between frames, or
    // 3. A stricter equality check demonstrates that the layer stack really did change, since the
    // hashed geometry does not guarantee uniqueness.
    if (mCurrentGeometry != hash || (!mLayers.empty() && !isSameStack(layers, mLayers))) {
        resetActivities(hash, now);
        mFlattenedDisplayCost += unflattenedDisplayCost;
        return hash;
    }

    ++mInitialLayerCounts[layers.size()];

    // Only buildCachedSets if these layers are already stored in mLayers.
    // Otherwise (i.e. mergeWithCachedSets returns false), the time has not
    // changed, so buildCachedSets will never find any runs.
    const bool alreadyHadCachedSets = mergeWithCachedSets(layers, now);

    ++mFinalLayerCounts[mLayers.size()];

    if (alreadyHadCachedSets) {
        buildCachedSets(now);
        hash = computeLayersHash();
    }

    return hash;
}
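
// Per-frame flow, as a sketch (hypothetical caller; the geometry hash is computed upstream by the
// planner before this call):
//
//   const auto now = std::chrono::steady_clock::now();
//   NonBufferHash geometryHash = /* hash of the current stack's geometry */;
//   geometryHash = flattener.flattenLayers(layerStates, geometryHash, now);
//
// The returned hash identifies the possibly-flattened stack, so it may differ from the input hash
// once cached sets have been merged in.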

void Flattener::renderCachedSets(
        const OutputCompositionState& outputState,
        std::optional<std::chrono::steady_clock::time_point> renderDeadline,
        bool deviceHandlesColorTransform) {
    SFTRACE_CALL();

    if (!mNewCachedSet) {
        return;
    }

    // Ensure that a cached set has a valid buffer first
    if (mNewCachedSet->hasRenderedBuffer()) {
        SFTRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
        return;
    }

    const auto now = std::chrono::steady_clock::now();

    // If we have a render deadline, and the flattener is configured to skip rendering if we don't
    // have enough time, then we skip rendering the cached set if we think that we'll steal too much
    // time from the next frame.
    if (renderDeadline && mTunables.mRenderScheduling) {
        if (const auto estimatedRenderFinish =
                    now + mTunables.mRenderScheduling->cachedSetRenderDuration;
            estimatedRenderFinish > *renderDeadline) {
            mNewCachedSet->incrementSkipCount();

            if (mNewCachedSet->getSkipCount() <=
                mTunables.mRenderScheduling->maxDeferRenderAttempts) {
                SFTRACE_FORMAT("DeadlinePassed: exceeded deadline by: %d us",
                               std::chrono::duration_cast<std::chrono::microseconds>(
                                       estimatedRenderFinish - *renderDeadline)
                                       .count());
                return;
            } else {
                SFTRACE_NAME("DeadlinePassed: exceeded max skips");
            }
        }
    }

    mNewCachedSet->render(mRenderEngine, mTexturePool, outputState, deviceHandlesColorTransform);
}
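
// Worked example of the deadline check (illustrative numbers): with
// cachedSetRenderDuration = 3ms and a deadline 2ms from now, the estimated finish overshoots the
// deadline by 1ms, so the render is deferred and the skip count incremented. Once the skip count
// exceeds maxDeferRenderAttempts, the set is rendered anyway so that flattening cannot be starved
// indefinitely by a busy frame schedule.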

void Flattener::dumpLayers(std::string& result) const {
    result.append("  Current layers:");
    for (const CachedSet& layer : mLayers) {
        result.append("\n");
        layer.dump(result);
    }
}

void Flattener::dump(std::string& result) const {
    const auto now = std::chrono::steady_clock::now();

    base::StringAppendF(&result, "Flattener state:\n");

    result.append("\n  Statistics:\n");

    result.append("    Display cost (in screen-size buffers):\n");
    const size_t displayArea = static_cast<size_t>(mDisplaySize.width * mDisplaySize.height);
    base::StringAppendF(&result, "      Unflattened: %.2f\n",
                        static_cast<float>(mUnflattenedDisplayCost) / displayArea);
    base::StringAppendF(&result, "      Flattened:   %.2f\n",
                        static_cast<float>(mFlattenedDisplayCost) / displayArea);

    const auto compareLayerCounts = [](const std::pair<size_t, size_t>& left,
                                       const std::pair<size_t, size_t>& right) {
        return left.first < right.first;
    };

    const size_t maxLayerCount = mInitialLayerCounts.empty()
            ? 0u
            : std::max_element(mInitialLayerCounts.cbegin(), mInitialLayerCounts.cend(),
                               compareLayerCounts)
                      ->first;

    result.append("\n    Initial counts:\n");
    for (size_t count = 1; count < maxLayerCount; ++count) {
        size_t initial = mInitialLayerCounts.count(count) > 0 ? mInitialLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      % 2zd: %zd\n", count, initial);
    }

    result.append("\n    Final counts:\n");
    for (size_t count = 1; count < maxLayerCount; ++count) {
        size_t final = mFinalLayerCounts.count(count) > 0 ? mFinalLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      % 2zd: %zd\n", count, final);
    }

    base::StringAppendF(&result, "\n    Cached sets created: %zd\n", mCachedSetCreationCount);
    base::StringAppendF(&result, "    Cost: %.2f\n",
                        static_cast<float>(mCachedSetCreationCost) / displayArea);

    const auto lastUpdate =
            std::chrono::duration_cast<std::chrono::milliseconds>(now - mLastGeometryUpdate);
    base::StringAppendF(&result, "\n  Current hash %016zx, last update %sago\n\n", mCurrentGeometry,
                        durationString(lastUpdate).c_str());

    dumpLayers(result);

    base::StringAppendF(&result, "\n");
    mTexturePool.dump(result);
}

size_t Flattener::calculateDisplayCost(const std::vector<const LayerState*>& layers) const {
    Region coveredRegion;
    size_t displayCost = 0;
    bool hasClientComposition = false;

    for (const LayerState* layer : layers) {
        coveredRegion.orSelf(layer->getDisplayFrame());

        // Regardless of composition type, we always have to read each input once
        displayCost += static_cast<size_t>(layer->getDisplayFrame().width() *
                                           layer->getDisplayFrame().height());

        hasClientComposition |= layer->getCompositionType() ==
                aidl::android::hardware::graphics::composer3::Composition::CLIENT;
    }

    if (hasClientComposition) {
        // If there is client composition, the client target buffer has to be both written by the
        // GPU and read by the DPU, so we pay its cost twice
        displayCost += 2 *
                static_cast<size_t>(coveredRegion.bounds().width() *
                                    coveredRegion.bounds().height());
    }

    return displayCost;
}
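
// Worked example (illustrative numbers): on a 1080x1920 display, two full-screen layers cost
// 2 * (1080 * 1920) pixel reads. If either is client-composited, the full-screen client target
// adds 2 * (1080 * 1920) more (one GPU write plus one DPU read), for a total of 4 * (1080 * 1920),
// i.e. 4.00 in the "screen-size buffers" unit reported by dump().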

void Flattener::resetActivities(NonBufferHash hash, time_point now) {
    ALOGV("[%s]", __func__);

    mCurrentGeometry = hash;
    mLastGeometryUpdate = now;

    for (const CachedSet& cachedSet : mLayers) {
        if (cachedSet.getLayerCount() > 1) {
            ++mInvalidatedCachedSetAges[cachedSet.getAge()];
        }
    }

    mLayers.clear();

    if (mNewCachedSet) {
        ++mInvalidatedCachedSetAges[mNewCachedSet->getAge()];
        mNewCachedSet = std::nullopt;
    }
}

NonBufferHash Flattener::computeLayersHash() const {
    size_t hash = 0;
    for (const auto& layer : mLayers) {
        android::hashCombineSingleHashed(hash, layer.getNonBufferHash());
    }
    return hash;
}
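
// For reference, a boost-style combiner is a reasonable mental model of what
// hashCombineSingleHashed does (a sketch; the real helper is defined elsewhere in the tree and may
// differ):
//
//   hash ^= layerHash + 0x9e3779b9 + (hash << 6) + (hash >> 2);
//
// The exact mixing is not important here; what matters is that the result depends on the order and
// non-buffer state of every cached set.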

// Only called if the geometry matches the last frame. Return true if mLayers
// was already populated with these layers, i.e. on the second and following
// calls with the same geometry.
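//
// Worked example (illustrative): if mLayers holds cached sets [{A,B}, {C}] and the incoming stack
// is [A, B, C] with no buffer updates, both sets are kept and aged, and each incoming
// OutputLayer's overrideInfo is pointed at the corresponding cached buffer. If C received a new
// buffer, its single-layer set is refreshed in place; if {A,B} received one, that set is
// decomposed back into its constituent layers.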
bool Flattener::mergeWithCachedSets(const std::vector<const LayerState*>& layers, time_point now) {
    SFTRACE_CALL();
    std::vector<CachedSet> merged;

    if (mLayers.empty()) {
        merged.reserve(layers.size());
        for (const LayerState* layer : layers) {
            merged.emplace_back(layer, now);
            mFlattenedDisplayCost += merged.back().getDisplayCost();
        }
        mLayers = std::move(merged);
        return false;
    }

    // the compiler should strip out the following no-op loops when ALOGV is off
    ALOGV("[%s] Incoming layers:", __func__);
    for (const LayerState* layer : layers) {
        ALOGV("%s", layer->getName().c_str());
    }

    ALOGV("[%s] Current layers:", __func__);
    for (const CachedSet& layer : mLayers) {
        const auto dumper = [&] {
            std::string dump;
            layer.dump(dump);
            return dump;
        };
        ALOGV("%s", dumper().c_str());
    }

    auto currentLayerIter = mLayers.begin();
    auto incomingLayerIter = layers.begin();

    // If not null, this represents the layer that is blurring the layer before
    // currentLayerIter. The blurring was stored in the override buffer, so the
    // layer that requests the blur no longer needs to do any blurring.
    compositionengine::OutputLayer* priorBlurLayer = nullptr;

    while (incomingLayerIter != layers.end()) {
        if (mNewCachedSet &&
            mNewCachedSet->getFirstLayer().getState()->getId() == (*incomingLayerIter)->getId()) {
            if (mNewCachedSet->hasBufferUpdate()) {
                ALOGV("[%s] Dropping new cached set", __func__);
                ++mInvalidatedCachedSetAges[0];
                mNewCachedSet = std::nullopt;
            } else if (mNewCachedSet->hasReadyBuffer()) {
                ALOGV("[%s] Found ready buffer", __func__);
                size_t skipCount = mNewCachedSet->getLayerCount();
                while (skipCount != 0) {
                    auto* peekThroughLayer = mNewCachedSet->getHolePunchLayer();
                    const size_t layerCount = currentLayerIter->getLayerCount();
                    for (size_t i = 0; i < layerCount; ++i) {
                        bool disableBlur = priorBlurLayer &&
                                priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                        OutputLayer::CompositionState& state =
                                (*incomingLayerIter)->getOutputLayer()->editState();
                        state.overrideInfo = {
                                .buffer = mNewCachedSet->getBuffer(),
                                .acquireFence = mNewCachedSet->getDrawFence(),
                                .displayFrame = mNewCachedSet->getTextureBounds(),
                                .dataspace = mNewCachedSet->getOutputDataspace(),
                                .displaySpace = mNewCachedSet->getOutputSpace(),
                                .damageRegion = Region::INVALID_REGION,
                                .visibleRegion = mNewCachedSet->getVisibleRegion(),
                                .peekThroughLayer = peekThroughLayer,
                                .disableBackgroundBlur = disableBlur,
                        };
                        ++incomingLayerIter;
                    }

                    if (currentLayerIter->getLayerCount() > 1) {
                        ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
                    }
                    ++currentLayerIter;

                    skipCount -= layerCount;
                }
                priorBlurLayer = mNewCachedSet->getBlurLayer();
                merged.emplace_back(std::move(*mNewCachedSet));
                mNewCachedSet = std::nullopt;
                continue;
            }
        }

        if (!currentLayerIter->hasBufferUpdate()) {
            currentLayerIter->incrementAge();
            merged.emplace_back(*currentLayerIter);

            // Skip the incoming layers corresponding to this valid current layer
            const size_t layerCount = currentLayerIter->getLayerCount();
            auto* peekThroughLayer = currentLayerIter->getHolePunchLayer();
            for (size_t i = 0; i < layerCount; ++i) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo = {
                        .buffer = currentLayerIter->getBuffer(),
                        .acquireFence = currentLayerIter->getDrawFence(),
                        .displayFrame = currentLayerIter->getTextureBounds(),
                        .dataspace = currentLayerIter->getOutputDataspace(),
                        .displaySpace = currentLayerIter->getOutputSpace(),
                        .damageRegion = Region(),
                        .visibleRegion = currentLayerIter->getVisibleRegion(),
                        .peekThroughLayer = peekThroughLayer,
                        .disableBackgroundBlur = disableBlur,
                };
                ++incomingLayerIter;
            }
        } else if (currentLayerIter->getLayerCount() > 1) {
            // Break the current layer into its constituent layers
            ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
            for (CachedSet& layer : currentLayerIter->decompose()) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo.disableBackgroundBlur = disableBlur;
                layer.updateAge(now);
                merged.emplace_back(layer);
                ++incomingLayerIter;
            }
        } else {
            bool disableBlur =
                    priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
            OutputLayer::CompositionState& state =
                    (*incomingLayerIter)->getOutputLayer()->editState();
            state.overrideInfo.disableBackgroundBlur = disableBlur;
            currentLayerIter->updateAge(now);
            merged.emplace_back(*currentLayerIter);
            ++incomingLayerIter;
        }
        priorBlurLayer = currentLayerIter->getBlurLayer();
        ++currentLayerIter;
    }

    for (const CachedSet& layer : merged) {
        mFlattenedDisplayCost += layer.getDisplayCost();
    }

    mLayers = std::move(merged);
    return true;
}

std::vector<Flattener::Run> Flattener::findCandidateRuns(time_point now) const {
    SFTRACE_CALL();
    std::vector<Run> runs;
    bool isPartOfRun = false;
    Run::Builder builder;
    bool firstLayer = true;
    bool runHasFirstLayer = false;

    for (auto currentSet = mLayers.cbegin(); currentSet != mLayers.cend(); ++currentSet) {
        bool layerIsInactive = now - currentSet->getLastUpdate() > mTunables.mActiveLayerTimeout;
        const bool layerHasBlur = currentSet->hasBlurBehind();
        const bool layerDeniedFromCaching = currentSet->cachingHintExcludesLayers();

        // Layers should also be considered inactive whenever their framerate is lower than 1fps.
        if (!layerIsInactive && currentSet->getLayerCount() == kNumLayersFpsConsideration) {
            auto layerFps = currentSet->getFirstLayer().getState()->getFps();
            if (layerFps > 0 && layerFps <= kFpsActiveThreshold) {
                SFTRACE_FORMAT("layer is considered inactive due to low FPS [%s] %f",
                               currentSet->getFirstLayer().getName().c_str(), layerFps);
                layerIsInactive = true;
            }
        }

        if (!layerDeniedFromCaching && layerIsInactive &&
            (firstLayer || runHasFirstLayer || !layerHasBlur) &&
            !currentSet->hasKnownColorShift()) {
            if (isPartOfRun) {
                builder.increment();
            } else {
                builder.init(currentSet);
                if (firstLayer) {
                    runHasFirstLayer = true;
                }
                isPartOfRun = true;
            }
        } else if (isPartOfRun) {
            builder.setHolePunchCandidate(&(*currentSet));

            // If we're here then this blur layer recently had an active buffer updating, meaning
            // that there is exactly one layer. Blur radius currently is part of layer stack
            // geometry, so we're also guaranteed that the background blur radius hasn't changed for
            // at least as long as this new inactive cached set.
            if (runHasFirstLayer && layerHasBlur &&
                currentSet->getFirstLayer().getBackgroundBlurRadius() > 0) {
                builder.setBlurringLayer(&(*currentSet));
            }
            if (auto run = builder.validateAndBuild(); run) {
                runs.push_back(*run);
            }

            runHasFirstLayer = false;
            builder.reset();
            isPartOfRun = false;
        }

        firstLayer = false;
    }

    // If we're in the middle of a run at the end, we still need to validate and build it.
    if (isPartOfRun) {
        if (auto run = builder.validateAndBuild(); run) {
            runs.push_back(*run);
        }
    }

    ALOGV("[%s] Found %zu candidate runs", __func__, runs.size());

    return runs;
}
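
// Worked example (illustrative): given cached sets [A, B, C] where A and B have been idle past
// mActiveLayerTimeout but C is still updating, A and B form one candidate run and C, the layer
// that terminates it, becomes the run's hole-punch candidate. If all three are idle instead, the
// run extends to the end of the stack and is built by the trailing validateAndBuild() above, with
// no hole-punch candidate.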

std::optional<Flattener::Run> Flattener::findBestRun(std::vector<Flattener::Run>& runs) const {
    if (runs.empty()) {
        return std::nullopt;
    }

    // TODO (b/181192467): Choose the best run, instead of just the first.
    return runs[0];
}

void Flattener::buildCachedSets(time_point now) {
    SFTRACE_CALL();
    if (mLayers.empty()) {
        ALOGV("[%s] No layers found, returning", __func__);
        return;
    }

    // Don't try to build a new cached set if we already have a new one in progress
    if (mNewCachedSet) {
        return;
    }

    for (const CachedSet& layer : mLayers) {
        // TODO (b/191997217): make it less aggressive, and sync with findCandidateRuns
        if (layer.hasProtectedLayers()) {
            SFTRACE_NAME("layer->hasProtectedLayers()");
            return;
        }
    }

    std::vector<Run> runs = findCandidateRuns(now);

    std::optional<Run> bestRun = findBestRun(runs);

    if (!bestRun) {
        return;
    }

    mNewCachedSet.emplace(*bestRun->getStart());
    mNewCachedSet->setLastUpdate(now);
    auto currentSet = bestRun->getStart();
    while (mNewCachedSet->getLayerCount() < bestRun->getLayerLength()) {
        ++currentSet;
        mNewCachedSet->append(*currentSet);
    }

    if (bestRun->getBlurringLayer()) {
        mNewCachedSet->addBackgroundBlurLayer(*bestRun->getBlurringLayer());
    }

    if (mTunables.mEnableHolePunch && bestRun->getHolePunchCandidate() &&
        bestRun->getHolePunchCandidate()->requiresHolePunch()) {
        // Add the pip layer to mNewCachedSet, but in a special way - it should
        // replace the buffer with a clear round rect.
        mNewCachedSet->addHolePunchLayerIfFeasible(*bestRun->getHolePunchCandidate(),
                                                   bestRun->getStart() == mLayers.cbegin());
    }

    // TODO(b/181192467): Actually compute new LayerState vector and corresponding hash for each run
    // and feedback into the predictor

    ++mCachedSetCreationCount;
    mCachedSetCreationCost += mNewCachedSet->getCreationCost();

    // note the compiler should strip the following no-op statements when ALOGV is off
    const auto dumper = [&] {
        std::string setDump;
        mNewCachedSet->dump(setDump);
        return setDump;
    };
    ALOGV("[%s] Added new cached set:\n%s", __func__, dumper().c_str());
}

} // namespace android::compositionengine::impl::planner