/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrResourceAllocator.h"

#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrResourceCache.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/GrSurfaceProxyPriv.h"
#include "src/gpu/ganesh/GrTexture.h"  // IWYU pragma: keep

#include <cstddef>
#include <limits>
#include <utility>

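// High-level flow, as suggested by the functions below (the actual driver of these calls
// lives in Ganesh's drawing manager, outside this file): addInterval() records the op range
// over which each proxy is used; planAssignment() maps each interval onto a Register;
// makeBudgetHeadroom() optionally purges the resource cache to make room for the plan; and
// assign() instantiates the proxies. reset() then readies the allocator for the next flush.
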
#ifdef SK_DEBUG
#include <atomic>

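// Debug-only ID generators: each hands out process-wide, non-zero IDs (anything but
// SK_InvalidUniqueID) so Intervals and Registers can be told apart in debug output.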
uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

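// Records that 'proxy' is used over the op range [start, end]. If the proxy already has an
// interval we extend it; otherwise a new interval is inserted, sorted by increasing start.
// Read-only proxies and proxies that can skip the allocator never get intervals.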
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse, AllowRecycling allowRecycling
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read-only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or be a lazy proxy that we instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        if (allowRecycling == AllowRecycling::kNo) {
            // In this case, a preexisting interval is made non-reusable since its proxy is
            // sampled into a secondary command buffer.
            intvl->disallowRecycling();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    if (allowRecycling == AllowRecycling::kNo) {
        newIntvl->disallowRecycling();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

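// A Register tracks the surface that will back one or more proxies. At construction we
// eagerly search for an existing surface: scratch-keyed registers look in the scratch pool
// (when scratch reuse is possible), uniquely-keyed registers look up the cached surface by
// its unique key.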
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(
                    fScratchKey, /*label=*/"ResourceAllocatorRegister");
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

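// A register may return to the free pool only if recycling is allowed, the proxy can use
// scratch textures, the register has a scratch key but no unique key, and nothing outside
// the allocator holds a ref on the proxy.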
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount,
                                                 AllowRecycling allowRecycling) const {
    if (allowRecycling == AllowRecycling::kNo) {
        return false;
    }

    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

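// Backs 'proxy' with a GrSurface: the previously found existing surface if there is one,
// a freshly created surface for the originating proxy, or the originating proxy's surface
// for any other proxy sharing this register. Budgeting and unique keys are reconciled too.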
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (skgpu::Budgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy,
                                                        skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses(),
                                 intvl->allowRecycling())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

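// Walks the intervals in increasing-start order, expiring those that have ended and
// assigning a Register to each proxy that needs one. Fully-lazy proxies are instantiated
// here; other lazy proxies and already-instantiated proxies are left for assign().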
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
             r->uniqueID(),
             cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

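// Sums the GPU memory the plan will newly allocate for budgeted proxies and asks the
// resource cache to purge enough to keep that allocation under budget. Returns false if
// the necessary headroom cannot be made.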
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (skgpu::Budgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment.
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

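// Returns the allocator to a clean state so it can be reused for the next flush.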
void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

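// Instantiates the proxies in increasing-start order, using each interval's planned
// Register (or the proxy's lazy-instantiation callback) and stopping at the first failure.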
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxies:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif