/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkArenaAlloc.h"
#include "src/core/SkTHash.h"
#include "src/core/SkTMultiMap.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrHashMapWithCache.h"
#include "src/gpu/ganesh/GrSurface.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"

#include <cstdint>

class GrDirectContext;
class GrResourceProvider;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse). The ResourceAllocator uses Registers (in the sense of register
 * allocation) to represent a future surface that will be used for each proxy during
 * `planAssignment`, and then assigns actual surfaces during `assign`.
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 * The planAssignment method traverses the sorted list and:
 *     moves intervals from the active list that have completed (returning their registers
 *     to the free pool) into the finished list (sorted by increasing start)
 *
 *     allocates a new register (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (that is sorted by increasing end index)
 *
 * After assignment planning, the user can choose to call `makeBudgetHeadroom` which:
 *     computes how much VRAM would be needed for new resources for all extant Registers
 *
 *     asks the resource cache to purge enough resources to get that much free space
 *
 *     if it's not possible, do nothing and return false. The user may opt to reset
 *     the allocator and start over with a different DAG.
 *
 * If the user wants to commit to the current assignment plan, they call `assign` which:
 *     instantiates lazy proxies
 *
 *     instantiates new surfaces for all registers that need them
 *
 *     assigns the surface for each register to all the proxies that will use it
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in OpsTask::gatherProxyIntervals).
 *
 * During addInterval, read-only lazy proxies are instantiated. If that fails, the resource
 * allocator will note the failure and ignore pretty much anything else until `reset`.
 *
 * During planAssignment, fully-lazy proxies are instantiated so that we can know their size for
 * budgeting purposes. If this fails, return false.
 *
 * During assign, partially-lazy proxies are instantiated and new surfaces are created for all other
 * proxies. If any of these fails, return false.
 *
 * The drawing manager will drop the flush if any proxies fail to instantiate.
 */
85 class GrResourceAllocator {
86 public:
GrResourceAllocator(GrDirectContext * dContext)87     GrResourceAllocator(GrDirectContext* dContext)
88             : fDContext(dContext) {}
89 
90     ~GrResourceAllocator();
91 
curOp()92     unsigned int curOp() const { return fNumOps; }
incOps()93     void incOps() { fNumOps++; }
94 
95     /** Indicates whether a given call to addInterval represents an actual usage of the
96      *  provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
97      *  In that case we need to create an extra long interval for them (due to the upload) but
98      *  don't want to count that usage/reference towards the proxy's recyclability.
99      */
100     enum class ActualUse : bool {
101         kNo  = false,
102         kYes = true
103     };
104 
105     /** Indicates whether we allow a gpu texture assigned to a register to be recycled or not. This
106      *  comes up when dealing with with Vulkan Secondary CommandBuffers since offscreens sampled
107      *  into the scb will all be drawn before being sampled in the scb. This is because the scb
108      *  will get submitted in a later command buffer. Thus offscreens cannot share an allocation or
109      *  later reuses will overwrite earlier ones.
110      */
111     enum class AllowRecycling : bool {
112         kNo  = false,
113         kYes = true
114     };
115 
116     // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
117     // If an existing interval already exists it will be expanded to include the new range.
118     void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse,
119                      AllowRecycling SkDEBUGCODE(, bool isDirectDstRead = false));
120 
failedInstantiation()121     bool failedInstantiation() const { return fFailedInstantiation; }
122 
123     // Generate an internal plan for resource allocation. After this you can optionally call
124     // `makeBudgetHeadroom` to check whether that plan would go over our memory budget.
125     // Fully-lazy proxies are also instantiated at this point so that their size can
126     // be known accurately. Returns false if any lazy proxy failed to instantiate, true otherwise.
127     bool planAssignment();
128 
129     // Figure out how much VRAM headroom this plan requires. If there's enough purgeable resources,
130     // purge them and return true. Otherwise return false.
131     bool makeBudgetHeadroom();
132 
133     // Clear all internal state in preparation for a new set of intervals.
134     void reset();
135 
136     // Instantiate and assign resources to all proxies.
137     bool assign();
138 
139 #if GR_ALLOCATION_SPEW
140     void dumpIntervals();
141 #endif
142 
143 private:
144     class Interval;
145     class Register;
146 
147     // Remove dead intervals from the active list
148     void expire(unsigned int curIndex);
149 
150     // These two methods wrap the interactions with the free pool
151     void recycleRegister(Register* r);
152     Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy);
153 
154     struct FreePoolTraits {
GetKeyFreePoolTraits155         static const skgpu::ScratchKey& GetKey(const Register& r) {
156             return r.scratchKey();
157         }
158 
HashFreePoolTraits159         static uint32_t Hash(const skgpu::ScratchKey& key) { return key.hash(); }
OnFreeFreePoolTraits160         static void OnFree(Register* r) { }
161     };
162     typedef SkTMultiMap<Register, skgpu::ScratchKey, FreePoolTraits> FreePoolMultiMap;
163 
164     typedef skia_private::THashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;
165 
166     struct UniqueKeyHash {
operatorUniqueKeyHash167         uint32_t operator()(const skgpu::UniqueKey& key) const { return key.hash(); }
168     };
169     typedef skia_private::THashMap<skgpu::UniqueKey, Register*, UniqueKeyHash>
170             UniqueKeyRegisterHash;
171 
172     // Each proxy – with some exceptions – is assigned a register. After all assignments are made,
173     // another pass is performed to instantiate and assign actual surfaces to the proxies. Right
174     // now these are performed in one call, but in the future they will be separable and the user
175     // will be able to query re: memory cost before committing to surface creation.
176     class Register {
177     public:
178         // It's OK to pass an invalid scratch key iff the proxy has a unique key.
179         Register(GrSurfaceProxy* originatingProxy, skgpu::ScratchKey, GrResourceProvider*);
180 
scratchKey()181         const skgpu::ScratchKey& scratchKey() const { return fScratchKey; }
uniqueKey()182         const skgpu::UniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }
183 
accountedForInBudget()184         bool accountedForInBudget() const { return fAccountedForInBudget; }
setAccountedForInBudget()185         void setAccountedForInBudget() { fAccountedForInBudget = true; }
186 
existingSurface()187         GrSurface* existingSurface() const { return fExistingSurface.get(); }
188 
189         // Can this register be used by other proxies after this one?
190         bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount,
191                           AllowRecycling) const;
192 
193         // Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy'
194         // is used to cache the allocation when a given register is used by multiple
195         // proxies.
196         bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*);
197 
198         SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })
199 
200     private:
201         GrSurfaceProxy*   fOriginatingProxy;
202         skgpu::ScratchKey fScratchKey; // free pool wants a reference to this.
203         sk_sp<GrSurface>  fExistingSurface; // queried from resource cache. may be null.
204         bool              fAccountedForInBudget = false;
205 
206 #ifdef SK_DEBUG
207         uint32_t         fUniqueID;
208 
209         static uint32_t  CreateUniqueID();
210 #endif
211     };
212 
213     class Interval {
214     public:
Interval(GrSurfaceProxy * proxy,unsigned int start,unsigned int end)215         Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
216                 : fProxy(proxy)
217                 , fStart(start)
218                 , fEnd(end) {
219             SkASSERT(proxy);
220             SkDEBUGCODE(fUniqueID = CreateUniqueID());
221 #if GR_TRACK_INTERVAL_CREATION
222             SkString proxyStr = proxy->dump();
223             SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end);
224 #endif
225         }
226 
proxy()227         const GrSurfaceProxy* proxy() const { return fProxy; }
proxy()228         GrSurfaceProxy* proxy() { return fProxy; }
229 
start()230         unsigned int start() const { return fStart; }
end()231         unsigned int end() const { return fEnd; }
232 
setNext(Interval * next)233         void setNext(Interval* next) { fNext = next; }
next()234         const Interval* next() const { return fNext; }
next()235         Interval* next() { return fNext; }
236 
getRegister()237         Register* getRegister() const { return fRegister; }
setRegister(Register * r)238         void setRegister(Register* r) { fRegister = r; }
239 
addUse()240         void addUse() { fUses++; }
uses()241         int uses() const { return fUses; }
242 
extendEnd(unsigned int newEnd)243         void extendEnd(unsigned int newEnd) {
244             if (newEnd > fEnd) {
245                 fEnd = newEnd;
246 #if GR_TRACK_INTERVAL_CREATION
247                 SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
248 #endif
249             }
250         }
251 
disallowRecycling()252         void disallowRecycling() {
253             fAllowRecycling = AllowRecycling::kNo;
254         }
allowRecycling()255         AllowRecycling allowRecycling() const { return fAllowRecycling; }
256 
257         SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })
258 
259     private:
260         GrSurfaceProxy*  fProxy;
261         unsigned int     fStart;
262         unsigned int     fEnd;
263         Interval*        fNext = nullptr;
264         unsigned int     fUses = 0;
265         Register*        fRegister = nullptr;
266         AllowRecycling   fAllowRecycling = AllowRecycling::kYes;
267 
268 #ifdef SK_DEBUG
269         uint32_t        fUniqueID;
270 
271         static uint32_t CreateUniqueID();
272 #endif
273     };
274 
275     class IntervalList {
276     public:
277         IntervalList() = default;
278         // N.B. No need for a destructor – the arena allocator will clean up for us.
279 
empty()280         bool empty() const {
281             SkASSERT(SkToBool(fHead) == SkToBool(fTail));
282             return !SkToBool(fHead);
283         }
peekHead()284         const Interval* peekHead() const { return fHead; }
peekHead()285         Interval* peekHead() { return fHead; }
286         Interval* popHead();
287         void insertByIncreasingStart(Interval*);
288         void insertByIncreasingEnd(Interval*);
289 
290     private:
291         SkDEBUGCODE(void validate() const;)
292 
293         Interval* fHead = nullptr;
294         Interval* fTail = nullptr;
295     };
296 
297     // Compositing use cases can create > 80 intervals.
298     static const int kInitialArenaSize = 128 * sizeof(Interval);
299 
300     GrDirectContext*             fDContext;
301     FreePoolMultiMap             fFreePool;          // Recently created/used GrSurfaces
302     IntvlHash                    fIntvlHash;         // All the intervals, hashed by proxyID
303 
304     IntervalList                 fIntvlList;         // All the intervals sorted by increasing start
305     IntervalList                 fActiveIntvls;      // List of live intervals during assignment
306                                                      // (sorted by increasing end)
307     IntervalList                 fFinishedIntvls;    // All the completed intervals
308                                                      // (sorted by increasing start)
309     UniqueKeyRegisterHash        fUniqueKeyRegisters;
310     unsigned int                 fNumOps = 0;
311 
312     SkDEBUGCODE(bool             fPlanned = false;)
313     SkDEBUGCODE(bool             fAssigned = false;)
314 
315     SkSTArenaAllocWithReset<kInitialArenaSize>   fInternalAllocator; // intervals & registers
316     bool                                         fFailedInstantiation = false;
317 };

#endif // GrResourceAllocator_DEFINED