xref: /aosp_15_r20/external/skia/src/gpu/graphite/Resource.h (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
1 /*
2  * Copyright 2022 Google LLC
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef skgpu_graphite_Resource_DEFINED
9 #define skgpu_graphite_Resource_DEFINED
10 
11 #include "include/gpu/GpuTypes.h"
12 #include "include/private/base/SkMutex.h"
13 #include "src/gpu/GpuTypesPriv.h"
14 #include "src/gpu/graphite/GraphiteResourceKey.h"
15 #include "src/gpu/graphite/ResourceTypes.h"
16 
17 #include <atomic>
18 #include <functional>
19 #include <string>
20 #include <string_view>
21 
22 class SkMutex;
23 class SkTraceMemoryDump;
24 
25 namespace skgpu::graphite {
26 
27 class ResourceCache;
28 class SharedContext;
29 class Texture;
30 
/**
 * Base class for objects that can be kept in the ResourceCache.
 *
 * A Resource carries three separate atomic reference counts: usage refs (client-visible,
 * managed via sk_sp), command buffer refs, and cache refs (held by the ResourceCache).
 * Removing the last ref of a given kind calls notifyARefIsZero() under fUnrefMutex, which
 * decides whether the object should be disposed via internalDispose().
 */
class Resource {
public:
    Resource(const Resource&) = delete;
    Resource(Resource&&) = delete;
    Resource& operator=(const Resource&) = delete;
    Resource& operator=(Resource&&) = delete;

    // Adds a usage ref to the resource. Named ref so we can easily manage usage refs with sk_sp.
    void ref() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(this->hasUsageRef());
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a usage ref from the resource. If this was the last outstanding ref of any kind,
    // the resource may be disposed (see notifyARefIsZero()/internalDispose()).
    void unref() const {
        bool shouldFree = false;
        {
            // fUnrefMutex serializes the "last ref removed" decision across the different
            // ref kinds; see the comment on fUnrefMutex below.
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasUsageRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fUsageRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kUsage);
            }
        }
        if (shouldFree) {
            // Dispose outside the mutex scope: internalDispose() deletes this object
            // (and with it fUnrefMutex).
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }

    // Adds a command buffer ref to the resource.
    void refCommandBuffer() const {
        // Some resources route command buffer refs to usage refs instead (see
        // fCommandBufferRefsAsUsageRefs).
        if (fCommandBufferRefsAsUsageRefs) {
            return this->ref();
        }
        // No barrier required.
        (void)fCommandBufferRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a command buffer ref from the resource.
    void unrefCommandBuffer() const {
        if (fCommandBufferRefsAsUsageRefs) {
            return this->unref();
        }
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCommandBufferRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fCommandBufferRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCommandBuffer);
            }
        }
        if (shouldFree) {
            // Dispose outside the mutex scope: internalDispose() deletes this object.
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }

    Ownership ownership() const { return fOwnership; }

    skgpu::Budgeted budgeted() const { return fBudgeted; }

    // Retrieves the amount of GPU memory used by this resource in bytes. It is approximate since we
    // aren't aware of additional padding or copies made by the driver.
    size_t gpuMemorySize() const { return fGpuMemorySize; }

    // Lightweight value type identifying a Resource instance. A default-constructed UniqueID
    // holds SK_InvalidUniqueID.
    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

    private:
        uint32_t fID = SK_InvalidUniqueID;
    };

    // Gets an id that is unique for this Resource object. It is static in that it does not change
    // when the content of the Resource object changes. This will never return 0.
    UniqueID uniqueID() const { return fUniqueID; }

    // Describes the type of gpu resource that is represented by the implementing
    // class (e.g. texture, buffer, etc).  This data is used for diagnostic
    // purposes by dumpMemoryStatistics().
    //
    // The value returned is expected to be long lived and will not be copied by the caller.
    virtual const char* getResourceType() const = 0;

    // Returns the current human-readable label describing this Resource's use.
    std::string getLabel() const { return fLabel; }

    // We allow the label on a Resource to change when used for a different function. For example
    // when reusing a scratch Texture we can change the label to match callers current use.
    void setLabel(std::string_view label) {
        fLabel = label;

        if (!fLabel.empty()) {
            // Prefix with "Skia_" before forwarding to the backend so the label is
            // recognizable in backend tooling.
            const std::string fullLabel = "Skia_" + fLabel;
            this->setBackendLabel(fullLabel.c_str());
        }
    }

    // Tests whether an object has been abandoned or released. All objects will be in this state
    // after their creating Context is destroyed or abandoned.
    //
    // @return true if the object has been released or abandoned,
    //         false otherwise.
    // TODO: As of now this function isn't really needed because in freeGpuData we are always
    // deleting this object. However, I want to implement all the purging logic first to make sure
    // we don't have a use case for calling internalDispose but not wanting to delete the actual
    // object yet.
    bool wasDestroyed() const { return fSharedContext == nullptr; }

    const GraphiteResourceKey& key() const { return fKey; }
    // This should only ever be called by the ResourceProvider.
    void setKey(const GraphiteResourceKey& key) {
        // Shareable resources must be budgeted; only non-shareable keys may be unbudgeted.
        SkASSERT(key.shareable() == Shareable::kNo || this->budgeted() == skgpu::Budgeted::kYes);
        fKey = key;
    }

    // Dumps memory usage information for this Resource to traceMemoryDump.
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * If the resource has a non-shareable key then this gives the resource subclass an opportunity
     * to prepare itself to re-enter the cache. The ResourceCache extends its privilege to take the
     * first UsageRef to this function via takeRef. If takeRef is called this resource will not
     * immediately enter the cache but will be re-processed when the usage ref count again reaches
     * zero.
     */
    virtual void prepareForReturnToCache(const std::function<void()>& takeRef) {}

    // Returns this resource as a Texture, or nullptr if it is not one.
    virtual const Texture* asTexture() const { return nullptr; }

#if defined(GPU_TEST_UTILS)
    bool testingShouldDeleteASAP() const { return fDeleteASAP == DeleteASAP::kYes; }
#endif

protected:
    Resource(const SharedContext*,
             Ownership,
             skgpu::Budgeted,
             size_t gpuMemorySize,
             bool commandBufferRefsAsUsageRefs = false);
    virtual ~Resource();

    const SharedContext* sharedContext() const { return fSharedContext; }

    // Overridden to add extra information to the memory dump.
    virtual void onDumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump,
                                        const char* dumpName) const {}

#ifdef SK_DEBUG
    bool debugHasCommandBufferRef() const {
        return hasCommandBufferRef();
    }
#endif

    // Needed to be protected for DawnBuffer emscripten prepareForReturnToCache.
    void setDeleteASAP() { fDeleteASAP = DeleteASAP::kYes; }

private:
    friend class ProxyCache; // for setDeleteASAP and updateAccessTime

    // Overridden to free GPU resources in the backend API.
    virtual void freeGpuData() = 0;

    // Overridden to call any release callbacks, if necessary.
    virtual void invokeReleaseProc() {}

    enum class DeleteASAP : bool {
        kNo = false,
        kYes = true,
    };

    DeleteASAP shouldDeleteASAP() const { return fDeleteASAP; }

    // In the ResourceCache this is called whenever a Resource is moved into the purgeableQueue. It
    // may also be called by the ProxyCache to track the time on Resources it is holding on to.
    void updateAccessTime() {
        fLastAccess = skgpu::StdSteadyClock::now();
    }
    skgpu::StdSteadyClock::time_point lastAccessTime() const {
        return fLastAccess;
    }

    // Overridden by backends that can attach a debug label to the underlying GPU object.
    virtual void setBackendLabel(char const* label) {}

    ////////////////////////////////////////////////////////////////////////////
    // The following set of functions are only meant to be called by the ResourceCache. We don't
    // want them to be public for general users of a Resource, but they also aren't purely
    // internal calls.
    ////////////////////////////////////////////////////////////////////////////
    friend ResourceCache;

    void makeBudgeted() { fBudgeted = skgpu::Budgeted::kYes; }
    void makeUnbudgeted() { fBudgeted = skgpu::Budgeted::kNo; }

    // This version of ref allows adding a ref when the usage count is 0. This should only be called
    // from the ResourceCache.
    void initialUsageRef() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(fUsageRefCnt >= 0);
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    bool isPurgeable() const;
    int* accessReturnIndex()  const { return &fReturnIndex; }
    int* accessCacheIndex()  const { return &fCacheArrayIndex; }

    uint32_t timestamp() const { return fTimestamp; }
    void setTimestamp(uint32_t ts) { fTimestamp = ts; }

    void registerWithCache(sk_sp<ResourceCache>);

    // Adds a cache ref to the resource. This is only called by ResourceCache. A Resource will only
    // ever add a ref when the Resource is part of the cache (i.e. when insertResource is called)
    // and while the Resource is in the ResourceCache::ReturnQueue.
    void refCache() const {
        // No barrier required.
        (void)fCacheRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a cache ref from the resource. The unref here should only ever be called from the
    // ResourceCache and only in the Recorder thread the ResourceCache is part of.
    void unrefCache() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCacheRef());
            // A release here acts in place of all releases we "should" have been doing in
            // refCache().
            if (1 == fCacheRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCache);
            }
        }
        if (shouldFree) {
            // Dispose outside the mutex scope: internalDispose() deletes this object.
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }

#ifdef SK_DEBUG
    bool isUsableAsScratch() const {
        return fKey.shareable() == Shareable::kNo && !this->hasUsageRef() && fNonShareableInCache;
    }
#endif

    ////////////////////////////////////////////////////////////////////////////
    // The remaining calls are meant to be truly private
    ////////////////////////////////////////////////////////////////////////////
    bool hasUsageRef() const {
        if (0 == fUsageRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasUsageRef() from running until previous
            // owners are all totally done calling unref().
            return false;
        }
        return true;
    }

    bool hasCommandBufferRef() const {
        // Note that we don't check here for fCommandBufferRefsAsUsageRefs. This should always
        // report zero if that value is true.
        if (0 == fCommandBufferRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasCommandBufferRef() from running
            // until previous owners are all totally done calling unrefCommandBuffer().
            return false;
        }
        SkASSERT(!fCommandBufferRefsAsUsageRefs);
        return true;
    }

    bool hasCacheRef() const {
        if (0 == fCacheRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasCacheRef() from running until previous
            // owners are all totally done calling unrefCache().
            return false;
        }
        return true;
    }

    bool hasAnyRefs() const {
        return this->hasUsageRef() || this->hasCommandBufferRef() || this->hasCacheRef();
    }

    bool notifyARefIsZero(LastRemovedRef removedRef) const;

    // Frees the object in the underlying 3D API.
    void internalDispose();

    // We need to guard calling unref on the usage and command buffer refs since they each could be
    // unreffed on different threads. This can lead to calling notifyARefIsZero twice with each
    // instance thinking there are no more refs left and both trying to delete the object.
    mutable SkMutex fUnrefMutex;

    SkDEBUGCODE(mutable bool fCalledRemovedFromCache = false;)

    // This is not ref'ed but internalDispose() will be called before the Gpu object is destroyed.
    // That call will set this to nullptr.
    const SharedContext* fSharedContext;

    mutable std::atomic<int32_t> fUsageRefCnt;
    mutable std::atomic<int32_t> fCommandBufferRefCnt;
    mutable std::atomic<int32_t> fCacheRefCnt;
    // Indicates that CommandBufferRefs should be rerouted to UsageRefs.
    const bool fCommandBufferRefsAsUsageRefs = false;

    GraphiteResourceKey fKey;

    sk_sp<ResourceCache> fReturnCache;
    // An index into the return cache so we know whether or not the resource is already waiting to
    // be returned or not.
    mutable int fReturnIndex = -1;

    Ownership fOwnership;

    // Sentinel meaning gpuMemorySize has not been computed/assigned yet.
    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    // All resources created internally by Graphite and held in the ResourceCache as a shared
    // resource or available scratch resource are considered budgeted. Resources that back client
    // owned objects (e.g. SkSurface or SkImage) are not budgeted and do not count against cache
    // limits.
    skgpu::Budgeted fBudgeted;

    // This is only used by ProxyCache::purgeProxiesNotUsedSince which is called from
    // ResourceCache::purgeResourcesNotUsedSince. When kYes, this signals that the Resource
    // should've been purged based on its timestamp at some point regardless of what its
    // current timestamp may indicate (since the timestamp will be updated when the Resource
    // is returned to the ResourceCache).
    DeleteASAP fDeleteASAP = DeleteASAP::kNo;

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    mutable int fCacheArrayIndex = -1;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    skgpu::StdSteadyClock::time_point fLastAccess;

    const UniqueID fUniqueID;

    // String used to describe the current use of this Resource.
    std::string fLabel;

    // This is only used during validation checking. Lots of the validation code depends on a
    // resource being purgeable or not. However, purgeable itself just means having no refs. The
    // refs can be removed before a Resource is returned to the cache (or even added to the
    // ReturnQueue).
    SkDEBUGCODE(mutable bool fNonShareableInCache = false;)
};
393 
394 } // namespace skgpu::graphite
395 
396 #endif // skgpu_graphite_Resource_DEFINED
397