/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/ResourceCache.h"

#include "include/private/base/SingleOwner.h"
#include "src/base/SkRandom.h"
#include "src/core/SkTMultiMap.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/ProxyCache.h"
#include "src/gpu/graphite/Resource.h"

#if defined(GPU_TEST_UTILS)
#include "src/gpu/graphite/Texture.h"
#endif

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

sk_sp<ResourceCache> ResourceCache::Make(SingleOwner* singleOwner,
                                         uint32_t recorderID,
                                         size_t maxBytes) {
    return sk_sp<ResourceCache>(new ResourceCache(singleOwner, recorderID, maxBytes));
}
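// A minimal usage sketch (illustrative only; mySingleOwner, myRecorderID, and the
// budget value are hypothetical). The ResourceProvider that owns the cache would
// create it with something like:
//
//   sk_sp<ResourceCache> cache =
//           ResourceCache::Make(&mySingleOwner, myRecorderID, /*maxBytes=*/256 * (1 << 20));
//
// Passing SK_InvalidGenID as the recorderID skips creation of the ProxyCache (see the
// constructor below).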

ResourceCache::ResourceCache(SingleOwner* singleOwner, uint32_t recorderID, size_t maxBytes)
        : fMaxBytes(maxBytes)
        , fSingleOwner(singleOwner) {
    if (recorderID != SK_InvalidGenID) {
        fProxyCache = std::make_unique<ProxyCache>(recorderID);
    }
    // TODO: Once more code uses ResourceCache, the compiler may stop complaining about
    // fSingleOwner being unused in release builds (as happened in Ganesh) and this can be
    // deleted.
#ifndef SK_DEBUG
    (void)fSingleOwner;
#endif
}

ResourceCache::~ResourceCache() {
    // The ResourceCache must have been shut down by the ResourceProvider before it is destroyed.
    SkASSERT(fIsShutdown);
}

void ResourceCache::shutdown() {
    ASSERT_SINGLE_OWNER

    SkASSERT(!fIsShutdown);

    {
        SkAutoMutexExclusive locked(fReturnMutex);
        fIsShutdown = true;
    }
    if (fProxyCache) {
        fProxyCache->purgeAll();
    }

    this->processReturnedResources();

    while (!fNonpurgeableResources.empty()) {
        Resource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        this->removeFromNonpurgeableArray(back);
        back->unrefCache();
    }

    while (fPurgeableQueue.count()) {
        Resource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        this->removeFromPurgeableQueue(top);
        top->unrefCache();
    }

    TRACE_EVENT_INSTANT0("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD);
}
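// A note on the teardown order above (a summary of the existing behavior, not new
// behavior): fIsShutdown is set under fReturnMutex first, so returnResource() fails
// fast and no resource can be re-queued mid-teardown. The queue is then drained once
// via processReturnedResources() so the two loops below see the final set of
// resources before their cache refs are dropped.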

void ResourceCache::insertResource(Resource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());
    SkASSERT(resource->key().isValid());
    // All resources in the cache are owned. If we track wrapped resources in the cache we'll need
    // to update this check.
    SkASSERT(resource->ownership() == Ownership::kOwned);

    // The reason to call processReturnedResources here is to get an accurate accounting of our
    // memory usage, since some resources can go from unbudgeted to budgeted when they return. So
    // we want them all returned before adding the budget for the new resource, in case we need to
    // purge things. However, if the new resource has a memory size of 0, we skip returning
    // resources (which has overhead for each call) since the new resource won't affect whether
    // we're over or under budget.
    if (resource->gpuMemorySize() > 0) {
        this->processReturnedResources();
    }

    resource->registerWithCache(sk_ref_sp(this));
    resource->refCache();

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    this->setResourceTimestamp(resource, this->getNextTimestamp());
    resource->updateAccessTime();

    this->addToNonpurgeableArray(resource);

    SkDEBUGCODE(fCount++;)

    if (resource->key().shareable() == Shareable::kYes) {
        fResourceMap.insert(resource->key(), resource);
    }

    if (resource->budgeted() == skgpu::Budgeted::kYes) {
        fBudgetedBytes += resource->gpuMemorySize();
    }

    this->purgeAsNeeded();
}

Resource* ResourceCache::findAndRefResource(const GraphiteResourceKey& key,
                                            skgpu::Budgeted budgeted) {
    ASSERT_SINGLE_OWNER

    SkASSERT(key.isValid());

    Resource* resource = fResourceMap.find(key);
    if (!resource) {
        // The main reason to call processReturnedResources here is to see if there are any
        // returned resources that match the key. However, there is overhead in calling it, so we
        // only do so after first failing to find a matching resource.
        if (this->processReturnedResources()) {
            resource = fResourceMap.find(key);
        }
    }
    if (resource) {
        // All resources we pull out of the cache for use should be budgeted.
        SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
        if (key.shareable() == Shareable::kNo) {
            // If a resource is not shareable (i.e. a scratch resource) then we remove it from the
            // map so that it isn't found again.
            fResourceMap.remove(key, resource);
            if (budgeted == skgpu::Budgeted::kNo) {
                resource->makeUnbudgeted();
                fBudgetedBytes -= resource->gpuMemorySize();
            }
            SkDEBUGCODE(resource->fNonShareableInCache = false;)
        } else {
            // Shareable resources should never be requested as non budgeted.
            SkASSERT(budgeted == skgpu::Budgeted::kYes);
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }

    // processReturnedResources may have added resources back into our budget if they were
    // previously being used in an SkImage or SkSurface. However, instead of calling purgeAsNeeded
    // in processReturnedResources, we delay calling it until now so we don't end up purging a
    // resource we're looking for in this function.
    //
    // We could avoid calling this if we didn't return any resources from processReturnedResources.
    // However, when not overbudget purgeAsNeeded is very cheap. When overbudget there may be some
    // really niche usage patterns that could cause us to never actually return resources to the
    // cache, but still be overbudget due to shared resources. So to be safe we just always call it
    // here.
    this->purgeAsNeeded();

    return resource;
}
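// An illustrative round trip for a scratch (Shareable::kNo) resource under the
// behavior above (a sketch with hypothetical names, not an exact trace):
//
//   Resource* r = cache->findAndRefResource(scratchKey, skgpu::Budgeted::kYes);
//   // r was removed from fResourceMap, so a second find with scratchKey misses.
//   // ... the caller uses r and eventually drops its usage ref ...
//   // returnResource()/returnResourceToCache() later re-insert r into
//   // fResourceMap, making it findable as scratch again.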

void ResourceCache::refAndMakeResourceMRU(Resource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (this->inPurgeableQueue(resource)) {
        // It's about to become unpurgeable.
        this->removeFromPurgeableQueue(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->initialUsageRef();

    this->setResourceTimestamp(resource, this->getNextTimestamp());
    this->validate();
}

bool ResourceCache::returnResource(Resource* resource, LastRemovedRef removedRef) {
    // We should never be trying to return a LastRemovedRef of kCache.
    SkASSERT(removedRef != LastRemovedRef::kCache);
    SkAutoMutexExclusive locked(fReturnMutex);
    if (fIsShutdown) {
        return false;
    }

    SkASSERT(resource);

    // When a non-shareable resource's CB and Usage refs are both zero, give it a chance to prepare
    // itself for reuse. On Dawn/WebGPU we use this to remap kXferCpuToGpu buffers asynchronously
    // so that they are already mapped before they come out of the cache again.
    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo &&
        resource->key().shareable() == Shareable::kNo &&
        removedRef == LastRemovedRef::kUsage) {
        resource->prepareForReturnToCache([resource] { resource->initialUsageRef(); });
        // Check if the resource was re-ref'ed. In that case exit without adding it to the queue.
        if (resource->hasUsageRef()) {
            return true;
        }
    }

    // We only allow one instance of a Resource to be in the return queue at a time. We do this so
    // that the ReturnQueue stays small and quick to process.
    //
    // Because we take CacheRefs to all Resources added to the ReturnQueue, we would be safe if we
    // decided to have multiple instances of a Resource. Even if an earlier returned instance of a
    // Resource triggers that Resource to get purged from the cache, the Resource itself wouldn't
    // get deleted until we drop all the CacheRefs in this ReturnQueue.
    if (*resource->accessReturnIndex() >= 0) {
        // If the resource is already in the return queue we promote the LastRemovedRef to be
        // kUsage if that is what is returned here.
        if (removedRef == LastRemovedRef::kUsage) {
            SkASSERT(*resource->accessReturnIndex() < (int)fReturnQueue.size());
            fReturnQueue[*resource->accessReturnIndex()].second = removedRef;
        }
        return true;
    }
#ifdef SK_DEBUG
    for (auto& nextResource : fReturnQueue) {
        SkASSERT(nextResource.first != resource);
    }
#endif

    fReturnQueue.push_back(std::make_pair(resource, removedRef));
    *resource->accessReturnIndex() = fReturnQueue.size() - 1;
    resource->refCache();
    return true;
}
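// An example of the promotion rule above (a hypothetical interleaving): if a
// resource's last command buffer ref is dropped first, it enters the queue as
// (resource, kCommandBuffer). If its last usage ref is then dropped before the
// queue is processed, the existing entry is promoted to (resource, kUsage) rather
// than a second entry being appended, so returnResourceToCache() still sees that
// the usage ref went away and re-inserts the resource into fResourceMap.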

bool ResourceCache::processReturnedResources() {
    // We need to move the returned Resources off of the ReturnQueue before we start processing
    // them so that we can drop the fReturnMutex. When we process a Resource we may need to grab
    // its UnrefMutex. This could cause a deadlock if on another thread the Resource has the
    // UnrefMutex and is waiting on the ReturnMutex to be free.
    ReturnQueue tempQueue;
    {
        SkAutoMutexExclusive locked(fReturnMutex);
        // TODO: Instead of doing a copy of the vector, we may be able to improve the performance
        // here by storing some form of linked list, then just moving the pointer to the first
        // element and resetting the ReturnQueue's top element to nullptr.
        tempQueue = fReturnQueue;
        fReturnQueue.clear();
        for (auto& nextResource : tempQueue) {
            auto [resource, ref] = nextResource;
            SkASSERT(*resource->accessReturnIndex() >= 0);
            *resource->accessReturnIndex() = -1;
        }
    }

    if (tempQueue.empty()) {
        return false;
    }

    // Trace after the lock has been released so we can simply record the tempQueue size.
    TRACE_EVENT1("skia.gpu.cache", TRACE_FUNC, "count", tempQueue.size());

    for (auto& nextResource : tempQueue) {
        auto [resource, ref] = nextResource;
        // We need this check here to handle the following scenario. A Resource is sitting in the
        // ReturnQueue (say from a kUsage last ref) and the Resource still has a command buffer
        // ref out in the wild. When the ResourceCache calls processReturnedResources it locks the
        // ReturnMutex. Immediately after this, the command buffer ref is released on another
        // thread. The Resource cannot be added to the ReturnQueue since the lock is held. Back in
        // the ResourceCache, we drop the ReturnMutex, and when we try to return the Resource we
        // will see that it is purgeable. If we are overbudget it is possible that the Resource
        // gets purged from the ResourceCache at this time, setting its cache index to -1. The
        // unrefCache call will actually block here on the Resource's UnrefMutex, which is held by
        // the command buffer ref. Eventually the command buffer ref thread will get to run again,
        // and with the ReturnMutex lock dropped the Resource will get added to the ReturnQueue.
        // At this point the first unrefCache call will continue on the main ResourceCache thread.
        // When we call processReturnedResources the next time, we don't want this Resource added
        // back into the cache, thus we have the check here. The Resource will then get deleted
        // when we call unrefCache below to remove the cache ref added from the ReturnQueue.
        if (*resource->accessCacheIndex() != -1) {
            this->returnResourceToCache(resource, ref);
        }
        // Remove the cache ref held by the ReturnQueue.
        resource->unrefCache();
    }
    return true;
}

void ResourceCache::returnResourceToCache(Resource* resource, LastRemovedRef removedRef) {
    // A resource should not have been destroyed when placed into the return queue. Also, the
    // cache always empties the return queue before purging any resources from the cache itself.
    // When the cache releases/abandons all of its resources, it first invalidates the return
    // queue so no new resources can be added. Thus we should not end up in a situation where a
    // resource gets destroyed after it was added to the return queue.
    SkASSERT(!resource->wasDestroyed());

    SkASSERT(this->isInCache(resource));
    if (removedRef == LastRemovedRef::kUsage) {
        if (resource->key().shareable() == Shareable::kYes) {
            // Shareable resources should still be in the cache.
            SkASSERT(fResourceMap.find(resource->key()));
        } else {
            SkDEBUGCODE(resource->fNonShareableInCache = true;)
            resource->setLabel("Scratch");
            fResourceMap.insert(resource->key(), resource);
            if (resource->budgeted() == skgpu::Budgeted::kNo) {
                resource->makeBudgeted();
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    }

    // If we weren't using multiple threads, it would be ok to assume a resource that isn't
    // purgeable must be in the non-purgeable array. However, since resources can be unreffed from
    // multiple threads, it is possible that a resource became purgeable while we are in the
    // middle of returning resources. For example, a resource could have 1 usage and 1 command
    // buffer ref. We then unref the usage, which puts the resource in the return queue. Then the
    // ResourceCache thread locks the ReturnQueue as it returns the Resource. At this same time
    // another thread unrefs the command buffer usage but can't add the Resource to the
    // ReturnQueue as it is locked (though the command buffer ref has been reduced to zero). When
    // we are processing the Resource (from the kUsage ref) to return it to the cache, it will
    // look like it is purgeable since all refs are zero. Thus we will move the Resource from the
    // non-purgeable array to the purgeable queue. Later, when we return the command buffer ref,
    // the Resource will have already been moved to the purgeable queue and we don't need to do it
    // again.
    if (!resource->isPurgeable() || this->inPurgeableQueue(resource)) {
        this->validate();
        return;
    }

    this->setResourceTimestamp(resource, this->getNextTimestamp());

    this->removeFromNonpurgeableArray(resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kYes) {
        this->purgeResource(resource);
    } else {
        resource->updateAccessTime();
        fPurgeableQueue.insert(resource);
        fPurgeableBytes += resource->gpuMemorySize();
    }
    this->validate();
}

void ResourceCache::addToNonpurgeableArray(Resource* resource) {
    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->accessCacheIndex() = index;
}

void ResourceCache::removeFromNonpurgeableArray(Resource* resource) {
    int* index = resource->accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    Resource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    *index = -1;
}
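// For example (illustrative), removing B from [A, B, C, D] moves the tail D into
// B's slot, yielding [A, D, C], and sets B's cache index to -1. The order of
// fNonpurgeableResources carries no meaning, so the swap-and-pop is safe and O(1).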

void ResourceCache::removeFromPurgeableQueue(Resource* resource) {
    fPurgeableQueue.remove(resource);
    fPurgeableBytes -= resource->gpuMemorySize();
    // SkTDPQueue will set the index back to -1 in debug builds, but we are using the index as a
    // flag for whether the Resource has been purged from the cache or not. So we need to make sure
    // it always gets set.
    *resource->accessCacheIndex() = -1;
}

bool ResourceCache::inPurgeableQueue(Resource* resource) const {
    SkASSERT(this->isInCache(resource));
    int index = *resource->accessCacheIndex();
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    return false;
}

void ResourceCache::purgeResource(Resource* resource) {
    SkASSERT(resource->isPurgeable());

    TRACE_EVENT_INSTANT1("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                         "size", resource->gpuMemorySize());

    fResourceMap.remove(resource->key(), resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo) {
        SkASSERT(this->inPurgeableQueue(resource));
        this->removeFromPurgeableQueue(resource);
    } else {
        SkASSERT(!this->isInCache(resource));
    }

    fBudgetedBytes -= resource->gpuMemorySize();
    resource->unrefCache();
}

void ResourceCache::purgeAsNeeded() {
    ASSERT_SINGLE_OWNER

    if (this->overbudget() && fProxyCache) {
        fProxyCache->freeUniquelyHeld();

        // After the proxy cache frees resources, we need to return those resources to the cache.
        this->processReturnedResources();
    }
    while (this->overbudget() && fPurgeableQueue.count()) {
        Resource* resource = fPurgeableQueue.peek();
        SkASSERT(!resource->wasDestroyed());
        SkASSERT(fResourceMap.find(resource->key()));

        if (resource->timestamp() == kMaxTimestamp) {
            // If we hit a resource that is at kMaxTimestamp, then we've hit the part of the
            // purgeable queue with all zero-sized resources. We don't want to actually remove
            // those, so we just break here.
            SkASSERT(resource->gpuMemorySize() == 0);
            break;
        }

        this->purgeResource(resource);
    }

    this->validate();
}

void ResourceCache::purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    ASSERT_SINGLE_OWNER
    this->purgeResources(&purgeTime);
}

void ResourceCache::purgeResources() {
    ASSERT_SINGLE_OWNER
    this->purgeResources(nullptr);
}

void ResourceCache::purgeResources(const StdSteadyClock::time_point* purgeTime) {
    TRACE_EVENT0("skia.gpu.cache", TRACE_FUNC);
    if (fProxyCache) {
        fProxyCache->purgeProxiesNotUsedSince(purgeTime);
    }
    this->processReturnedResources();

    // Early out if the very first item is too new to purge, to avoid sorting the queue when
    // nothing will be deleted.
    if (fPurgeableQueue.count() &&
        purgeTime &&
        fPurgeableQueue.peek()->lastAccessTime() >= *purgeTime) {
        return;
    }

    // Sort the queue.
    fPurgeableQueue.sort();

    // Make a list of the resources to delete.
    SkTDArray<Resource*> resourcesToPurge;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        Resource* resource = fPurgeableQueue.at(i);

        const skgpu::StdSteadyClock::time_point resourceTime = resource->lastAccessTime();
        if (purgeTime && resourceTime >= *purgeTime) {
            // All later iterations will be too recently used to purge.
            break;
        }
        SkASSERT(resource->isPurgeable());
        *resourcesToPurge.append() = resource;
    }

    // Delete the resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue.
    for (int i = 0; i < resourcesToPurge.size(); i++) {
        this->purgeResource(resourcesToPurge[i]);
    }

    // Since we processed returned resources at the start of this call, we could still be over
    // budget even after purging resources based on purgeTime. So we call purgeAsNeeded here at
    // the end.
    this->purgeAsNeeded();
}

uint32_t ResourceCache::getNextTimestamp() {
    // If the timestamp were allowed to wrap, resources that receive a timestamp after the wrap
    // would appear older than all of the existing resources, so we reassign every timestamp
    // instead. We wrap one value early, when we reach kMaxTimestamp, so that we can continue to
    // use kMaxTimestamp as a special case for zero-sized resources.
    if (fTimestamp == kMaxTimestamp) {
        fTimestamp = 0;
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be
            // extremely rare.
            SkTDArray<Resource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
                } else {
                    // Correct the index in the non-purgeable array stored on the resource
                    // post-sort.
                    *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                    this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
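// A worked sketch of the reassignment above (hypothetical values): with resources
// whose old timestamps are {3, 7} (purgeable) and {5, 9} (non-purgeable), the merge
// walks them in order 3, 5, 7, 9 and assigns new timestamps 0, 1, 2, 3, preserving
// relative age across both containers. Zero-sized resources are pinned back to
// kMaxTimestamp by setResourceTimestamp() below, so they keep sorting behind every
// real allocation in the purgeable queue.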

void ResourceCache::setResourceTimestamp(Resource* resource, uint32_t timestamp) {
    // We always set the timestamp for zero-sized resources to be kMaxTimestamp.
    if (resource->gpuMemorySize() == 0) {
        timestamp = kMaxTimestamp;
    }
    resource->setTimestamp(timestamp);
}
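// One consequence worth noting (a summary of existing behavior): because zero-sized
// resources always carry kMaxTimestamp, they sort to the very back of
// fPurgeableQueue, and purgeAsNeeded() stops as soon as it peeks one, since purging
// them would not reduce the budget anyway.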

void ResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

////////////////////////////////////////////////////////////////////////////////

const GraphiteResourceKey& ResourceCache::MapTraits::GetKey(const Resource& r) {
    return r.key();
}

uint32_t ResourceCache::MapTraits::Hash(const GraphiteResourceKey& key) {
    return key.hash();
}

bool ResourceCache::CompareTimestamp(Resource* const& a, Resource* const& b) {
    return a->timestamp() < b->timestamp();
}

int* ResourceCache::AccessResourceIndex(Resource* const& res) {
    return res->accessCacheIndex();
}

#ifdef SK_DEBUG
void ResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        int fShareable;
        int fScratch;
        size_t fBudgetedBytes;
        size_t fPurgeableBytes;
        const ResourceMap* fResourceMap;
        const PurgeableQueue* fPurgeableQueue;

        Stats(const ResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fResourceMap = &cache->fResourceMap;
            fPurgeableQueue = &cache->fPurgeableQueue;
        }

        void update(Resource* resource) {
            const GraphiteResourceKey& key = resource->key();
            SkASSERT(key.isValid());

            // We should always have at least 1 cache ref.
            SkASSERT(resource->hasCacheRef());

            // All resources in the cache are owned. If we track wrapped resources in the cache
            // we'll need to update this check.
            SkASSERT(resource->ownership() == Ownership::kOwned);

            // We track scratch (non-shareable, no usage refs, has been returned to cache) and
            // shareable resources here as those should be the only things in the fResourceMap. A
            // non-shareable resource that does not meet the scratch criteria cannot be given back
            // out for cache requests. After processing all the resources we assert that
            // fScratch + fShareable equals the count in the fResourceMap.
            if (resource->isUsableAsScratch()) {
                SkASSERT(key.shareable() == Shareable::kNo);
                SkASSERT(!resource->hasUsageRef());
                ++fScratch;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            } else if (key.shareable() == Shareable::kNo) {
                SkASSERT(!fResourceMap->has(resource, key));
            } else {
                SkASSERT(key.shareable() == Shareable::kYes);
                ++fShareable;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            }

            if (resource->budgeted() == skgpu::Budgeted::kYes) {
                fBudgetedBytes += resource->gpuMemorySize();
            }

            if (resource->gpuMemorySize() == 0) {
                SkASSERT(resource->timestamp() == kMaxTimestamp);
            } else {
                SkASSERT(resource->timestamp() < kMaxTimestamp);
            }

            int index = *resource->accessCacheIndex();
            if (index < fPurgeableQueue->count() && fPurgeableQueue->at(index) == resource) {
                SkASSERT(resource->isPurgeable());
                fPurgeableBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fResourceMap.foreach([&](const Resource& resource) {
            SkASSERT(resource.isUsableAsScratch() || resource.key().shareable() == Shareable::kYes);
            SkASSERT(resource.budgeted() == skgpu::Budgeted::kYes);
            count++;
        });
        SkASSERT(count == fResourceMap.count());
    }

    // In the below checks we can assert that anything in the purgeable queue is purgeable because
    // we won't put a Resource into that queue unless all refs are zero. Thus there is no way for
    // that resource to be made non-purgeable without going through the cache (which would move it
    // back to the non-purgeable array).
    //
    // However, we can't say the same for things in the non-purgeable array. It is possible that
    // Resources have removed all their refs (thus technically becoming purgeable) but have not
    // been processed back into the cache yet. Thus we may not have moved those resources to the
    // purgeable queue yet. It's also possible that a Resource hasn't been added to the
    // ReturnQueue yet (the thread paused between the unref and adding to the ReturnQueue), so we
    // can't even assert that it is either non-purgeable or in the ReturnQueue.
    Stats stats(this);
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(*fNonpurgeableResources[i]->accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        SkASSERT(!this->inPurgeableQueue(fNonpurgeableResources[i]));
        stats.update(fNonpurgeableResources[i]);
    }
    bool firstPurgeableIsSizeZero = false;
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (i == 0) {
            firstPurgeableIsSizeZero = (fPurgeableQueue.at(0)->gpuMemorySize() == 0);
        }
        if (firstPurgeableIsSizeZero) {
            // If the first purgeable item (i.e. the least recently used) is sized zero, then all
            // other purgeable resources must also be sized zero, since they should all have a
            // timestamp of kMaxTimestamp.
            SkASSERT(fPurgeableQueue.at(i)->gpuMemorySize() == 0);
        }
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT((stats.fScratch + stats.fShareable) == fResourceMap.count());
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fPurgeableBytes == fPurgeableBytes);
}

bool ResourceCache::isInCache(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if defined(GPU_TEST_UTILS)

int ResourceCache::numFindableResources() const {
    return fResourceMap.count();
}

void ResourceCache::setMaxBudget(size_t bytes) {
    fMaxBytes = bytes;
    this->processReturnedResources();
    this->purgeAsNeeded();
}

Resource* ResourceCache::topOfPurgeableQueue() {
    if (!fPurgeableQueue.count()) {
        return nullptr;
    }
    return fPurgeableQueue.peek();
}

void ResourceCache::visitTextures(
        const std::function<void(const Texture*, bool purgeable)>& func) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const Texture* tex = fNonpurgeableResources[i]->asTexture()) {
            func(tex, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const Texture* tex = fPurgeableQueue.at(i)->asTexture()) {
            func(tex, /* purgeable= */ true);
        }
    }
}

#endif // defined(GPU_TEST_UTILS)

} // namespace skgpu::graphite