1 /*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/GrThreadSafeCache.h"
9
10 #include "include/core/SkSize.h"
11 #include "include/core/SkTypes.h"
12 #include "include/gpu/GpuTypes.h"
13 #include "include/gpu/ganesh/GrDirectContext.h"
14 #include "include/gpu/ganesh/GrTypes.h"
15 #include "include/private/base/SkTo.h"
16 #include "include/private/gpu/ganesh/GrTypesPriv.h"
17 #include "src/gpu/GpuTypesPriv.h"
18 #include "src/gpu/Swizzle.h"
19 #include "src/gpu/ganesh/GrCaps.h"
20 #include "src/gpu/ganesh/GrDirectContextPriv.h"
21 #include "src/gpu/ganesh/GrGpuBuffer.h"
22 #include "src/gpu/ganesh/GrProxyProvider.h"
23 #include "src/gpu/ganesh/GrRenderTargetProxy.h" // IWYU pragma: keep
24 #include "src/gpu/ganesh/GrResourceCache.h"
25 #include "src/gpu/ganesh/GrSurface.h"
26 #include "src/gpu/ganesh/GrTexture.h"
27
28 #include <chrono>
29 #include <functional>
30
31 class GrResourceProvider;
32 class SkData;
33 enum class SkBackingFit;
34
// Releases the vertex payload (CPU data and/or GPU buffer) held by this VertexData.
GrThreadSafeCache::VertexData::~VertexData () {
    this->reset();
}
38
// The cache starts with an empty free list; Entry objects are allocated lazily
// (see getEntry) and recycled onto fFreeEntryList rather than destroyed.
GrThreadSafeCache::GrThreadSafeCache()
    : fFreeEntryList(nullptr) {
}
42
// Drops every cached entry (and the refs they hold) before the cache goes away.
GrThreadSafeCache::~GrThreadSafeCache() {
    this->dropAllRefs();
}
46
47 #if defined(GPU_TEST_UTILS)
numEntries() const48 int GrThreadSafeCache::numEntries() const {
49 SkAutoSpinlock lock{fSpinLock};
50
51 return fUniquelyKeyedEntryMap.count();
52 }
53
approxBytesUsedForHash() const54 size_t GrThreadSafeCache::approxBytesUsedForHash() const {
55 SkAutoSpinlock lock{fSpinLock};
56
57 return fUniquelyKeyedEntryMap.approxBytesUsed();
58 }
59 #endif
60
dropAllRefs()61 void GrThreadSafeCache::dropAllRefs() {
62 SkAutoSpinlock lock{fSpinLock};
63
64 fUniquelyKeyedEntryMap.reset();
65 while (auto tmp = fUniquelyKeyedEntryList.head()) {
66 fUniquelyKeyedEntryList.remove(tmp);
67 this->recycleEntry(tmp);
68 }
69 // TODO: should we empty out the fFreeEntryList and reset fEntryAllocator?
70 }
71
72 // TODO: If iterating becomes too expensive switch to using something like GrIORef for the
73 // GrSurfaceProxy
dropUniqueRefs(GrResourceCache * resourceCache)74 void GrThreadSafeCache::dropUniqueRefs(GrResourceCache* resourceCache) {
75 SkAutoSpinlock lock{fSpinLock};
76
77 // Iterate from LRU to MRU
78 Entry* cur = fUniquelyKeyedEntryList.tail();
79 Entry* prev = cur ? cur->fPrev : nullptr;
80
81 while (cur) {
82 if (resourceCache && !resourceCache->overBudget()) {
83 return;
84 }
85
86 if (cur->uniquelyHeld()) {
87 fUniquelyKeyedEntryMap.remove(cur->key());
88 fUniquelyKeyedEntryList.remove(cur);
89 this->recycleEntry(cur);
90 }
91
92 cur = prev;
93 prev = cur ? cur->fPrev : nullptr;
94 }
95 }
96
dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime)97 void GrThreadSafeCache::dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime) {
98 SkAutoSpinlock lock{fSpinLock};
99
100 // Iterate from LRU to MRU
101 Entry* cur = fUniquelyKeyedEntryList.tail();
102 Entry* prev = cur ? cur->fPrev : nullptr;
103
104 while (cur) {
105 if (cur->fLastAccess >= purgeTime) {
106 // This entry and all the remaining ones in the list will be newer than 'purgeTime'
107 return;
108 }
109
110 if (cur->uniquelyHeld()) {
111 fUniquelyKeyedEntryMap.remove(cur->key());
112 fUniquelyKeyedEntryList.remove(cur);
113 this->recycleEntry(cur);
114 }
115
116 cur = prev;
117 prev = cur ? cur->fPrev : nullptr;
118 }
119 }
120
makeExistingEntryMRU(Entry * entry)121 void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
122 SkASSERT(fUniquelyKeyedEntryList.isInList(entry));
123
124 entry->fLastAccess = skgpu::StdSteadyClock::now();
125 fUniquelyKeyedEntryList.remove(entry);
126 fUniquelyKeyedEntryList.addToHead(entry);
127 }
128
internalFind(const skgpu::UniqueKey & key)129 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
130 const skgpu::UniqueKey& key) {
131 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
132 if (tmp) {
133 this->makeExistingEntryMRU(tmp);
134 return { tmp->view(), tmp->refCustomData() };
135 }
136
137 return {};
138 }
139
140 #ifdef SK_DEBUG
has(const skgpu::UniqueKey & key)141 bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
142 SkAutoSpinlock lock{fSpinLock};
143
144 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
145 return SkToBool(tmp);
146 }
147 #endif
148
find(const skgpu::UniqueKey & key)149 GrSurfaceProxyView GrThreadSafeCache::find(const skgpu::UniqueKey& key) {
150 SkAutoSpinlock lock{fSpinLock};
151
152 GrSurfaceProxyView view;
153 std::tie(view, std::ignore) = this->internalFind(key);
154 return view;
155 }
156
findWithData(const skgpu::UniqueKey & key)157 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
158 const skgpu::UniqueKey& key) {
159 SkAutoSpinlock lock{fSpinLock};
160
161 return this->internalFind(key);
162 }
163
getEntry(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)164 GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
165 const GrSurfaceProxyView& view) {
166 Entry* entry;
167
168 if (fFreeEntryList) {
169 entry = fFreeEntryList;
170 fFreeEntryList = entry->fNext;
171 entry->fNext = nullptr;
172
173 entry->set(key, view);
174 } else {
175 entry = fEntryAllocator.make<Entry>(key, view);
176 }
177
178 return this->makeNewEntryMRU(entry);
179 }
180
makeNewEntryMRU(Entry * entry)181 GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
182 entry->fLastAccess = skgpu::StdSteadyClock::now();
183 fUniquelyKeyedEntryList.addToHead(entry);
184 fUniquelyKeyedEntryMap.add(entry);
185 return entry;
186 }
187
getEntry(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData)188 GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
189 sk_sp<VertexData> vertData) {
190 Entry* entry;
191
192 if (fFreeEntryList) {
193 entry = fFreeEntryList;
194 fFreeEntryList = entry->fNext;
195 entry->fNext = nullptr;
196
197 entry->set(key, std::move(vertData));
198 } else {
199 entry = fEntryAllocator.make<Entry>(key, std::move(vertData));
200 }
201
202 return this->makeNewEntryMRU(entry);
203 }
204
recycleEntry(Entry * dead)205 void GrThreadSafeCache::recycleEntry(Entry* dead) {
206 SkASSERT(!dead->fPrev && !dead->fNext && !dead->fList);
207
208 dead->makeEmpty();
209
210 dead->fNext = fFreeEntryList;
211 fFreeEntryList = dead;
212 }
213
internalAdd(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)214 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
215 const skgpu::UniqueKey& key,
216 const GrSurfaceProxyView& view) {
217 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
218 if (!tmp) {
219 tmp = this->getEntry(key, view);
220
221 SkASSERT(fUniquelyKeyedEntryMap.find(key));
222 }
223
224 return { tmp->view(), tmp->refCustomData() };
225 }
226
add(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)227 GrSurfaceProxyView GrThreadSafeCache::add(const skgpu::UniqueKey& key,
228 const GrSurfaceProxyView& view) {
229 SkAutoSpinlock lock{fSpinLock};
230
231 GrSurfaceProxyView newView;
232 std::tie(newView, std::ignore) = this->internalAdd(key, view);
233 return newView;
234 }
235
addWithData(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)236 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
237 const skgpu::UniqueKey& key,
238 const GrSurfaceProxyView& view) {
239 SkAutoSpinlock lock{fSpinLock};
240
241 return this->internalAdd(key, view);
242 }
243
findOrAdd(const skgpu::UniqueKey & key,const GrSurfaceProxyView & v)244 GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const skgpu::UniqueKey& key,
245 const GrSurfaceProxyView& v) {
246 SkAutoSpinlock lock{fSpinLock};
247
248 GrSurfaceProxyView view;
249 std::tie(view, std::ignore) = this->internalFind(key);
250 if (view) {
251 return view;
252 }
253
254 std::tie(view, std::ignore) = this->internalAdd(key, v);
255 return view;
256 }
257
findOrAddWithData(const skgpu::UniqueKey & key,const GrSurfaceProxyView & v)258 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
259 const skgpu::UniqueKey& key,
260 const GrSurfaceProxyView& v) {
261 SkAutoSpinlock lock{fSpinLock};
262
263 auto [view, data] = this->internalFind(key);
264 if (view) {
265 return { std::move(view), std::move(data) };
266 }
267
268 return this->internalAdd(key, v);
269 }
270
MakeVertexData(const void * vertices,int vertexCount,size_t vertexSize)271 sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(const void* vertices,
272 int vertexCount,
273 size_t vertexSize) {
274 return sk_sp<VertexData>(new VertexData(vertices, vertexCount, vertexSize));
275 }
276
MakeVertexData(sk_sp<GrGpuBuffer> buffer,int vertexCount,size_t vertexSize)277 sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(sk_sp<GrGpuBuffer> buffer,
278 int vertexCount,
279 size_t vertexSize) {
280 return sk_sp<VertexData>(new VertexData(std::move(buffer), vertexCount, vertexSize));
281 }
282
283 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
internalFindVerts(const skgpu::UniqueKey & key)284 GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
285 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
286 if (tmp) {
287 this->makeExistingEntryMRU(tmp);
288 return { tmp->vertexData(), tmp->refCustomData() };
289 }
290
291 return {};
292 }
293
294 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
findVertsWithData(const skgpu::UniqueKey & key)295 GrThreadSafeCache::findVertsWithData(const skgpu::UniqueKey& key) {
296 SkAutoSpinlock lock{fSpinLock};
297
298 return this->internalFindVerts(key);
299 }
300
internalAddVerts(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData,IsNewerBetter isNewerBetter)301 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalAddVerts(
302 const skgpu::UniqueKey& key,
303 sk_sp<VertexData> vertData,
304 IsNewerBetter isNewerBetter) {
305 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
306 if (!tmp) {
307 tmp = this->getEntry(key, std::move(vertData));
308
309 SkASSERT(fUniquelyKeyedEntryMap.find(key));
310 } else if (isNewerBetter(tmp->getCustomData(), key.getCustomData())) {
311 // This orphans any existing uses of the prior vertex data but ensures the best
312 // version is in the cache.
313 tmp->set(key, std::move(vertData));
314 }
315
316 return { tmp->vertexData(), tmp->refCustomData() };
317 }
318
addVertsWithData(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData,IsNewerBetter isNewerBetter)319 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::addVertsWithData(
320 const skgpu::UniqueKey& key,
321 sk_sp<VertexData> vertData,
322 IsNewerBetter isNewerBetter) {
323 SkAutoSpinlock lock{fSpinLock};
324
325 return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
326 }
327
remove(const skgpu::UniqueKey & key)328 void GrThreadSafeCache::remove(const skgpu::UniqueKey& key) {
329 SkAutoSpinlock lock{fSpinLock};
330
331 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
332 if (tmp) {
333 fUniquelyKeyedEntryMap.remove(key);
334 fUniquelyKeyedEntryList.remove(tmp);
335 this->recycleEntry(tmp);
336 }
337 }
338
// Creates an uninstantiated lazy render-target proxy plus a Trampoline. The
// proxy's instantiation callback reads the trampoline's fProxy, so the actual
// backing texture can be supplied after creation by whoever fills in the
// trampoline (presumably once the real rendering has occurred -- confirm
// against callers).
std::tuple<GrSurfaceProxyView, sk_sp<GrThreadSafeCache::Trampoline>>
GrThreadSafeCache::CreateLazyView(GrDirectContext* dContext,
                                  GrColorType origCT,
                                  SkISize dimensions,
                                  GrSurfaceOrigin origin,
                                  SkBackingFit fit) {
    GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
    const GrCaps* caps = dContext->priv().caps();

    constexpr int kSampleCnt = 1;
    // 'origCT' may not be renderable on this backend; fall back to a supported
    // color type/format pair.
    auto [newCT, format] = caps->getFallbackColorTypeAndFormat(origCT, kSampleCnt);

    if (newCT == GrColorType::kUnknown) {
        // No renderable fallback exists -- signal failure with an empty view.
        return {GrSurfaceProxyView(nullptr), nullptr};
    }

    sk_sp<Trampoline> trampoline(new Trampoline);

    GrProxyProvider::TextureInfo texInfo{skgpu::Mipmapped::kNo, GrTextureType::k2D};

    // The lambda captures 'trampoline' by value, sharing ownership with the
    // returned sk_sp; it resolves to whatever texture the trampoline's proxy
    // holds at instantiation time.
    sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
            [trampoline](
                    GrResourceProvider* resourceProvider,
                    const GrSurfaceProxy::LazySurfaceDesc&) -> GrSurfaceProxy::LazyCallbackResult {
                if (!resourceProvider || !trampoline->fProxy ||
                    !trampoline->fProxy->isInstantiated()) {
                    // No backing texture available; 'true' releases the callback.
                    return GrSurfaceProxy::LazyCallbackResult(nullptr, true);
                }

                SkASSERT(!trampoline->fProxy->peekTexture()->getUniqueKey().isValid());
                return GrSurfaceProxy::LazyCallbackResult(
                        sk_ref_sp(trampoline->fProxy->peekTexture()));
            },
            format,
            dimensions,
            kSampleCnt,
            GrInternalSurfaceFlags::kNone,
            &texInfo,
            GrMipmapStatus::kNotAllocated,
            fit,
            skgpu::Budgeted::kYes,
            GrProtected::kNo,
            /* wrapsVkSecondaryCB */ false,
            GrSurfaceProxy::UseAllocator::kYes);

    // TODO: It seems like this 'newCT' usage should be 'origCT' but this is
    // what skgpu::ganesh::SurfaceDrawContext::MakeWithFallback does
    skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(format, newCT);

    return {{std::move(proxy), origin, swizzle}, std::move(trampoline)};
}
390