/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/graphite/Recorder.h"

#include "include/core/SkBitmap.h"
#include "include/core/SkCanvas.h"
#include "include/core/SkColorSpace.h"
#include "include/core/SkTraceMemoryDump.h"
#include "include/effects/SkRuntimeEffect.h"
#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/GraphiteTypes.h"
#include "include/gpu/graphite/ImageProvider.h"
#include "include/gpu/graphite/Recording.h"

#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/Device.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/PathAtlas.h"
#include "src/gpu/graphite/PipelineData.h"
#include "src/gpu/graphite/ProxyCache.h"
#include "src/gpu/graphite/RasterPathAtlas.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ScratchResourceManager.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/Texture.h"
#include "src/gpu/graphite/UploadBufferManager.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/TaskList.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/gpu/graphite/text/TextAtlasManager.h"
#include "src/image/SkImage_Base.h"
#include "src/text/gpu/StrikeCache.h"
#include "src/text/gpu/TextBlobRedrawCoordinator.h"

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
#define ASSERT_SINGLE_OWNER_PRIV SKGPU_ASSERT_SINGLE_OWNER(fRecorder->singleOwner())

/*
 * The default image provider doesn't perform any conversion so, by default, Graphite won't
 * draw any non-Graphite-backed images.
 */
class DefaultImageProvider final : public ImageProvider {
public:
    static sk_sp<DefaultImageProvider> Make() { return sk_sp(new DefaultImageProvider); }

    sk_sp<SkImage> findOrCreate(Recorder* recorder,
                                const SkImage* image,
                                SkImage::RequiredProperties) override {
        SkASSERT(!as_IB(image)->isGraphiteBacked());

        return nullptr;
    }

private:
    DefaultImageProvider() {}
};
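
/*
 * Example (an illustrative sketch, not part of this file): a client that wants
 * non-Graphite-backed SkImages to be drawable can install its own provider via
 * RecorderOptions::fImageProvider that uploads them on demand. This assumes the
 * public Graphite image API in include/gpu/graphite/Image.h:
 *
 *   class UploadingImageProvider final : public ImageProvider {
 *   public:
 *       sk_sp<SkImage> findOrCreate(Recorder* recorder,
 *                                   const SkImage* image,
 *                                   SkImage::RequiredProperties props) override {
 *           // Upload the CPU-backed image, producing a Graphite-backed one.
 *           return SkImages::TextureFromImage(recorder, image, props);
 *       }
 *   };
 */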

/**************************************************************************************************/
RecorderOptions::RecorderOptions() = default;
RecorderOptions::RecorderOptions(const RecorderOptions&) = default;
RecorderOptions::~RecorderOptions() = default;

/**************************************************************************************************/
static uint32_t next_id() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidGenID);
    return id;
}

Recorder::Recorder(sk_sp<SharedContext> sharedContext,
                   const RecorderOptions& options,
                   const Context* context)
        : fSharedContext(std::move(sharedContext))
        , fRuntimeEffectDict(std::make_unique<RuntimeEffectDictionary>())
        , fRootTaskList(new TaskList)
        , fRootUploads(new UploadList)
        , fTextureDataCache(new TextureDataCache)
        , fProxyReadCounts(new ProxyReadCountMap)
        , fUniqueID(next_id())
        , fAtlasProvider(std::make_unique<AtlasProvider>(this))
        , fTokenTracker(std::make_unique<TokenTracker>())
        , fStrikeCache(std::make_unique<sktext::gpu::StrikeCache>())
        , fTextBlobCache(std::make_unique<sktext::gpu::TextBlobRedrawCoordinator>(fUniqueID)) {
    fClientImageProvider = options.fImageProvider;
    if (!fClientImageProvider) {
        fClientImageProvider = DefaultImageProvider::Make();
    }

    if (context) {
        fOwnedResourceProvider = nullptr;
        fResourceProvider = context->priv().resourceProvider();
    } else {
        fOwnedResourceProvider = fSharedContext->makeResourceProvider(
                this->singleOwner(),
                fUniqueID,
                options.fGpuBudgetInBytes,
                /* avoidBufferAlloc= */ false);
        fResourceProvider = fOwnedResourceProvider.get();
    }
    fUploadBufferManager = std::make_unique<UploadBufferManager>(fResourceProvider,
                                                                 fSharedContext->caps());
    fDrawBufferManager = std::make_unique<DrawBufferManager>(fResourceProvider,
                                                             fSharedContext->caps(),
                                                             fUploadBufferManager.get());

    SkASSERT(fResourceProvider);
}

Recorder::~Recorder() {
    ASSERT_SINGLE_OWNER
    // Any finished procs that haven't been passed to a Recording fail
    for (int i = 0; i < fFinishedProcs.size(); ++i) {
        fFinishedProcs[i]->setFailureResult();
    }

    for (auto& device : fTrackedDevices) {
        // deregisterDevice() may have left an entry as null previously.
        if (device) {
            device->abandonRecorder();
        }
    }
#if defined(GPU_TEST_UTILS)
    if (fContext) {
        fContext->priv().deregisterRecorder(this);
    }
#endif
}

BackendApi Recorder::backend() const { return fSharedContext->backend(); }

std::unique_ptr<Recording> Recorder::snap() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ASSERT_SINGLE_OWNER

    if (fTargetProxyData) {
        // Normally devices are marked immutable when their owning Surface goes away, but the
        // deferred canvas+device do not have a surface so mimic that operation. Do this before
        // flushing all other tracked devices to avoid a redundant flush.
        fTargetProxyDevice->setImmutable();
        fTargetProxyDevice.reset();
        fTargetProxyCanvas.reset();
    }
    // Collect all pending tasks on the deferred recording canvas and any other tracked device.
    this->priv().flushTrackedDevices();

    // Now that all devices have been flushed, extract all lazy proxies from the texture
    // data cache so that they can be instantiated easily when the Recording is inserted.
    std::unordered_set<sk_sp<TextureProxy>, Recording::ProxyHash> nonVolatileLazyProxies;
    std::unordered_set<sk_sp<TextureProxy>, Recording::ProxyHash> volatileLazyProxies;
    fTextureDataCache->foreach([&](TextureDataBlock block) {
        for (int j = 0; j < block.numTextures(); ++j) {
            const TextureDataBlock::SampledTexture& tex = block.texture(j);

            if (tex.first->isLazy()) {
                if (tex.first->isVolatile()) {
                    volatileLazyProxies.insert(tex.first);
                } else {
                    nonVolatileLazyProxies.insert(tex.first);
                }
            }
        }
    });

    // The scratch resources only need to be tracked until prepareResources() is finished, so
    // Recorder doesn't hold a persistent manager and it can be deleted when snap() returns.
    ScratchResourceManager scratchManager{fResourceProvider, std::move(fProxyReadCounts)};
    std::unique_ptr<Recording> recording(new Recording(fNextRecordingID++,
                                                       fUniqueID,
                                                       std::move(nonVolatileLazyProxies),
                                                       std::move(volatileLazyProxies),
                                                       std::move(fTargetProxyData),
                                                       std::move(fFinishedProcs)));
    // Allow the buffer managers to add any collected tasks for data transfer or initialization
    // before moving the root task list to the Recording.
    bool valid = fDrawBufferManager->transferToRecording(recording.get());

    // We create the Recording's full task list even if the DrawBufferManager failed because it is
    // a convenient way to ensure everything else is unmapped and reset for the next Recording.
    fUploadBufferManager->transferToRecording(recording.get());
    // Add a single task for all root uploads before the remaining rendering tasks, since those
    // tasks may depend on the uploads.
    if (fRootUploads->size() > 0) {
        recording->priv().taskList()->add(UploadTask::Make(fRootUploads.get()));
        SkASSERT(fRootUploads->size() == 0); // Drained by the newly added task
    }
    recording->priv().taskList()->add(std::move(*fRootTaskList));
    SkASSERT(!fRootTaskList->hasTasks());

    // In both the "task failed" case and the "everything is discarded" case, there's no work that
    // needs to be done in insertRecording(). However, we use nullptr as a failure signal, so
    // kDiscard will return a non-null Recording that has no tasks in it.
    valid &= recording->priv().taskList()->prepareResources(
            fResourceProvider, &scratchManager, fRuntimeEffectDict.get()) != Task::Status::kFail;
    if (!valid) {
        recording = nullptr;
        fAtlasProvider->invalidateAtlases();
    }

    // Remaining cleanup that must always happen regardless of success or failure
    fRuntimeEffectDict->reset();
    fProxyReadCounts = std::make_unique<ProxyReadCountMap>();
    fTextureDataCache = std::make_unique<TextureDataCache>();
    if (!this->priv().caps()->requireOrderedRecordings()) {
        fAtlasProvider->textAtlasManager()->evictAtlases();
    }

    return recording;
}
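
/*
 * Example (an illustrative sketch, not part of this file): the typical
 * record-then-replay flow pairs snap() with Context::insertRecording(), using
 * the public types from include/gpu/graphite/GraphiteTypes.h:
 *
 *   std::unique_ptr<Recording> recording = recorder->snap();
 *   if (recording) {
 *       InsertRecordingInfo info;
 *       info.fRecording = recording.get();
 *       context->insertRecording(info);   // replays the recorded tasks
 *       context->submit();                // sends the work to the GPU
 *   }
 */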

SkCanvas* Recorder::makeDeferredCanvas(const SkImageInfo& imageInfo,
                                       const TextureInfo& textureInfo) {
    if (fTargetProxyCanvas) {
        // Require snapping before requesting another canvas.
        SKGPU_LOG_W("Requested a new deferred canvas before snapping the previous one");
        return nullptr;
    }

    fTargetProxyData = std::make_unique<Recording::LazyProxyData>(
            this->priv().caps(), imageInfo.dimensions(), textureInfo);
    // Use kLoad for the initial load op since the purpose of a deferred canvas is to draw on top
    // of an existing, late-bound texture.
    fTargetProxyDevice = Device::Make(this,
                                      fTargetProxyData->refLazyProxy(),
                                      imageInfo.dimensions(),
                                      imageInfo.colorInfo(),
                                      {},
                                      LoadOp::kLoad);
    fTargetProxyCanvas = std::make_unique<SkCanvas>(fTargetProxyDevice);
    return fTargetProxyCanvas.get();
}
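
/*
 * Example (an illustrative sketch, not part of this file): a deferred canvas
 * lets a client record draws before the destination exists; the target is
 * bound later through InsertRecordingInfo::fTargetSurface when the Recording
 * is inserted:
 *
 *   SkCanvas* canvas = recorder->makeDeferredCanvas(imageInfo, textureInfo);
 *   canvas->drawPaint(paint);              // draws on top of the late-bound target
 *   std::unique_ptr<Recording> recording = recorder->snap();
 *
 *   // Later, once the real destination is known:
 *   InsertRecordingInfo info;
 *   info.fRecording = recording.get();
 *   info.fTargetSurface = surface.get();   // a Graphite-backed SkSurface
 *   context->insertRecording(info);
 */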

void Recorder::registerDevice(sk_sp<Device> device) {
    ASSERT_SINGLE_OWNER

    SkASSERT(device);

    // By taking a ref on tracked devices, the Recorder prevents the Device from being deleted on
    // another thread unless the Recorder has been destroyed or the device has abandoned its
    // recorder (e.g. was marked immutable).
    fTrackedDevices.emplace_back(std::move(device));
}

void Recorder::deregisterDevice(const Device* device) {
    ASSERT_SINGLE_OWNER
    for (int i = 0; i < fTrackedDevices.size(); ++i) {
        if (fTrackedDevices[i].get() == device) {
            // Don't modify the list structure of fTrackedDevices within this loop
            fTrackedDevices[i] = nullptr;
            break;
        }
    }
}

int Recorder::maxTextureSize() const {
    return this->priv().caps()->maxTextureSize();
}

BackendTexture Recorder::createBackendTexture(SkISize dimensions, const TextureInfo& info) {
    ASSERT_SINGLE_OWNER

    if (!info.isValid() || info.backend() != this->backend()) {
        return {};
    }
    return fResourceProvider->createBackendTexture(dimensions, info);
}
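
/*
 * Example (an illustrative sketch, not part of this file): textures created
 * this way are client-owned and must eventually be released with
 * deleteBackendTexture() once any GPU work using them has finished:
 *
 *   BackendTexture tex = recorder->createBackendTexture({256, 256}, textureInfo);
 *   if (tex.isValid()) {
 *       // ... wrap in an SkSurface/SkImage, record and submit work ...
 *       recorder->deleteBackendTexture(tex);
 *   }
 */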

#ifdef SK_BUILD_FOR_ANDROID

BackendTexture Recorder::createBackendTexture(AHardwareBuffer* hardwareBuffer,
                                              bool isRenderable,
                                              bool isProtectedContent,
                                              SkISize dimensions,
                                              bool fromAndroidWindow) const {
    if (fSharedContext->backend() != BackendApi::kVulkan) {
        SKGPU_LOG_W("Creating an AHardwareBuffer-backed BackendTexture is only supported with the "
                    "Vulkan backend.");
        return {};
    }
    return fResourceProvider->createBackendTexture(hardwareBuffer,
                                                   isRenderable,
                                                   isProtectedContent,
                                                   dimensions,
                                                   fromAndroidWindow);
}

#endif // SK_BUILD_FOR_ANDROID

bool Recorder::updateBackendTexture(const BackendTexture& backendTex,
                                    const SkPixmap srcData[],
                                    int numLevels,
                                    GpuFinishedProc finishedProc,
                                    GpuFinishedContext finishedContext) {
    ASSERT_SINGLE_OWNER

    auto releaseHelper = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (!backendTex.isValid() || backendTex.backend() != this->backend()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTex.info().mipmapped() == Mipmapped::kYes) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTex.dimensions().width(),
                                                        backendTex.dimensions().height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }

    SkColorType ct = srcData[0].colorType();

    if (!this->priv().caps()->areColorTypeAndTextureInfoCompatible(ct, backendTex.info())) {
        return false;
    }

    sk_sp<Texture> texture = this->priv().resourceProvider()->createWrappedTexture(backendTex, "");
    if (!texture) {
        return false;
    }
    texture->setReleaseCallback(std::move(releaseHelper));

    sk_sp<TextureProxy> proxy = TextureProxy::Wrap(std::move(texture));

    std::vector<MipLevel> mipLevels;
    mipLevels.resize(numLevels);

    for (int i = 0; i < numLevels; ++i) {
        SkASSERT(srcData[i].addr());
        SkASSERT(srcData[i].info().colorInfo() == srcData[0].info().colorInfo());

        mipLevels[i].fPixels = srcData[i].addr();
        mipLevels[i].fRowBytes = srcData[i].rowBytes();
    }

    // Src and dst colorInfo are the same
    const SkColorInfo& colorInfo = srcData[0].info().colorInfo();
    // Add UploadTask to Recorder
    UploadInstance upload = UploadInstance::Make(this,
                                                 std::move(proxy),
                                                 colorInfo, colorInfo,
                                                 mipLevels,
                                                 SkIRect::MakeSize(backendTex.dimensions()),
                                                 std::make_unique<ImageUploadContext>());
    if (!upload.isValid()) {
        SKGPU_LOG_E("Recorder::updateBackendTexture: Could not create UploadInstance");
        return false;
    }
    sk_sp<Task> uploadTask = UploadTask::Make(std::move(upload));

    // Need to flush any pending work in case it depends on this texture
    this->priv().flushTrackedDevices();

    this->priv().add(std::move(uploadTask));

    return true;
}
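
/*
 * Example (an illustrative sketch, not part of this file): uploading the full
 * pixel contents of a client-created texture. A mipmapped texture requires one
 * SkPixmap per level; a non-mipmapped one takes exactly one:
 *
 *   BackendTexture tex = recorder->createBackendTexture(bitmap.dimensions(), textureInfo);
 *   SkPixmap pixmap;
 *   if (tex.isValid() && bitmap.peekPixels(&pixmap)) {
 *       recorder->updateBackendTexture(tex, &pixmap, /* numLevels= */ 1,
 *                                      /* finishedProc= */ nullptr,
 *                                      /* finishedContext= */ nullptr);
 *   }
 */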

bool Recorder::updateCompressedBackendTexture(const BackendTexture& backendTex,
                                              const void* data,
                                              size_t dataSize,
                                              GpuFinishedProc finishedProc,
                                              GpuFinishedContext finishedContext) {
    ASSERT_SINGLE_OWNER

    auto releaseHelper = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (!backendTex.isValid() || backendTex.backend() != this->backend()) {
        return false;
    }

    if (!data) {
        return false;
    }

    sk_sp<Texture> texture = this->priv().resourceProvider()->createWrappedTexture(backendTex, "");
    if (!texture) {
        return false;
    }
    texture->setReleaseCallback(std::move(releaseHelper));

    sk_sp<TextureProxy> proxy = TextureProxy::Wrap(std::move(texture));

    // Add UploadTask to Recorder
    UploadInstance upload = UploadInstance::MakeCompressed(this,
                                                           std::move(proxy),
                                                           data,
                                                           dataSize);
    if (!upload.isValid()) {
        SKGPU_LOG_E("Recorder::updateCompressedBackendTexture: Could not create UploadInstance");
        return false;
    }
    sk_sp<Task> uploadTask = UploadTask::Make(std::move(upload));

    // Need to flush any pending work in case it depends on this texture
    this->priv().flushTrackedDevices();

    this->priv().add(std::move(uploadTask));

    return true;
}

void Recorder::deleteBackendTexture(const BackendTexture& texture) {
    ASSERT_SINGLE_OWNER

    if (!texture.isValid() || texture.backend() != this->backend()) {
        return;
    }
    fResourceProvider->deleteBackendTexture(texture);
}

void Recorder::addFinishInfo(const InsertFinishInfo& info) {
    if (info.fFinishedProc) {
        sk_sp<RefCntedCallback> callback =
                RefCntedCallback::Make(info.fFinishedProc, info.fFinishedContext);
        fFinishedProcs.push_back(std::move(callback));
    }
}

void Recorder::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    // We don't want to free the Uniform/TextureDataCaches or the Draw/UploadBufferManagers since
    // all their resources need to be held onto until a Recording is snapped. Once snapped, all
    // their held resources are released. The StrikeCache and TextBlobCache don't hold onto any
    // GPU resources.

    // Notify the atlas and resource providers to free any resources they can (this does not
    // include resources that are locked due to pending work).
    fAtlasProvider->freeGpuResources();

    fResourceProvider->freeGpuResources();

    // This is technically not GPU memory, but there's no other place for the client to tell us to
    // clean this up, and without any cleanup it can grow unbounded.
    fStrikeCache->freeAll();
}

void Recorder::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
    ASSERT_SINGLE_OWNER

    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;
    fResourceProvider->purgeResourcesNotUsedSince(purgeTime);
}
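
/*
 * Example (an illustrative sketch, not part of this file): a client that wants
 * to trim this Recorder's cache during idle periods might purge everything
 * that has sat unused for a few seconds:
 *
 *   recorder->performDeferredCleanup(std::chrono::seconds(5));
 *
 * whereas freeGpuResources() above releases all purgeable resources immediately.
 */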

size_t Recorder::currentBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentBudgetedBytes();
}

size_t Recorder::currentPurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentPurgeableBytes();
}

size_t Recorder::maxBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheLimit();
}

void Recorder::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceProvider->dumpMemoryStatistics(traceMemoryDump);
    // TODO: What is the Graphite equivalent of the text blob cache, and how do we print out its
    // used bytes here? (See the Ganesh implementation.)
}

void RecorderPriv::addPendingRead(const TextureProxy* proxy) {
    ASSERT_SINGLE_OWNER_PRIV
    fRecorder->fProxyReadCounts->increment(proxy);
}

void RecorderPriv::add(sk_sp<Task> task) {
    ASSERT_SINGLE_OWNER_PRIV
    fRecorder->fRootTaskList->add(std::move(task));
}

void RecorderPriv::flushTrackedDevices() {
    ASSERT_SINGLE_OWNER_PRIV

    // If this is the initial flushTrackedDevices() call, fFlushingDevicesIndex will be -1, so we
    // start iterating at 0. We remember the starting index and perform cleanup only on the
    // outermost call (when it was -1), to avoid modifying the underlying data structure while
    // iterating over it. Flushing one device may register new devices as well as recursively call
    // flushTrackedDevices(). In that case, the recursive call picks up with the device after the
    // one that triggered it, since all prior devices have either been flushed or are in progress
    // (and they should not be flushed while in an unfinished flush). When control returns to the
    // outer flushTrackedDevices(), it picks up wherever the inner flush ended.
    // TODO(b/330864257): Once paint data is extracted at draw time (so picture shaders are
    // rendered to images before a flush instead of inside a flush), we can simplify this and
    // assert that flushTrackedDevices() is not recursively called and that devices are not added
    // or removed while flushing.
    const int startingIndex = fRecorder->fFlushingDevicesIndex;
    while (fRecorder->fFlushingDevicesIndex < fRecorder->fTrackedDevices.size() - 1) {
        // Advance before calling flushPendingWorkToRecorder() so that any re-entrant call to
        // flushTrackedDevices() will skip the current device.
        fRecorder->fFlushingDevicesIndex++;
        // Entries may be set to null from a call to deregisterDevice(), which will be cleaned up
        // along with any immutable or uniquely held Devices once everything is flushed.
        Device* device = fRecorder->fTrackedDevices[fRecorder->fFlushingDevicesIndex].get();
        if (device) {
            device->flushPendingWorkToRecorder();
        }
    }

    // Issue the next upload flush token. This is only used by the atlasing code, which always
    // uses this method; calling it in Device::flushPendingWorkToRecorder could miss parent device
    // flushes, increment too often, and lead to atlas corruption.
    this->tokenTracker()->issueFlushToken();

    if (startingIndex < 0) {
        // Initial call to flushTrackedDevices(), so clean up null/immutable devices and reset the
        // loop index.
        int i = 0;
        while (i < fRecorder->fTrackedDevices.size()) {
            Device* device = fRecorder->fTrackedDevices[i].get();
            if (!device || !device->recorder() || device->unique()) {
                if (device) {
                    device->abandonRecorder(); // Keep ~Device() happy
                }
                fRecorder->fTrackedDevices.removeShuffle(i);
                // Keep i as-is to process what was just shuffled to the ith index.
            } else {
                i++;
            }
        }

        fRecorder->fFlushingDevicesIndex = -1;
    }
}

sk_sp<TextureProxy> RecorderPriv::CreateCachedProxy(Recorder* recorder,
                                                    const SkBitmap& bitmap,
                                                    std::string_view label) {
    SkASSERT(!bitmap.isNull());

    if (!recorder) {
        return nullptr;
    }
    return recorder->priv().proxyCache()->findOrCreateCachedProxy(recorder,
                                                                  bitmap,
                                                                  std::move(label));
}

size_t RecorderPriv::getResourceCacheLimit() const {
    return fRecorder->fResourceProvider->getResourceCacheLimit();
}

#if defined(GPU_TEST_UTILS)
bool RecorderPriv::deviceIsRegistered(Device* device) const {
    ASSERT_SINGLE_OWNER_PRIV
    for (const sk_sp<Device>& currentDevice : fRecorder->fTrackedDevices) {
        if (device == currentDevice.get()) {
            return true;
        }
    }
    return false;
}

// used by the Context that created this Recorder to set a back pointer
void RecorderPriv::setContext(Context* context) {
    fRecorder->fContext = context;
}

void RecorderPriv::issueFlushToken() {
    fRecorder->fTokenTracker->issueFlushToken();
}

#endif

}  // namespace skgpu::graphite