/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "src/gpu/ganesh/GrDrawingManager.h"

#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkSize.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/GrRecordingContext.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkTo.h"
#include "include/private/chromium/GrDeferredDisplayList.h"
#include "include/private/chromium/GrSurfaceCharacterization.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkTInternalLList.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GpuTypesPriv.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrBufferTransferRenderTask.h"
#include "src/gpu/ganesh/GrBufferUpdateRenderTask.h"
#include "src/gpu/ganesh/GrClientMappedBufferManager.h"
#include "src/gpu/ganesh/GrCopyRenderTask.h"
#include "src/gpu/ganesh/GrDDLTask.h"
#include "src/gpu/ganesh/GrDeferredDisplayListPriv.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrOnFlushResourceProvider.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/GrRenderTargetProxy.h"
#include "src/gpu/ganesh/GrRenderTask.h"
#include "src/gpu/ganesh/GrRenderTaskCluster.h"
#include "src/gpu/ganesh/GrResourceAllocator.h"
#include "src/gpu/ganesh/GrResourceCache.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"
#include "src/gpu/ganesh/GrTTopoSort.h"
#include "src/gpu/ganesh/GrTextureProxy.h"
#include "src/gpu/ganesh/GrTextureResolveManager.h"
#include "src/gpu/ganesh/GrTextureResolveRenderTask.h"
#include "src/gpu/ganesh/GrTracing.h"
#include "src/gpu/ganesh/GrTransferFromRenderTask.h"
#include "src/gpu/ganesh/GrWaitRenderTask.h"
#include "src/gpu/ganesh/GrWritePixelsRenderTask.h"
#include "src/gpu/ganesh/ops/GrOp.h"
#include "src/gpu/ganesh/ops/OpsTask.h"
#include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"

#include <algorithm>
#include <memory>
#include <optional>
#include <utility>

using namespace skia_private;

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}
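
// The reverse iteration above matters because removeShuffle() fills the hole at
// index i with the current last element; walking backwards guarantees that the
// swapped-in element was already visited. A minimal sketch of the same idiom
// over a plain std::vector (retain() is a hypothetical predicate, not Skia API):
#if 0
for (int i = (int)v.size() - 1; i >= 0; --i) {
    if (!retain(v[i])) {
        v[i] = std::move(v.back());  // removeShuffle: swap-with-last...
        v.pop_back();                // ...then shrink by one
    }
}
#endif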

// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurfaces::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const skgpu::MutableTextureState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurfaces::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // The GrRenderTasks are usually already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here means new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client-side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    std::optional<GrTimerQuery> timerQuery;
    if (info.fFinishedWithStatsProc && (info.fGpuStatsFlags & skgpu::GpuStatsFlags::kElapsedTime)) {
        timerQuery = gpu->startTimerQuery();
    }
    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    bool preFlushSuccessful = true;
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);
    }

    bool cachePurgeNeeded = false;

    if (preFlushSuccessful) {
        bool usingReorderedDAG = false;
        GrResourceAllocator resourceAllocator(dContext);
        if (fReduceOpsTaskSplitting) {
            usingReorderedDAG = this->reorderTasks(&resourceAllocator);
            if (!usingReorderedDAG) {
                resourceAllocator.reset();
            }
        }

#if 0
        // Enable this to print out verbose GrOp information
        SkDEBUGCODE(SkDebugf("RenderTasks (%d):\n", fDAG.count()));
        for (const auto& task : fDAG) {
            SkDEBUGCODE(task->dump(/* printDependencies */ true);)
        }
#endif

        if (!resourceAllocator.failedInstantiation()) {
            if (!usingReorderedDAG) {
                for (const auto& task : fDAG) {
                    SkASSERT(task);
                    task->gatherProxyIntervals(&resourceAllocator);
                }
                resourceAllocator.planAssignment();
            }
            resourceAllocator.assign();
        }

        cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
                           this->executeRenderTasks(&flushState);
    }
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, std::move(timerQuery), newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
        cachePurgeNeeded = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
        cachePurgeNeeded = true;
    }
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
    }
    fFlushing = false;

    return true;
}
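
// For context, a minimal caller-side sketch using the public GrDirectContext API
// (hedged: 'dContext' is an assumed-valid GrDirectContext*). flush() above only
// records and hands work to the GrGpu; a separate submit pushes it to the driver:
#if 0
GrFlushInfo flushInfo;        // no semaphores, no finished callbacks
dContext->flush(flushInfo);   // drives GrDrawingManager::flush()
dContext->submit();           // actually submits the recorded command buffers
#endif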

bool GrDrawingManager::submitToGpu() {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu();
}

bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.size());
    for (int i = 0; i < fDAG.size(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.size());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

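    // First pass: give every instantiated task a chance to prepare (e.g. stage uploads)
    // before any task executes.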
    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to flush we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause
    // devices to run out of memory. In practice we usually only hit this case in our tests, but
    // to be safe we put a cap on the number of oplists we will execute before flushing to the GPU
    // to relieve some memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu();
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextFlushToken());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.clear();
    fReorderBlockerTaskIndices.clear();
    fLastRenderTasks.reset();
}

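// fReorderBlockerTaskIndices partitions fDAG into spans that are sorted
// independently; e.g. blockers at indices {2, 5} in a 7-task DAG give
// [t0 t1] B [t3 t4] B [t6]. A standalone sketch of that traversal over plain
// ints, with std::sort standing in for the topological sort (not Skia code):
#if 0
void sortBetweenBlockers(std::vector<int>& dag, const std::vector<size_t>& blockers) {
    for (size_t i = 0, start = 0, end; start < dag.size(); ++i, start = end + 1) {
        end = (i == blockers.size()) ? dag.size() : blockers[i];
        std::sort(dag.begin() + start, dag.begin() + end);  // sort one span
        // dag[end], if it exists, is a blocker and keeps its position.
    }
}
#endif
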
void GrDrawingManager::sortTasks() {
    // We separately sort the ranges around non-reorderable tasks.
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));
        SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());

#if defined(SK_DEBUG)
        // In order to partition the dag array like this it must be the case that each partition
        // only depends on nodes in the partition or earlier partitions.
        auto check = [&](const GrRenderTask* task, auto&& check) -> void {
            SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
                     std::find_if(span.begin(), span.end(), [task](const auto& n) {
                         return n.get() == task; }));
            for (int i = 0; i < task->fDependencies.size(); ++i) {
                check(task->fDependencies[i], check);
            }
        };
        for (const auto& node : span) {
            check(node.get(), check);
        }
#endif

        bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);
        if (!sorted) {
            SkDEBUGFAIL("Render task topo sort failed.");
        }

#ifdef SK_DEBUG
        if (sorted && !span.empty()) {
            // This block checks for any unnecessary splits in the opsTasks. If two sequential
            // opsTasks could have merged it means the opsTask was artificially split.
            auto prevOpsTask = span[0]->asOpsTask();
            for (size_t j = 1; j < span.size(); ++j) {
                auto curOpsTask = span[j]->asOpsTask();

                if (prevOpsTask && curOpsTask) {
                    SkASSERT(!prevOpsTask->canMerge(curOpsTask));
                }

                prevOpsTask = curOpsTask;
            }
        }
#endif
    }
}

// Reorder the array to match the llist without reffing & unreffing the sk_sps.
// Both args must contain the same objects.
// This is basically a shim because clustering uses an LList but the rest of the
// drawing manager uses the array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, TArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->size());
}
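
// For example: if 'array' holds {A, B, C} and clustering produced the llist
// order C -> A -> B, the loop above rewrites the array in place to {C, A, B},
// and every task ends with the same ref count it started with.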

bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    // We separately sort the ranges around non-reorderable tasks.
    bool clustered = false;
    SkTInternalLList<GrRenderTask> llist;
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);
        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));

        SkTInternalLList<GrRenderTask> subllist;
        if (GrClusterRenderTasks(span, &subllist)) {
            clustered = true;
        }

        if (i < fReorderBlockerTaskIndices.size()) {
            SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
            subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());
        }
        llist.concat(std::move(subllist));
    }
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.size(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}
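
// The loop above merges adjacent opsTasks and compacts the array in one pass. A
// standalone sketch of the same shape over a plain vector (mergeFollowing() is a
// hypothetical stand-in for OpsTask::mergeFrom(); it returns how many subsequent
// elements were absorbed into v[i]):
#if 0
int newCount = 0;
for (int i = 0; i < (int)v.size(); ++i) {
    Item& item = v[i];                        // may absorb later elements
    int absorbed = item.mergeFollowing(v, i);
    i += absorbed;                            // skip the absorbed elements
    v[newCount++] = std::move(item);          // compact survivors to the left
}
v.resize(newCount);
#endif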

void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
        fReorderBlockerTaskIndices.back()++;
    }
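    // Push the new task, then swap it with the previous final task so the new
    // task lands second-to-last: appending D to [A, B, C] yields [A, B, D, C].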
    fDAG.push_back(std::move(task));
    auto& penultimate = fDAG.fromBack(1);
    fDAG.back().swap(penultimate);
    return penultimate.get();
}

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (task->blocksReordering()) {
        fReorderBlockerTaskIndices.push_back(fDAG.size());
    }
    return fDAG.push_back(std::move(task)).get();
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu();
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be better to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(SkSpan<GrSurfaceProxy*> proxies,
                                                      SkSurfaces::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info,
                                                      const skgpu::MutableTextureState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawing manager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->backendSemaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if defined(GPU_TEST_UTILS)
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.size());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::ganesh::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

void GrDrawingManager::moveRenderTasksToDDL(GrDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // No renderTask should receive a new command after this point.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());
    fReorderBlockerTaskIndices.clear();

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const GrDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && skgpu::Mipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl)));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.size(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
}

sk_sp<skgpu::ganesh::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                           sk_sp<GrArenas> arenas) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::ganesh::OpsTask> opsTask(new skgpu::ganesh::OpsTask(
            this, std::move(surfaceView), fContext->priv().auditTrail(), std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    this->appendTask(opsTask);

    fActiveOpsTask = opsTask.get();

    SkDEBUGCODE(this->validate());
    return opsTask;
}

void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
            // This guarantees that the previous atlas is totally out of service before we render
            // the next one, meaning there is only ever one atlas active at a time and that they can
            // all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTaskBefore(
        const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                                   GrSurfaceProxy::ResolveFlags flags,
                                                   const GrCaps& caps) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (!proxy->requiresManualMSAAResolve()) {
        SkDEBUGCODE(this->validate());
        return;
    }

    GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
    if (!proxy->asRenderTargetProxy()->isMSAADirty() && (!lastTask || lastTask->isClosed())) {
        SkDEBUGCODE(this->validate());
        return;
    }

    this->closeActiveOpsTask();

    auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
    // addProxy() also adds all the dependencies we need.
    resolveTask->addProxy(this, std::move(proxy), flags, caps);

    auto task = this->appendTask(std::move(resolveTask));
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask's
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else {
        // In this case we just close the previous RenderTask and append the waitTask to the
        // DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}
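
// Dependency shape in the open-opsTask branch above (arrows point at dependencies):
//   waitTask -> { everything fActiveOpsTask already depended on }
//   fActiveOpsTask -> waitTask
// Copying fActiveOpsTask's dependencies onto waitTask before adding the
// fActiveOpsTask -> waitTask edge is what prevents a waitTask -> waitTask cycle.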

void GrDrawingManager::newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base
    // layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, srcProxy.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                                             size_t srcOffset,
                                             sk_sp<GrGpuBuffer> dst,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferTransferRenderTask::Make(std::move(src),
                                                                srcOffset,
                                                                std::move(dst),
                                                                dstOffset,
                                                                size);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferUpdateTask(sk_sp<SkData> src,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(dstOffset + src->size() <= dst->size());
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(!dst->isMapped());

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferUpdateRenderTask::Make(std::move(src),
                                                              std::move(dst),
                                                              dstOffset);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                                        SkIRect dstRect,
                                                        const sk_sp<GrSurfaceProxy>& src,
                                                        SkIRect srcRect,
                                                        GrSamplerState::Filter filter,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      std::move(dst),
                                                      dstRect,
                                                      src,
                                                      srcRect,
                                                      filter,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base layer
    // to another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, src.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurfaces::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
skgpu::ganesh::PathRenderer* GrDrawingManager::getPathRenderer(
        const PathRenderer::CanDrawPathArgs& args,
        bool allowSW,
        PathRendererChain::DrawType drawType,
        PathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}
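
// The selection above is a chain-of-responsibility with an optional software
// fallback. In miniature (hypothetical Renderer/Args types, not Skia's API):
#if 0
Renderer* pick(const Args& args, bool allowSW) {
    if (Renderer* r = chain.firstThatCanDraw(args)) {
        return r;              // a GPU path renderer accepted the path
    }
    if (allowSW && software.canDrawPath(args)) {
        return &software;      // fall back to the software path renderer
    }
    return nullptr;            // nobody can draw this path
}
#endif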

skgpu::ganesh::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::ganesh::SoftwarePathRenderer(
                fContext->priv().proxyProvider(),
                fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::ganesh::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

skgpu::ganesh::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu();
        }
        resourceCache->purgeAsNeeded();
    }
}