xref: /aosp_15_r20/external/skia/src/gpu/ganesh/GrRecordingContext.cpp (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
1 /*
2  * Copyright 2019 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "include/gpu/ganesh/GrRecordingContext.h"
9 
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "include/gpu/GpuTypes.h"
13 #include "include/gpu/ganesh/GrBackendSurface.h"
14 #include "include/gpu/ganesh/GrContextOptions.h"
15 #include "include/gpu/ganesh/GrContextThreadSafeProxy.h"
16 #include "include/gpu/ganesh/GrTypes.h"
17 #include "include/private/base/SkDebug.h"
18 #include "include/private/base/SkMacros.h"
19 #include "include/private/gpu/ganesh/GrTypesPriv.h"
20 #include "src/base/SkArenaAlloc.h"
21 #include "src/gpu/ganesh/GrAuditTrail.h"
22 #include "src/gpu/ganesh/GrCaps.h"
23 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
24 #include "src/gpu/ganesh/GrDrawingManager.h"
25 #include "src/gpu/ganesh/GrProgramDesc.h"
26 #include "src/gpu/ganesh/GrProxyProvider.h"
27 #include "src/gpu/ganesh/PathRendererChain.h"
28 #include "src/gpu/ganesh/ops/AtlasTextOp.h"
29 #include "src/text/gpu/SubRunAllocator.h"
30 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
31 
32 #include <utility>
33 
34 using namespace skia_private;
35 
36 using TextBlobRedrawCoordinator = sktext::gpu::TextBlobRedrawCoordinator;
37 
// Pairs an owned program descriptor with a non-owning GrProgramInfo pointer.
// NOTE(review): fInfo is a bare pointer — presumably the caller keeps it alive
// (e.g. in a record-time arena) for as long as this ProgramData; confirm at call sites.
GrRecordingContext::ProgramData::ProgramData(std::unique_ptr<const GrProgramDesc> desc,
                                             const GrProgramInfo* info)
        : fDesc(std::move(desc))
        , fInfo(info) {
}
43 
// Move constructor: takes over the descriptor; the (non-owning) info pointer is
// simply copied. Cannot be marked noexcept here without also changing the
// in-class declaration.
GrRecordingContext::ProgramData::ProgramData(ProgramData&& other)
        : fDesc(std::move(other.fDesc))
        , fInfo(other.fInfo) {
}
48 
// Out-of-line so the destructor is emitted where GrProgramDesc is a complete type.
GrRecordingContext::ProgramData::~ProgramData() = default;
50 
// Constructs the recording context on top of the shared thread-safe proxy.
// 'ddlRecording' selects the arena behavior used while recording a DDL
// (see OwnedArenas::get). The proxy provider is created eagerly since it
// needs a back-pointer to this context.
GrRecordingContext::GrRecordingContext(sk_sp<GrContextThreadSafeProxy> proxy, bool ddlRecording)
        : GrImageContext(std::move(proxy))
        , fAuditTrail(new GrAuditTrail())
        , fArenas(ddlRecording) {
    fProxyProvider = std::make_unique<GrProxyProvider>(this);
}
57 
// Drops the process-wide AtlasTextOp caching when the context goes away.
GrRecordingContext::~GrRecordingContext() {
    skgpu::ganesh::AtlasTextOp::ClearCache();
}
61 
init()62 bool GrRecordingContext::init() {
63     if (!GrImageContext::init()) {
64         return false;
65     }
66 
67     skgpu::ganesh::PathRendererChain::Options prcOptions;
68     prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
69 #if defined(GPU_TEST_UTILS)
70     prcOptions.fGpuPathRenderers = this->options().fGpuPathRenderers;
71 #endif
72     // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
73     if (this->options().fDisableDistanceFieldPaths) {
74         prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
75     }
76 
77     bool reduceOpsTaskSplitting = true;
78     if (this->caps()->avoidReorderingRenderTasks()) {
79         reduceOpsTaskSplitting = false;
80     } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpsTaskSplitting) {
81         reduceOpsTaskSplitting = true;
82     } else if (GrContextOptions::Enable::kNo == this->options().fReduceOpsTaskSplitting) {
83         reduceOpsTaskSplitting = false;
84     }
85     fDrawingManager.reset(new GrDrawingManager(this,
86                                                prcOptions,
87                                                reduceOpsTaskSplitting));
88     return true;
89 }
90 
// Abandons the base context, then tears down the drawing manager so no further
// work can be recorded.
void GrRecordingContext::abandonContext() {
    GrImageContext::abandonContext();

    this->destroyDrawingManager();
}
96 
// Returns the drawing manager (non-owning; may be null after abandonContext()).
GrDrawingManager* GrRecordingContext::drawingManager() {
    return fDrawingManager.get();
}
100 
// Destroys the drawing manager; drawingManager() returns null afterwards.
void GrRecordingContext::destroyDrawingManager() {
    fDrawingManager.reset();
}
104 
// Non-owning view over the record-time allocators. recordTimeAllocator may be
// null (it is only instantiated for DDL recording); subRunAllocator must not be.
GrRecordingContext::Arenas::Arenas(SkArenaAlloc* recordTimeAllocator,
                                   sktext::gpu::SubRunAllocator* subRunAllocator)
        : fRecordTimeAllocator(recordTimeAllocator)
        , fRecordTimeSubRunAllocator(subRunAllocator) {
    // OwnedArenas should instantiate these before passing the bare pointer off to this struct.
    SkASSERT(subRunAllocator);
}
112 
// Must be defined here (not in the header) so that std::unique_ptr can see the
// sizes of the various pools; otherwise it can't generate a default destructor
// for them. Allocation of the pools themselves is deferred to get().
GrRecordingContext::OwnedArenas::OwnedArenas(bool ddlRecording) : fDDLRecording(ddlRecording) {}
GrRecordingContext::OwnedArenas::~OwnedArenas() {}
117 
operator =(OwnedArenas && a)118 GrRecordingContext::OwnedArenas& GrRecordingContext::OwnedArenas::operator=(OwnedArenas&& a) {
119     fDDLRecording = a.fDDLRecording;
120     fRecordTimeAllocator = std::move(a.fRecordTimeAllocator);
121     fRecordTimeSubRunAllocator = std::move(a.fRecordTimeSubRunAllocator);
122     return *this;
123 }
124 
get()125 GrRecordingContext::Arenas GrRecordingContext::OwnedArenas::get() {
126     if (!fRecordTimeAllocator && fDDLRecording) {
127         // TODO: empirically determine a better number for SkArenaAlloc's firstHeapAllocation param
128         fRecordTimeAllocator = std::make_unique<SkArenaAlloc>(1024);
129     }
130 
131     if (!fRecordTimeSubRunAllocator) {
132         fRecordTimeSubRunAllocator = std::make_unique<sktext::gpu::SubRunAllocator>();
133     }
134 
135     return {fRecordTimeAllocator.get(), fRecordTimeSubRunAllocator.get()};
136 }
137 
// Hands the arenas off (e.g. when finishing a DDL recording). Returns an
// rvalue reference to the member so the caller's move constructor/assignment
// empties fArenas; the member object itself stays valid but moved-from.
GrRecordingContext::OwnedArenas&& GrRecordingContext::detachArenas() {
    return std::move(fArenas);
}
141 
// The text-blob redraw coordinator is shared across contexts; forward to the
// thread-safe proxy's private API.
TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}
145 
// Const overload; same shared coordinator via the thread-safe proxy.
const TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() const {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}
149 
// The thread-safe cache also lives on the shared proxy; forward to it.
GrThreadSafeCache* GrRecordingContext::threadSafeCache() {
    return fThreadSafeProxy->priv().threadSafeCache();
}
153 
// Const overload of threadSafeCache().
const GrThreadSafeCache* GrRecordingContext::threadSafeCache() const {
    return fThreadSafeProxy->priv().threadSafeCache();
}
157 
// Registers an object to be called back at flush time; ownership semantics are
// those of GrDrawingManager::addOnFlushCallbackObject.
void GrRecordingContext::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    this->drawingManager()->addOnFlushCallbackObject(onFlushCBObject);
}
161 
162 ////////////////////////////////////////////////////////////////////////////////
163 
// Public capabilities query; the GrCaps object doubles as the SkCapabilities.
sk_sp<const SkCapabilities> GrRecordingContext::skCapabilities() const {
    return this->refCaps();
}
167 
// Maximum texture dimension supported by the backend, per the caps.
int GrRecordingContext::maxTextureSize() const { return this->caps()->maxTextureSize(); }
169 
// Maximum render-target dimension supported by the backend, per the caps.
int GrRecordingContext::maxRenderTargetSize() const { return this->caps()->maxRenderTargetSize(); }
171 
colorTypeSupportedAsImage(SkColorType colorType) const172 bool GrRecordingContext::colorTypeSupportedAsImage(SkColorType colorType) const {
173     GrBackendFormat format =
174             this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
175                                                   GrRenderable::kNo);
176     return format.isValid();
177 }
178 
// Whether the backend supports protected (DRM) content, per the caps.
bool GrRecordingContext::supportsProtectedContent() const {
    return this->caps()->supportsProtectedContent();
}
182 
183 ///////////////////////////////////////////////////////////////////////////////////////////////////
184 
185 #ifdef SK_ENABLE_DUMP_GPU
186 #include "src/utils/SkJSONWriter.h"
187 
// Emits this context's stats as a JSON object. Only the path-mask counters are
// written, and only when GR_GPU_STATS is enabled; otherwise the object is empty.
void GrRecordingContext::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

#if GR_GPU_STATS
    writer->appendS32("path_masks_generated", this->stats()->numPathMasksGenerated());
    writer->appendS32("path_mask_cache_hits", this->stats()->numPathMaskCacheHits());
#endif

    writer->endObject();
}
198 #else
// No-op stub when SK_ENABLE_DUMP_GPU is not defined.
void GrRecordingContext::dumpJSON(SkJSONWriter*) const { }
200 #endif
201 
202 #if defined(GPU_TEST_UTILS)
203 
204 #if GR_GPU_STATS
205 
// Appends a human-readable summary of the path-mask counters to 'out'.
void GrRecordingContext::Stats::dump(SkString* out) const {
    out->appendf("Num Path Masks Generated: %d\n", fNumPathMasksGenerated);
    out->appendf("Num Path Mask Cache Hits: %d\n", fNumPathMaskCacheHits);
}
210 
// Appends the path-mask counters as parallel key/value arrays (keys and values
// stay index-aligned) for machine-readable stat dumps.
void GrRecordingContext::Stats::dumpKeyValuePairs(TArray<SkString>* keys,
                                                  TArray<double>* values) const {
    keys->push_back(SkString("path_masks_generated"));
    values->push_back(fNumPathMasksGenerated);

    keys->push_back(SkString("path_mask_cache_hits"));
    values->push_back(fNumPathMaskCacheHits);
}
219 
// Appends the DMSAA counters as parallel key/value arrays: the two render-pass
// totals, then one "dmsaa_trigger_<name>" entry per recorded trigger.
void GrRecordingContext::DMSAAStats::dumpKeyValuePairs(TArray<SkString>* keys,
                                                       TArray<double>* values) const {
    keys->push_back(SkString("dmsaa_render_passes"));
    values->push_back(fNumRenderPasses);

    keys->push_back(SkString("dmsaa_multisample_render_passes"));
    values->push_back(fNumMultisampleRenderPasses);

    for (const auto& [name, count] : fTriggerCounts) {
        keys->push_back(SkStringPrintf("dmsaa_trigger_%s", name.c_str()));
        values->push_back(count);
    }
}
233 
// Prints the DMSAA counters to the debug output; the trigger section is only
// printed when at least one trigger has been recorded.
void GrRecordingContext::DMSAAStats::dump() const {
    SkDebugf("DMSAA Render Passes: %d\n", fNumRenderPasses);
    SkDebugf("DMSAA Multisample Render Passes: %d\n", fNumMultisampleRenderPasses);
    if (!fTriggerCounts.empty()) {
        SkDebugf("DMSAA Triggers:\n");
        for (const auto& [name, count] : fTriggerCounts) {
            SkDebugf("    %s: %d\n", name.c_str(), count);
        }
    }
}
244 
merge(const DMSAAStats & stats)245 void GrRecordingContext::DMSAAStats::merge(const DMSAAStats& stats) {
246     fNumRenderPasses += stats.fNumRenderPasses;
247     fNumMultisampleRenderPasses += stats.fNumMultisampleRenderPasses;
248     for (const auto& [name, count] : stats.fTriggerCounts) {
249         fTriggerCounts[name] += count;
250     }
251 }
252 
253 #endif // GR_GPU_STATS
254 #endif // defined(GPU_TEST_UTILS)
255