/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkArenaAlloc.h"
#include "src/base/SkArenaAllocList.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/GrAppliedClip.h"
#include "src/gpu/ganesh/GrBuffer.h"
#include "src/gpu/ganesh/GrBufferAllocPool.h"
#include "src/gpu/ganesh/GrDeferredUpload.h"
#include "src/gpu/ganesh/GrDrawIndirectCommand.h"
#include "src/gpu/ganesh/GrDstProxyView.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrMeshDrawTarget.h"
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/GrScissorState.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"

#include <cstddef>
#include <cstdint>
#include <utility>

class GrAtlasManager;
class GrCaps;
class GrGpu;
class GrOp;
class GrRenderTargetProxy;
class GrResourceProvider;
class GrSurfaceProxy;
class GrThreadSafeCache;
enum class GrXferBarrierFlags;
struct GrSimpleMesh;
struct GrUserStencilSettings;
struct SkIRect;
struct SkRect;

namespace skgpu::ganesh {
class SmallPathAtlasMgr;
}
namespace sktext::gpu {
class StrikeCache;
}

/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, skgpu::TokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
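
    // A hedged construction sketch (not part of this header): `gpu`, `resourceProvider`, and
    // `tokenTracker` are assumed to be in hand from the surrounding flush code, and the
    // CpuBufferCache is optional; CpuBufferCache::Make() is assumed to take the number of
    // buffers to cache.
    //
    //   sk_sp<GrBufferAllocPool::CpuBufferCache> cpuBufferCache =
    //           GrBufferAllocPool::CpuBufferCache::Make(/*maxBuffersToCache=*/2);
    //   GrOpFlushState flushState(gpu, resourceProvider, tokenTracker,
    //                             std::move(cpuBufferCache));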

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
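
    // A hedged sketch of the upload-function shape that doUpload() consumes, assuming the
    // writePixels callback takes (proxy, rect, color type, pixel data, row bytes) as declared
    // in GrDeferredUpload.h; `proxy`, `data`, and `rowBytes` are hypothetical captures:
    //
    //   GrDeferredTextureUploadFn upload =
    //           [proxy, data, rowBytes](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               writePixels(proxy, SkIRect::MakeWH(64, 64), GrColorType::kAlpha_8,
    //                           data, rowBytes);
    //           };
    //   flushState->doUpload(upload, /*shouldPrepareSurfaceForSampling=*/true);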

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);
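
    // Roughly how a flush drives the two methods above (a sketch of the calling pattern, not
    // code copied from OpsTask; `opsInExecutionOrder` and the per-op arguments are hypothetical):
    //
    //   flushState->preExecuteDraws();  // runs after all ops have prepared, before any draws
    //   for (const GrOp* op : opsInExecutionOrder) {
    //       flushState->executeDrawsAndUploadsForMeshDrawOp(op, chainBounds, pipeline,
    //                                                       stencilSettings);
    //   }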

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp*                         fOp;
        const GrSurfaceProxyView&     fSurfaceView;
        GrRenderTargetProxy*          fRenderTargetProxy;
        bool                          fUsesMSAASurface;
        GrAppliedClip*                fAppliedClip;
        GrDstProxyView                fDstProxyView;   // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags            fRenderPassXferBarriers;
        GrLoadOp                      fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }
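
    // Sketch of the OpArgs lifecycle around one op's execution (a hypothetical driver loop;
    // all the argument variables are assumed to come from the surrounding flush code):
    //
    //   GrOpFlushState::OpArgs opArgs(op, surfaceView, usesMSAASurface, appliedClip,
    //                                 dstProxyView, renderPassXferBarriers, colorLoadOp);
    //   flushState->setOpArgs(&opArgs);
    //   op->execute(flushState, chainBounds);  // op calls back into drawOpArgs() etc.
    //   flushState->setOpArgs(nullptr);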

    void setSampledProxyArray(skia_private::TArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    skia_private::TArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const skgpu::TokenTracker* tokenTracker() final { return fTokenTracker; }
    skgpu::AtlasToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    skgpu::AtlasToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
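
    // The two flavors differ in scheduling: an ASAP upload runs before the flush executes any
    // draws, while an inline upload is interleaved so that it executes before the draw holding
    // the returned token. A sketch from an op's prepare phase (`uploadFn` is hypothetical, and
    // `uploadTarget` is this object in its GrDeferredUploadTarget role):
    //
    //   skgpu::AtlasToken token = uploadTarget->addInlineUpload(std::move(uploadFn));
    //   // ... only record draws that sample the uploaded texture at or after `token` ...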

    /** Overrides of GrMeshDrawTarget. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
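
    // How a mesh-draw op's prepare phase might use the allocators above (a sketch; the vertex
    // layout and counts are hypothetical, and a null return means the allocation failed):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int baseVertex = 0;
    //   auto* verts = static_cast<SkPoint*>(
    //           target->makeVertexSpace(sizeof(SkPoint), 4, &vertexBuffer, &baseVertex));
    //   if (!verts) {
    //       return;  // nothing to draw
    //   }
    //   // ... fill verts, wrap vertexBuffer/baseVertex in a GrSimpleMesh, then recordDraw() ...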

    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
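
    // Sketch of reserving indirect-draw commands: the returned writer fills the commands, and
    // the buffer/offset pair it reports is what drawIndirect() below consumes at execution time
    // (the writer's exact API lives in GrDrawIndirectCommand.h):
    //
    //   sk_sp<const GrBuffer> indirectBuffer;
    //   size_t offset = 0;
    //   GrDrawIndirectWriter writer =
    //           target->makeDrawIndirectSpace(drawCount, &indirectBuffer, &offset);
    //   // ... write drawCount commands through `writer` ...
    //   // later, during execution:
    //   flushState->drawIndirect(indirectBuffer.get(), offset, drawCount);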

    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrDstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    sktext::gpu::StrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // SmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::ganesh::SmallPathAtlasMgr* smallPathAtlasManager() const final;
#endif

    /** GrMeshDrawTarget override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);
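
    // Typical execution-time flow for a mesh op combining the helpers above (a sketch;
    // `programInfo`, `atlasProxy`, `chainBounds`, and `mesh` are hypothetical op-owned state):
    //
    //   flushState->bindPipelineAndScissorClip(*programInfo, chainBounds);
    //   flushState->bindTextures(programInfo->geomProc(), *atlasProxy,
    //                            programInfo->pipeline());
    //   flushState->drawMesh(*mesh);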

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, skgpu::AtlasToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        skgpu::AtlasToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation. This object does
        // not need to manage its lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    skgpu::AtlasToken fBaseDrawToken = skgpu::AtlasToken::InvalidToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each OpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    skia_private::TArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    skgpu::TokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif