// xref: /aosp_15_r20/external/skia/src/gpu/ganesh/gl/GrGLOpsRenderPass.h (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef GrGLOpsRenderPass_DEFINED
#define GrGLOpsRenderPass_DEFINED

#include "include/core/SkRect.h"
#include "include/core/SkRefCnt.h"
#include "include/private/base/SkDebug.h"
#include "src/gpu/ganesh/GrDeferredUpload.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/gl/GrGLGpu.h"

#include <array>
#include <cstddef>
#include <cstdint>

// Forward declarations: these types are only used by pointer/reference in this
// header, so full definitions are not required here.
class GrBuffer;
class GrGLAttribArrayState;
class GrGpu;
class GrPipeline;
class GrProgramInfo;
class GrRenderTarget;
class GrScissorState;
class GrSurfaceProxy;
enum GrSurfaceOrigin : int;
enum class GrPrimitiveRestart : bool;
enum class GrPrimitiveType : uint8_t;
35 
36 class GrGLOpsRenderPass : public GrOpsRenderPass {
37 /**
38  * We do not actually buffer up draws or do any work in the this class for GL. Instead commands
39  * are immediately sent to the gpu to execute. Thus all the commands in this class are simply
40  * pass through functions to corresponding calls in the GrGLGpu class.
41  */
42 public:
GrGLOpsRenderPass(GrGLGpu * gpu)43     GrGLOpsRenderPass(GrGLGpu* gpu) : fGpu(gpu) {}
44 
inlineUpload(GrOpFlushState * state,GrDeferredTextureUploadFn & upload)45     void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {
46         state->doUpload(upload);
47     }
48 
49     void set(GrRenderTarget*, bool useMSAASurface, const SkIRect& contentBounds, GrSurfaceOrigin,
50              const LoadAndStoreInfo&, const StencilLoadAndStoreInfo&);
51 
reset()52     void reset() {
53         fRenderTarget = nullptr;
54     }
55 
56 private:
gpu()57     GrGpu* gpu() override { return fGpu; }
58 
59     void bindInstanceBuffer(const GrBuffer*, int baseInstance);
60     void bindVertexBuffer(const GrBuffer*, int baseVertex);
61 
offsetForBaseIndex(int baseIndex)62     const void* offsetForBaseIndex(int baseIndex) const {
63         if (!fIndexPointer) {
64             // nullptr != 0. Adding an offset to a nullptr is undefined.
65             return (void*)(baseIndex * sizeof(uint16_t));
66         }
67         return fIndexPointer + baseIndex;
68     }
69 
70     // Ideally we load and store DMSAA only within the content bounds of our render pass, but if
71     // the caps don't allow for partial framebuffer blits, we resolve the full target.
72     // We resolve the same bounds during load and store both because if we have to do a full size
73     // resolve at the end, the full DMSAA attachment needs to have valid content.
74     GrNativeRect dmsaaLoadStoreBounds() const;
75 
76     void onBegin() override;
77     void onEnd() override;
78     bool onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) override;
79     void onSetScissorRect(const SkIRect& scissor) override;
80     bool onBindTextures(const GrGeometryProcessor&,
81                         const GrSurfaceProxy* const geomProcTextures[],
82                         const GrPipeline&) override;
83     void onBindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
84                        sk_sp<const GrBuffer> vertexBuffer, GrPrimitiveRestart) override;
85     void onDraw(int vertexCount, int baseVertex) override;
86     void onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
87                        uint16_t maxIndexValue, int baseVertex) override;
88     void onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
89                          int baseVertex) override;
90     void onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
91                                 int baseVertex) override;
92     void onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) override;
93     void multiDrawArraysANGLEOrWebGL(const GrBuffer* drawIndirectBuffer, size_t offset,
94                                      int drawCount);
95     void onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
96                                int drawCount) override;
97     void multiDrawElementsANGLEOrWebGL(const GrBuffer* drawIndirectBuffer, size_t offset,
98                                        int drawCount);
99     void onClear(const GrScissorState& scissor, std::array<float, 4> color) override;
100     void onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) override;
101 
102     GrGLGpu* const fGpu;
103 
104     bool fUseMultisampleFBO;
105     SkIRect fContentBounds;
106     LoadAndStoreInfo fColorLoadAndStoreInfo;
107     StencilLoadAndStoreInfo fStencilLoadAndStoreInfo;
108 
109     // Per-pipeline state.
110     GrPrimitiveType fPrimitiveType;
111     GrGLAttribArrayState* fAttribArrayState = nullptr;
112 
113     // If using an index buffer, this gets set during onBindBuffers. It is either the CPU address of
114     // the indices, or nullptr if they reside physically in GPU memory.
115     const uint16_t* fIndexPointer;
116 
117     // This tracks whether or not we bound the respective buffers during the bindBuffers call.
118     SkDEBUGCODE(bool fDidBindVertexBuffer = false;)
119     SkDEBUGCODE(bool fDidBindInstanceBuffer = false;)
120 
121     using INHERITED = GrOpsRenderPass;
122 };
123 
124 #endif
125