/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_RendererProvider_DEFINED
#define skgpu_graphite_RendererProvider_DEFINED

#include "include/core/SkPathTypes.h"
#include "include/core/SkSpan.h"
#include "include/core/SkVertices.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/graphite/Renderer.h"

#include <cstdint>
#include <memory>
#include <vector>

namespace skgpu::graphite {

class Caps;
class StaticBufferManager;

#ifdef SK_ENABLE_VELLO_SHADERS
class VelloRenderer;
#endif

/**
 * Graphite defines a limited set of renderers to increase the likelihood of batching across draw
 * calls and to reduce the number of shader permutations required. These Renderers are stateless
 * singletons and remain alive for the life of the Context and its Recorders.
 *
 * Because Renderers are immutable and the defined Renderers are created at context initialization,
 * RendererProvider is trivially thread-safe.
 */
class RendererProvider {
public:
    static bool IsVelloRendererSupported(const Caps*);

    ~RendererProvider();

    // TODO: Add configuration options to disable "optimization" renderers in favor of the more
    // general case, or renderers that won't be used by the application. When that's added, these
    // functions could return null.

    // Path rendering for fills and strokes
    const Renderer* stencilTessellatedCurvesAndTris(SkPathFillType type) const {
        return &fStencilTessellatedCurves[(int) type];
    }
    const Renderer* stencilTessellatedWedges(SkPathFillType type) const {
        return &fStencilTessellatedWedges[(int) type];
    }
    const Renderer* convexTessellatedWedges() const { return &fConvexTessellatedWedges; }
    const Renderer* tessellatedStrokes() const { return &fTessellatedStrokes; }

    // Coverage mask rendering
    const Renderer* coverageMask() const { return &fCoverageMask; }

    // Atlased text rendering
    const Renderer* bitmapText(bool useLCDText, skgpu::MaskFormat format) const {
        // kA565 is used to represent all LCD rendering, regardless of the actual texture format.
        if (useLCDText) {
            return &fBitmapText[(int) skgpu::MaskFormat::kA565];
        }
        SkASSERT(format != skgpu::MaskFormat::kA565);
        return &fBitmapText[(int) format];
    }
    const Renderer* sdfText(bool useLCDText) const { return &fSDFText[useLCDText]; }

    // Mesh rendering
    const Renderer* vertices(SkVertices::VertexMode mode, bool hasColors, bool hasTexCoords) const {
        SkASSERT(mode != SkVertices::kTriangleFan_VertexMode); // Should be converted to kTriangles
        bool triStrip = mode == SkVertices::kTriangleStrip_VertexMode;
        return &fVertices[4*triStrip + 2*hasColors + hasTexCoords];
    }
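    // For reference, a sketch of the dense fVertices indexing used above (derived from the
    // expression; not an additional API):
    //   index = 4*(mode == kTriangleStrip) + 2*hasColors + hasTexCoords
    //   0..3: kTriangles     x {plain, +texCoords, +colors, +colors+texCoords}
    //   4..7: kTriangleStrip x {plain, +texCoords, +colors, +colors+texCoords}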

    // Filled and stroked [r]rects
    const Renderer* analyticRRect() const { return &fAnalyticRRect; }

    // Per-edge AA quadrilaterals
    const Renderer* perEdgeAAQuad() const { return &fPerEdgeAAQuad; }

    // Non-AA bounds filling (can handle inverse "fills" but will touch every pixel within the clip)
    const Renderer* nonAABounds() const { return &fNonAABoundsFill; }

    // Circular arcs
    const Renderer* circularArc() const { return &fCircularArc; }

    // Analytic blurs
    const Renderer* analyticBlur() const { return &fAnalyticBlur; }

    // TODO: May need to add support for inverse-filled strokes (need to check the SVG spec to see
    // if this is a real thing).

    // Iterate over all available Renderers to combine with specified paint combinations when
    // pre-compiling pipelines.
    SkSpan<const Renderer* const> renderers() const {
        return {fRenderers.data(), fRenderers.size()};
    }

    const RenderStep* lookup(uint32_t uniqueID) const;

#ifdef SK_ENABLE_VELLO_SHADERS
    // Compute shader-based path renderer and compositor.
    const VelloRenderer* velloRenderer() const { return fVelloRenderer.get(); }
#endif

private:
    static constexpr int kPathTypeCount = 4;  // the four SkPathFillType variants
    static constexpr int kVerticesCount = 8;  // 2 modes * 2 color configs * 2 tex coord configs

    friend class Context; // for ctor

    // TODO: Take in caps that determine which Renderers to use for each category
    RendererProvider(const Caps*, StaticBufferManager* bufferManager);

    // Cannot be moved or copied
    RendererProvider(const RendererProvider&) = delete;
    RendererProvider(RendererProvider&&) = delete;

    // Renderers are composed of one or more steps, and some steps can be shared by multiple
    // Renderers. Renderers don't keep their RenderSteps alive, so RendererProvider holds them here.
    std::vector<std::unique_ptr<RenderStep>> fRenderSteps;

    // NOTE: Keep all Renderers dense to support automatically completing 'fRenderers'.
    Renderer fStencilTessellatedCurves[kPathTypeCount];
    Renderer fStencilTessellatedWedges[kPathTypeCount];
    Renderer fConvexTessellatedWedges;
    Renderer fTessellatedStrokes;

    Renderer fCoverageMask;

    Renderer fBitmapText[3]; // indexed by (int) MaskFormat
    Renderer fSDFText[2];    // indexed by bool useLCDText

    Renderer fAnalyticRRect;
    Renderer fPerEdgeAAQuad;
    Renderer fNonAABoundsFill;
    Renderer fCircularArc;

    Renderer fAnalyticBlur;

    Renderer fVertices[kVerticesCount];

    // Aggregate of all enabled Renderers for convenient iteration when pre-compiling
    std::vector<const Renderer*> fRenderers;

#ifdef SK_ENABLE_VELLO_SHADERS
    std::unique_ptr<VelloRenderer> fVelloRenderer;
#endif
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_RendererProvider_DEFINED
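
// A minimal usage sketch (illustrative only, not part of this header). The provider is created by
// the Context; how a caller reaches it ('rendererProvider()' below) and how a Renderer exposes its
// steps ('steps()') are assumptions based on the surrounding Graphite API:
//
//   const RendererProvider* provider = context->priv().rendererProvider();
//   const Renderer* renderer = provider->stencilTessellatedWedges(SkPathFillType::kWinding);
//   for (const RenderStep* step : renderer->steps()) {
//       // Record one draw per RenderStep against the chosen pipeline.
//   }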