/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h"

#include "include/gpu/ShaderErrorHandler.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "src/core/SkSLTypeShared.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/SkSLToBackend.h"
#include "src/gpu/graphite/Attribute.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderInfo.h"
#include "src/gpu/graphite/vk/VulkanCaps.h"
#include "src/gpu/graphite/vk/VulkanRenderPass.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"
#include "src/sksl/SkSLProgramKind.h"
#include "src/sksl/SkSLProgramSettings.h"
#include "src/sksl/ir/SkSLProgram.h"

namespace skgpu::graphite {

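// Map a Graphite VertexAttribType to the corresponding Vulkan vertex input format.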
static inline VkFormat attrib_type_to_vkformat(VertexAttribType type) {
    switch (type) {
        case VertexAttribType::kFloat:
            return VK_FORMAT_R32_SFLOAT;
        case VertexAttribType::kFloat2:
            return VK_FORMAT_R32G32_SFLOAT;
        case VertexAttribType::kFloat3:
            return VK_FORMAT_R32G32B32_SFLOAT;
        case VertexAttribType::kFloat4:
            return VK_FORMAT_R32G32B32A32_SFLOAT;
        case VertexAttribType::kHalf:
            return VK_FORMAT_R16_SFLOAT;
        case VertexAttribType::kHalf2:
            return VK_FORMAT_R16G16_SFLOAT;
        case VertexAttribType::kHalf4:
            return VK_FORMAT_R16G16B16A16_SFLOAT;
        case VertexAttribType::kInt2:
            return VK_FORMAT_R32G32_SINT;
        case VertexAttribType::kInt3:
            return VK_FORMAT_R32G32B32_SINT;
        case VertexAttribType::kInt4:
            return VK_FORMAT_R32G32B32A32_SINT;
        case VertexAttribType::kUInt2:
            return VK_FORMAT_R32G32_UINT;
        case VertexAttribType::kByte:
            return VK_FORMAT_R8_SINT;
        case VertexAttribType::kByte2:
            return VK_FORMAT_R8G8_SINT;
        case VertexAttribType::kByte4:
            return VK_FORMAT_R8G8B8A8_SINT;
        case VertexAttribType::kUByte:
            return VK_FORMAT_R8_UINT;
        case VertexAttribType::kUByte2:
            return VK_FORMAT_R8G8_UINT;
        case VertexAttribType::kUByte4:
            return VK_FORMAT_R8G8B8A8_UINT;
        case VertexAttribType::kUByte_norm:
            return VK_FORMAT_R8_UNORM;
        case VertexAttribType::kUByte4_norm:
            return VK_FORMAT_R8G8B8A8_UNORM;
        case VertexAttribType::kShort2:
            return VK_FORMAT_R16G16_SINT;
        case VertexAttribType::kShort4:
            return VK_FORMAT_R16G16B16A16_SINT;
        case VertexAttribType::kUShort2:
            return VK_FORMAT_R16G16_UINT;
        case VertexAttribType::kUShort2_norm:
            return VK_FORMAT_R16G16_UNORM;
        case VertexAttribType::kInt:
            return VK_FORMAT_R32_SINT;
        case VertexAttribType::kUInt:
            return VK_FORMAT_R32_UINT;
        case VertexAttribType::kUShort_norm:
            return VK_FORMAT_R16_UNORM;
        case VertexAttribType::kUShort4_norm:
            return VK_FORMAT_R16G16B16A16_UNORM;
    }
    SK_ABORT("Unknown vertex attrib type");
}

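// Build the Vulkan vertex input state: one attribute description per vertex/instance attribute
// and, when requested, a binding description for the vertex buffer and one for the instance
// buffer (using kVertexBufferIndex and kInstanceBufferIndex as binding indices).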
static void setup_vertex_input_state(
        const SkSpan<const Attribute>& vertexAttrs,
        const SkSpan<const Attribute>& instanceAttrs,
        VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
        skia_private::STArray<2, VkVertexInputBindingDescription, true>* bindingDescs,
        skia_private::STArray<16, VkVertexInputAttributeDescription>* attributeDescs) {
    // Setup attribute & binding descriptions
    int attribIndex = 0;
    size_t vertexAttributeOffset = 0;
    for (auto attrib : vertexAttrs) {
        VkVertexInputAttributeDescription vkAttrib;
        vkAttrib.location = attribIndex++;
        vkAttrib.binding = VulkanGraphicsPipeline::kVertexBufferIndex;
        vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
        vkAttrib.offset = vertexAttributeOffset;
        vertexAttributeOffset += attrib.sizeAlign4();
        attributeDescs->push_back(vkAttrib);
    }

    size_t instanceAttributeOffset = 0;
    for (auto attrib : instanceAttrs) {
        VkVertexInputAttributeDescription vkAttrib;
        vkAttrib.location = attribIndex++;
        vkAttrib.binding = VulkanGraphicsPipeline::kInstanceBufferIndex;
        vkAttrib.format = attrib_type_to_vkformat(attrib.cpuType());
        vkAttrib.offset = instanceAttributeOffset;
        instanceAttributeOffset += attrib.sizeAlign4();
        attributeDescs->push_back(vkAttrib);
    }

    if (bindingDescs && !vertexAttrs.empty()) {
        bindingDescs->push_back() = {
                VulkanGraphicsPipeline::kVertexBufferIndex,
                (uint32_t) vertexAttributeOffset,
                VK_VERTEX_INPUT_RATE_VERTEX
        };
    }
    if (bindingDescs && !instanceAttrs.empty()) {
        bindingDescs->push_back() = {
                VulkanGraphicsPipeline::kInstanceBufferIndex,
                (uint32_t) instanceAttributeOffset,
                VK_VERTEX_INPUT_RATE_INSTANCE
        };
    }

    memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
    vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vertexInputInfo->pNext = nullptr;
    vertexInputInfo->flags = 0;
    vertexInputInfo->vertexBindingDescriptionCount = bindingDescs ? bindingDescs->size() : 0;
    vertexInputInfo->pVertexBindingDescriptions =
            bindingDescs && !bindingDescs->empty() ? bindingDescs->begin() : VK_NULL_HANDLE;
    vertexInputInfo->vertexAttributeDescriptionCount = attributeDescs ? attributeDescs->size() : 0;
    vertexInputInfo->pVertexAttributeDescriptions =
            attributeDescs && !attributeDescs->empty() ? attributeDescs->begin() : VK_NULL_HANDLE;
}

static VkPrimitiveTopology primitive_type_to_vk_topology(PrimitiveType primitiveType) {
    switch (primitiveType) {
        case PrimitiveType::kTriangles:
            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
        case PrimitiveType::kTriangleStrip:
            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
        case PrimitiveType::kPoints:
            return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    }
    SkUNREACHABLE;
}

static void setup_input_assembly_state(PrimitiveType primitiveType,
                                       VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
    memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
    inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    inputAssemblyInfo->pNext = nullptr;
    inputAssemblyInfo->flags = 0;
    inputAssemblyInfo->primitiveRestartEnable = false;
    inputAssemblyInfo->topology = primitive_type_to_vk_topology(primitiveType);
}

static VkStencilOp stencil_op_to_vk_stencil_op(StencilOp op) {
    static const VkStencilOp gTable[] = {
        VK_STENCIL_OP_KEEP,                 // kKeep
        VK_STENCIL_OP_ZERO,                 // kZero
        VK_STENCIL_OP_REPLACE,              // kReplace
        VK_STENCIL_OP_INVERT,               // kInvert
        VK_STENCIL_OP_INCREMENT_AND_WRAP,   // kIncWrap
        VK_STENCIL_OP_DECREMENT_AND_WRAP,   // kDecWrap
        VK_STENCIL_OP_INCREMENT_AND_CLAMP,  // kIncClamp
        VK_STENCIL_OP_DECREMENT_AND_CLAMP,  // kDecClamp
    };
    static_assert(std::size(gTable) == kStencilOpCount);
    static_assert(0 == (int)StencilOp::kKeep);
    static_assert(1 == (int)StencilOp::kZero);
    static_assert(2 == (int)StencilOp::kReplace);
    static_assert(3 == (int)StencilOp::kInvert);
    static_assert(4 == (int)StencilOp::kIncWrap);
    static_assert(5 == (int)StencilOp::kDecWrap);
    static_assert(6 == (int)StencilOp::kIncClamp);
    static_assert(7 == (int)StencilOp::kDecClamp);
    SkASSERT(op < (StencilOp)kStencilOpCount);
    return gTable[(int)op];
}

static VkCompareOp compare_op_to_vk_compare_op(CompareOp op) {
    static const VkCompareOp gTable[] = {
        VK_COMPARE_OP_ALWAYS,              // kAlways
        VK_COMPARE_OP_NEVER,               // kNever
        VK_COMPARE_OP_GREATER,             // kGreater
        VK_COMPARE_OP_GREATER_OR_EQUAL,    // kGEqual
        VK_COMPARE_OP_LESS,                // kLess
        VK_COMPARE_OP_LESS_OR_EQUAL,       // kLEqual
        VK_COMPARE_OP_EQUAL,               // kEqual
        VK_COMPARE_OP_NOT_EQUAL,           // kNotEqual
    };
    static_assert(std::size(gTable) == kCompareOpCount);
    static_assert(0 == (int)CompareOp::kAlways);
    static_assert(1 == (int)CompareOp::kNever);
    static_assert(2 == (int)CompareOp::kGreater);
    static_assert(3 == (int)CompareOp::kGEqual);
    static_assert(4 == (int)CompareOp::kLess);
    static_assert(5 == (int)CompareOp::kLEqual);
    static_assert(6 == (int)CompareOp::kEqual);
    static_assert(7 == (int)CompareOp::kNotEqual);
    SkASSERT(op < (CompareOp)kCompareOpCount);

    return gTable[(int)op];
}

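// Translate one face of the DepthStencilSettings into a VkStencilOpState.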
static void setup_stencil_op_state(VkStencilOpState* opState,
                                   const DepthStencilSettings::Face& face,
                                   uint32_t referenceValue) {
    opState->failOp = stencil_op_to_vk_stencil_op(face.fStencilFailOp);
    opState->passOp = stencil_op_to_vk_stencil_op(face.fDepthStencilPassOp);
    opState->depthFailOp = stencil_op_to_vk_stencil_op(face.fDepthFailOp);
    opState->compareOp = compare_op_to_vk_compare_op(face.fCompareOp);
    opState->compareMask = face.fReadMask; // TODO - check this.
    opState->writeMask = face.fWriteMask;
    opState->reference = referenceValue;
}

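// Translate DepthStencilSettings into the pipeline's depth/stencil state. Depth bounds testing
// is never enabled.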
static void setup_depth_stencil_state(const DepthStencilSettings& stencilSettings,
                                      VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
    SkASSERT(stencilSettings.fDepthTestEnabled ||
             stencilSettings.fDepthCompareOp == CompareOp::kAlways);

    memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
    stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    stencilInfo->pNext = nullptr;
    stencilInfo->flags = 0;
    stencilInfo->depthTestEnable = stencilSettings.fDepthTestEnabled;
    stencilInfo->depthWriteEnable = stencilSettings.fDepthWriteEnabled;
    stencilInfo->depthCompareOp = compare_op_to_vk_compare_op(stencilSettings.fDepthCompareOp);
    stencilInfo->depthBoundsTestEnable = VK_FALSE; // Default value TODO - Confirm
    stencilInfo->stencilTestEnable = stencilSettings.fStencilTestEnabled;
    if (stencilSettings.fStencilTestEnabled) {
        setup_stencil_op_state(&stencilInfo->front,
                               stencilSettings.fFrontStencil,
                               stencilSettings.fStencilReferenceValue);
        setup_stencil_op_state(&stencilInfo->back,
                               stencilSettings.fBackStencil,
                               stencilSettings.fStencilReferenceValue);
    }
    stencilInfo->minDepthBounds = 0.0f;
    stencilInfo->maxDepthBounds = 1.0f;
}

static void setup_viewport_scissor_state(VkPipelineViewportStateCreateInfo* viewportInfo) {
    memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
    viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewportInfo->pNext = nullptr;
    viewportInfo->flags = 0;

    viewportInfo->viewportCount = 1;
    viewportInfo->pViewports = nullptr; // This is set dynamically with a draw pass command

    viewportInfo->scissorCount = 1;
    viewportInfo->pScissors = nullptr; // This is set dynamically with a draw pass command

    SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
}

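// Sample shading, alpha-to-coverage, and alpha-to-one are left disabled; only the rasterization
// sample count varies per pipeline.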
static void setup_multisample_state(int numSamples,
                                    VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
    memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
    multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    multisampleInfo->pNext = nullptr;
    multisampleInfo->flags = 0;
    SkAssertResult(skgpu::SampleCountToVkSampleCount(numSamples,
                                                     &multisampleInfo->rasterizationSamples));
    multisampleInfo->sampleShadingEnable = VK_FALSE;
    multisampleInfo->minSampleShading = 0.0f;
    multisampleInfo->pSampleMask = nullptr;
    multisampleInfo->alphaToCoverageEnable = VK_FALSE;
    multisampleInfo->alphaToOneEnable = VK_FALSE;
}

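// Map skgpu blend coefficients to VkBlendFactor values; kIllegal maps to VK_BLEND_FACTOR_ZERO.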
static VkBlendFactor blend_coeff_to_vk_blend(skgpu::BlendCoeff coeff) {
    switch (coeff) {
        case skgpu::BlendCoeff::kZero:
            return VK_BLEND_FACTOR_ZERO;
        case skgpu::BlendCoeff::kOne:
            return VK_BLEND_FACTOR_ONE;
        case skgpu::BlendCoeff::kSC:
            return VK_BLEND_FACTOR_SRC_COLOR;
        case skgpu::BlendCoeff::kISC:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
        case skgpu::BlendCoeff::kDC:
            return VK_BLEND_FACTOR_DST_COLOR;
        case skgpu::BlendCoeff::kIDC:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
        case skgpu::BlendCoeff::kSA:
            return VK_BLEND_FACTOR_SRC_ALPHA;
        case skgpu::BlendCoeff::kISA:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
        case skgpu::BlendCoeff::kDA:
            return VK_BLEND_FACTOR_DST_ALPHA;
        case skgpu::BlendCoeff::kIDA:
            return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
        case skgpu::BlendCoeff::kConstC:
            return VK_BLEND_FACTOR_CONSTANT_COLOR;
        case skgpu::BlendCoeff::kIConstC:
            return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
        case skgpu::BlendCoeff::kS2C:
            return VK_BLEND_FACTOR_SRC1_COLOR;
        case skgpu::BlendCoeff::kIS2C:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
        case skgpu::BlendCoeff::kS2A:
            return VK_BLEND_FACTOR_SRC1_ALPHA;
        case skgpu::BlendCoeff::kIS2A:
            return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
        case skgpu::BlendCoeff::kIllegal:
            return VK_BLEND_FACTOR_ZERO;
    }
    SkUNREACHABLE;
}

static VkBlendOp blend_equation_to_vk_blend_op(skgpu::BlendEquation equation) {
    static const VkBlendOp gTable[] = {
        // Basic blend ops
        VK_BLEND_OP_ADD,
        VK_BLEND_OP_SUBTRACT,
        VK_BLEND_OP_REVERSE_SUBTRACT,

        // Advanced blend ops
        VK_BLEND_OP_SCREEN_EXT,
        VK_BLEND_OP_OVERLAY_EXT,
        VK_BLEND_OP_DARKEN_EXT,
        VK_BLEND_OP_LIGHTEN_EXT,
        VK_BLEND_OP_COLORDODGE_EXT,
        VK_BLEND_OP_COLORBURN_EXT,
        VK_BLEND_OP_HARDLIGHT_EXT,
        VK_BLEND_OP_SOFTLIGHT_EXT,
        VK_BLEND_OP_DIFFERENCE_EXT,
        VK_BLEND_OP_EXCLUSION_EXT,
        VK_BLEND_OP_MULTIPLY_EXT,
        VK_BLEND_OP_HSL_HUE_EXT,
        VK_BLEND_OP_HSL_SATURATION_EXT,
        VK_BLEND_OP_HSL_COLOR_EXT,
        VK_BLEND_OP_HSL_LUMINOSITY_EXT,

        // Illegal.
        VK_BLEND_OP_ADD,
    };
    static_assert(0 == (int)skgpu::BlendEquation::kAdd);
    static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
    static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
    static_assert(3 == (int)skgpu::BlendEquation::kScreen);
    static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
    static_assert(5 == (int)skgpu::BlendEquation::kDarken);
    static_assert(6 == (int)skgpu::BlendEquation::kLighten);
    static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
    static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
    static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
    static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
    static_assert(11 == (int)skgpu::BlendEquation::kDifference);
    static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
    static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
    static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
    static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
    static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
    static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
    static_assert(std::size(gTable) == skgpu::kBlendEquationCnt);

    SkASSERT((unsigned)equation < skgpu::kBlendEquationCnt);
    return gTable[(int)equation];
}

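// Translate the Graphite BlendInfo into a single-attachment color blend state. Blending is
// disabled outright when BlendShouldDisable() reports that the equation and coefficients make
// it unnecessary.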
static void setup_color_blend_state(const skgpu::BlendInfo& blendInfo,
                                    VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
                                    VkPipelineColorBlendAttachmentState* attachmentState) {
    skgpu::BlendEquation equation = blendInfo.fEquation;
    skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
    skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
    bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff);

    memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
    attachmentState->blendEnable = !blendOff;
    if (!blendOff) {
        attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
        attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
        attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
        attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
        attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
        attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
    }

    if (!blendInfo.fWritesColor) {
        attachmentState->colorWriteMask = 0;
    } else {
        attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                                          VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
    }

    memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
    colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    colorBlendInfo->pNext = nullptr;
    colorBlendInfo->flags = 0;
    colorBlendInfo->logicOpEnable = VK_FALSE;
    colorBlendInfo->attachmentCount = 1;
    colorBlendInfo->pAttachments = attachmentState;
    // colorBlendInfo->blendConstants is set dynamically
}

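// Rasterization defaults: fill (or line for wireframe) polygons, no culling, counter-clockwise
// front faces, and no depth bias.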
static void setup_raster_state(bool isWireframe,
                               VkPipelineRasterizationStateCreateInfo* rasterInfo) {
    memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
    rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rasterInfo->pNext = nullptr;
    rasterInfo->flags = 0;
    rasterInfo->depthClampEnable = VK_FALSE;
    rasterInfo->rasterizerDiscardEnable = VK_FALSE;
    rasterInfo->polygonMode = isWireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
    rasterInfo->cullMode = VK_CULL_MODE_NONE;
    rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
    rasterInfo->depthBiasEnable = VK_FALSE;
    rasterInfo->depthBiasConstantFactor = 0.0f;
    rasterInfo->depthBiasClamp = 0.0f;
    rasterInfo->depthBiasSlopeFactor = 0.0f;
    rasterInfo->lineWidth = 1.0f;
}

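// Fill out a VkPipelineShaderStageCreateInfo for the given stage and module. The shader entry
// point is always "main".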
static void setup_shader_stage_info(VkShaderStageFlagBits stage,
                                    VkShaderModule shaderModule,
                                    VkPipelineShaderStageCreateInfo* shaderStageInfo) {
    memset(shaderStageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
    shaderStageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    shaderStageInfo->pNext = nullptr;
    shaderStageInfo->flags = 0;
    shaderStageInfo->stage = stage;
    shaderStageInfo->module = shaderModule;
    shaderStageInfo->pName = "main";
    shaderStageInfo->pSpecializationInfo = nullptr;
}

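// Convert a span of DescriptorData into a VkDescriptorSetLayout, returning VK_NULL_HANDLE if
// the span is empty or layout creation fails.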
static VkDescriptorSetLayout descriptor_data_to_layout(const VulkanSharedContext* sharedContext,
        const SkSpan<DescriptorData>& descriptorData) {
    if (descriptorData.empty()) { return VK_NULL_HANDLE; }

    VkDescriptorSetLayout setLayout;
    DescriptorDataToVkDescSetLayout(sharedContext, descriptorData, &setLayout);
    if (setLayout == VK_NULL_HANDLE) {
        SKGPU_LOG_E("Failed to create descriptor set layout; pipeline creation will fail.\n");
        return VK_NULL_HANDLE;
    }
    return setLayout;
}

static void destroy_desc_set_layouts(const VulkanSharedContext* sharedContext,
                                     skia_private::TArray<VkDescriptorSetLayout>& setLayouts) {
    for (int i = 0; i < setLayouts.size(); i++) {
        if (setLayouts[i] != VK_NULL_HANDLE) {
            VULKAN_CALL(sharedContext->interface(),
                        DestroyDescriptorSetLayout(sharedContext->device(),
                                                   setLayouts[i],
                                                   nullptr));
        }
    }
}

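// Gather the descriptor set layouts this pipeline needs (uniform buffers, optional input
// attachment, texture/samplers), create a VkPipelineLayout from them, and then destroy the
// intermediate set layouts. Returns VK_NULL_HANDLE on failure.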
static VkPipelineLayout setup_pipeline_layout(const VulkanSharedContext* sharedContext,
                                              bool usesIntrinsicConstantUbo,
                                              bool useMSAALoadPushConstant,
                                              bool hasStepUniforms,
                                              bool hasPaintUniforms,
                                              bool hasGradientBuffer,
                                              int numTextureSamplers,
                                              int numInputAttachments,
                                              SkSpan<sk_sp<VulkanSampler>> immutableSamplers) {
    SkASSERT(!useMSAALoadPushConstant ||
             (!usesIntrinsicConstantUbo && !hasStepUniforms && !hasPaintUniforms));
    // Determine descriptor set layouts for this pipeline based upon render pass information.
    skia_private::STArray<3, VkDescriptorSetLayout> setLayouts;

    // Determine uniform descriptor set layout
    skia_private::STArray<VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData>
            uniformDescriptors;
    if (usesIntrinsicConstantUbo) {
        uniformDescriptors.push_back(VulkanGraphicsPipeline::kIntrinsicUniformBufferDescriptor);
    }

    DescriptorType uniformBufferType = sharedContext->caps()->storageBufferSupport()
                                            ? DescriptorType::kStorageBuffer
                                            : DescriptorType::kUniformBuffer;
    if (hasStepUniforms) {
        uniformDescriptors.push_back({
            uniformBufferType,
            /*count=*/1,
            VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
            PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader});
    }
    if (hasPaintUniforms) {
        uniformDescriptors.push_back({
            uniformBufferType,
            /*count=*/1,
            VulkanGraphicsPipeline::kPaintUniformBufferIndex,
            PipelineStageFlags::kFragmentShader});
    }
    if (hasGradientBuffer) {
        uniformDescriptors.push_back({
            DescriptorType::kStorageBuffer,
            /*count=*/1,
            VulkanGraphicsPipeline::kGradientBufferIndex,
            PipelineStageFlags::kFragmentShader});
    }

    if (!uniformDescriptors.empty()) {
        VkDescriptorSetLayout uniformSetLayout =
                descriptor_data_to_layout(sharedContext, {uniformDescriptors});
        if (uniformSetLayout == VK_NULL_HANDLE) { return VK_NULL_HANDLE; }
        setLayouts.push_back(uniformSetLayout);
    }

    // Determine input attachment descriptor set layout
    if (numInputAttachments > 0) {
        // For now, we only expect to have up to 1 input attachment. We also share that descriptor
        // set number with uniform descriptors for normal graphics pipeline usages, so verify that
        // we are not using any uniform descriptors to avoid conflicts.
        SkASSERT(numInputAttachments == 1 && uniformDescriptors.empty());
        skia_private::TArray<DescriptorData> inputAttachmentDescriptors(numInputAttachments);
        inputAttachmentDescriptors.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);

        VkDescriptorSetLayout inputAttachmentDescSetLayout =
                descriptor_data_to_layout(sharedContext, {inputAttachmentDescriptors});

        if (inputAttachmentDescSetLayout == VK_NULL_HANDLE) {
            destroy_desc_set_layouts(sharedContext, setLayouts);
            return VK_NULL_HANDLE;
        }
        setLayouts.push_back(inputAttachmentDescSetLayout);
    }

    // Determine texture/sampler descriptor set layout
    if (numTextureSamplers > 0) {
        skia_private::TArray<DescriptorData> textureSamplerDescs(numTextureSamplers);
        // The immutable sampler span size must equal the total number of texture/samplers such
        // that we can use the index of a sampler as its binding index (or we just have none, which
        // enables us to skip some of this logic entirely).
        SkASSERT(immutableSamplers.empty() ||
                 SkTo<int>(immutableSamplers.size()) == numTextureSamplers);

        for (int i = 0; i < numTextureSamplers; i++) {
            Sampler* immutableSampler = nullptr;
            if (!immutableSamplers.empty() && immutableSamplers[i]) {
                immutableSampler = immutableSamplers[i].get();
            }
            textureSamplerDescs.push_back({DescriptorType::kCombinedTextureSampler,
                                           /*count=*/1,
                                           /*bindingIdx=*/i,
                                           PipelineStageFlags::kFragmentShader,
                                           immutableSampler});
        }

        VkDescriptorSetLayout textureSamplerDescSetLayout =
                descriptor_data_to_layout(sharedContext, {textureSamplerDescs});

        if (textureSamplerDescSetLayout == VK_NULL_HANDLE) {
            destroy_desc_set_layouts(sharedContext, setLayouts);
            return VK_NULL_HANDLE;
        }
        setLayouts.push_back(textureSamplerDescSetLayout);
    }

    VkPushConstantRange pushConstantRange;
    if (useMSAALoadPushConstant) {
        pushConstantRange.offset = 0;
        pushConstantRange.size = 32;
        pushConstantRange.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    }

    // Generate a pipeline layout using the now-populated descriptor set layout array
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = setLayouts.size();
    layoutCreateInfo.pSetLayouts = setLayouts.begin();
    // TODO: Add support for push constants.
    layoutCreateInfo.pushConstantRangeCount = useMSAALoadPushConstant ? 1 : 0;
    layoutCreateInfo.pPushConstantRanges = useMSAALoadPushConstant ? &pushConstantRange : nullptr;

    VkResult result;
    VkPipelineLayout layout;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreatePipelineLayout(sharedContext->device(),
                                            &layoutCreateInfo,
                                            /*const VkAllocationCallbacks*=*/nullptr,
                                            &layout));

    // DescriptorSetLayouts can be deleted after the pipeline layout is created.
    destroy_desc_set_layouts(sharedContext, setLayouts);

    return result == VK_SUCCESS ? layout : VK_NULL_HANDLE;
}

static void destroy_shader_modules(const VulkanSharedContext* sharedContext,
                                   VkShaderModule vsModule,
                                   VkShaderModule fsModule) {
    if (vsModule != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyShaderModule(sharedContext->device(), vsModule, nullptr));
    }
    if (fsModule != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyShaderModule(sharedContext->device(), fsModule, nullptr));
    }
}

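// Viewport, scissor, and blend constants are all supplied dynamically while recording a draw
// pass rather than being baked into the pipeline.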
static void setup_dynamic_state(VkPipelineDynamicStateCreateInfo* dynamicInfo,
                                VkDynamicState* dynamicStates) {
    memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
    dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    dynamicInfo->pNext = VK_NULL_HANDLE;
    dynamicInfo->flags = 0;
    dynamicStates[0] = VK_DYNAMIC_STATE_VIEWPORT;
    dynamicStates[1] = VK_DYNAMIC_STATE_SCISSOR;
    dynamicStates[2] = VK_DYNAMIC_STATE_BLEND_CONSTANTS;
    dynamicInfo->dynamicStateCount = 3;
    dynamicInfo->pDynamicStates = dynamicStates;
}

sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::Make(
        VulkanResourceProvider* rsrcProvider,
        const RuntimeEffectDictionary* runtimeDict,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc,
        SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags) {
    SkASSERT(rsrcProvider);
    SkSL::Program::Interface vsInterface, fsInterface;

    SkSL::ProgramSettings settings;
    settings.fSharpenTextures = true;
    settings.fForceNoRTFlip = true; // TODO: Confirm

    const VulkanSharedContext* sharedContext = rsrcProvider->vulkanSharedContext();
    ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();

    const RenderStep* step = sharedContext->rendererProvider()->lookup(pipelineDesc.renderStepID());
    const bool useStorageBuffers = sharedContext->caps()->storageBufferSupport();

    if (step->vertexAttributes().size() + step->instanceAttributes().size() >
        sharedContext->vulkanCaps().maxVertexAttributes()) {
        SKGPU_LOG_W("Requested more than the supported number of vertex attributes");
        return nullptr;
    }

    skia_private::TArray<SamplerDesc> descContainer {};
    std::unique_ptr<ShaderInfo> shaderInfo = ShaderInfo::Make(sharedContext->caps(),
                                                              sharedContext->shaderCodeDictionary(),
                                                              runtimeDict,
                                                              step,
                                                              pipelineDesc.paintParamsID(),
                                                              useStorageBuffers,
                                                              renderPassDesc.fWriteSwizzle,
                                                              &descContainer);

    // Populate an array of sampler ptrs where a sampler's index within the array indicates its
    // binding index within the descriptor set. Initialize all values to nullptr, which represents
    // a "regular", dynamic sampler at that index.
    skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers;
    immutableSamplers.push_back_n(shaderInfo->numFragmentTexturesAndSamplers());
    SkASSERT(rsrcProvider);
    // This logic relies upon Vulkan using combined texture/sampler bindings, which is necessary for
    // ycbcr samplers per the Vulkan spec.
    SkASSERT(!sharedContext->caps()->resourceBindingRequirements().fSeparateTextureAndSamplerBinding
             && shaderInfo->numFragmentTexturesAndSamplers() == descContainer.size());
    for (int i = 0; i < descContainer.size(); i++) {
        // If a SamplerDesc is not equivalent to the default-initialized SamplerDesc, that indicates
        // the usage of an immutable sampler. That sampler desc should then be used to obtain an
        // actual immutable sampler from the resource provider and added at the proper index within
        // immutableSamplers for inclusion in the pipeline layout.
        if (descContainer.at(i) != SamplerDesc()) {
            sk_sp<Sampler> immutableSampler =
                    rsrcProvider->findOrCreateCompatibleSampler(descContainer.at(i));
            sk_sp<VulkanSampler> vulkanSampler =
                    sk_ref_sp<VulkanSampler>(static_cast<VulkanSampler*>(immutableSampler.get()));
            SkASSERT(vulkanSampler);
            immutableSamplers[i] = std::move(vulkanSampler);
        }
    }

    const std::string& fsSkSL = shaderInfo->fragmentSkSL();

    const bool hasFragmentSkSL = !fsSkSL.empty();
    std::string vsSPIRV, fsSPIRV;
    VkShaderModule fsModule = VK_NULL_HANDLE, vsModule = VK_NULL_HANDLE;

    if (hasFragmentSkSL) {
        if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                                fsSkSL,
                                SkSL::ProgramKind::kGraphiteFragment,
                                settings,
                                &fsSPIRV,
                                &fsInterface,
                                errorHandler)) {
            return nullptr;
        }

        fsModule = createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
        if (!fsModule) {
            return nullptr;
        }
    }

    const std::string& vsSkSL = shaderInfo->vertexSkSL();
    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            vsSkSL,
                            SkSL::ProgramKind::kGraphiteVertex,
                            settings,
                            &vsSPIRV,
                            &vsInterface,
                            errorHandler)) {
        return nullptr;
    }

    vsModule = createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
    if (!vsModule) {
        // Clean up the other shader module before returning.
        destroy_shader_modules(sharedContext, VK_NULL_HANDLE, fsModule);
        return nullptr;
    }

    VkPipelineVertexInputStateCreateInfo vertexInputInfo;
    skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
    skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
    setup_vertex_input_state(step->vertexAttributes(),
                             step->instanceAttributes(),
                             &vertexInputInfo,
                             &bindingDescs,
                             &attributeDescs);

    VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
    setup_input_assembly_state(step->primitiveType(), &inputAssemblyInfo);

    VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
    setup_depth_stencil_state(step->depthStencilSettings(), &depthStencilInfo);

    VkPipelineViewportStateCreateInfo viewportInfo;
    setup_viewport_scissor_state(&viewportInfo);

    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(renderPassDesc.fColorAttachment.fTextureInfo.numSamples(),
                            &multisampleInfo);

    // We will only have one color blend attachment per pipeline.
    VkPipelineColorBlendAttachmentState attachmentStates[1];
    VkPipelineColorBlendStateCreateInfo colorBlendInfo;
    setup_color_blend_state(shaderInfo->blendInfo(), &colorBlendInfo, attachmentStates);

    VkPipelineRasterizationStateCreateInfo rasterInfo;
    // TODO: Check for wire frame mode once that is an available context option within graphite.
    setup_raster_state(/*isWireframe=*/false, &rasterInfo);

    VkPipelineShaderStageCreateInfo pipelineShaderStages[2];
    setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
                            vsModule,
                            &pipelineShaderStages[0]);
    if (hasFragmentSkSL) {
        setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
                                fsModule,
                                &pipelineShaderStages[1]);
    }

    // TODO: Query RenderPassDesc for input attachment information. For now, we only use one for
    // loading MSAA from resolve so we can simply pass in 0 when not doing that.
    VkPipelineLayout pipelineLayout =
            setup_pipeline_layout(sharedContext,
                                  /*usesIntrinsicConstantUbo=*/true,
                                  /*useMSAALoadPushConstant=*/false,
                                  !step->uniforms().empty(),
                                  shaderInfo->hasPaintUniforms(),
                                  shaderInfo->hasGradientBuffer(),
                                  shaderInfo->numFragmentTexturesAndSamplers(),
                                  /*numInputAttachments=*/0,
                                  SkSpan<sk_sp<VulkanSampler>>(immutableSamplers));

    if (pipelineLayout == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, vsModule, fsModule);
        return nullptr;
    }

    VkDynamicState dynamicStates[3];
    VkPipelineDynamicStateCreateInfo dynamicInfo;
    setup_dynamic_state(&dynamicInfo, dynamicStates);

    bool loadMsaaFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
                               renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;

    sk_sp<VulkanRenderPass> compatibleRenderPass =
            rsrcProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/true);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = hasFragmentSkSL ? 2 : 1;
    pipelineCreateInfo.pStages = &pipelineShaderStages[0];
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = pipelineLayout;
    pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();
    pipelineCreateInfo.subpass = loadMsaaFromResolve ? 1 : 0;
    pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
    pipelineCreateInfo.basePipelineIndex = -1;

    VkPipeline vkPipeline;
    VkResult result;
    {
        TRACE_EVENT0_ALWAYS("skia.shaders", "VkCreateGraphicsPipeline");
        VULKAN_CALL_RESULT(sharedContext,
                           result,
                           CreateGraphicsPipelines(sharedContext->device(),
                                                   rsrcProvider->pipelineCache(),
                                                   /*createInfoCount=*/1,
                                                   &pipelineCreateInfo,
                                                   /*pAllocator=*/nullptr,
                                                   &vkPipeline));
    }
    if (result != VK_SUCCESS) {
        SkDebugf("Failed to create pipeline. Error: %d\n", result);
        return nullptr;
    }

    // After creating the pipeline object, we can clean up the VkShaderModule(s).
    destroy_shader_modules(sharedContext, vsModule, fsModule);

    PipelineInfo pipelineInfo{*shaderInfo, pipelineCreationFlags};
#if defined(GPU_TEST_UTILS)
    pipelineInfo.fNativeVertexShader   = "SPIR-V disassembly not available";
    pipelineInfo.fNativeFragmentShader = "SPIR-V disassembly not available";
#endif

    return sk_sp<VulkanGraphicsPipeline>(
            new VulkanGraphicsPipeline(sharedContext,
                                       pipelineInfo,
                                       pipelineLayout,
                                       vkPipeline,
                                       /*ownsPipelineLayout=*/true,
                                       std::move(immutableSamplers)));
}

bool VulkanGraphicsPipeline::InitializeMSAALoadPipelineStructs(
        const VulkanSharedContext* sharedContext,
        VkShaderModule* outVertexShaderModule,
        VkShaderModule* outFragShaderModule,
        VkPipelineShaderStageCreateInfo* outShaderStageInfo,
        VkPipelineLayout* outPipelineLayout) {
    SkSL::Program::Interface vsInterface, fsInterface;
    SkSL::ProgramSettings settings;
    settings.fForceNoRTFlip = true;
    std::string vsSPIRV, fsSPIRV;
    ShaderErrorHandler* errorHandler = sharedContext->caps()->shaderErrorHandler();

    std::string vertShaderText;
    vertShaderText.append(
            "layout(vulkan,  push_constant) uniform vertexUniformBuffer {"
            "half4 uPosXform;"
            "};"

            "// MSAA Load Program VS\n"
            "void main() {"
            "float2 position = float2(sk_VertexID >> 1, sk_VertexID & 1);"
            "sk_Position.xy = position * uPosXform.xy + uPosXform.zw;"
            "sk_Position.zw = half2(0, 1);"
            "}");

    std::string fragShaderText;
    fragShaderText.append(
            "layout(vulkan, input_attachment_index=0, set=0, binding=0) subpassInput uInput;"

            "// MSAA Load Program FS\n"
            "void main() {"
            "sk_FragColor = subpassLoad(uInput);"
            "}");

    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            vertShaderText,
                            SkSL::ProgramKind::kGraphiteVertex,
                            settings,
                            &vsSPIRV,
                            &vsInterface,
                            errorHandler)) {
        return false;
    }
    if (!skgpu::SkSLToSPIRV(sharedContext->caps()->shaderCaps(),
                            fragShaderText,
                            SkSL::ProgramKind::kGraphiteFragment,
                            settings,
                            &fsSPIRV,
                            &fsInterface,
                            errorHandler)) {
        return false;
    }
    *outFragShaderModule =
            createVulkanShaderModule(sharedContext, fsSPIRV, VK_SHADER_STAGE_FRAGMENT_BIT);
    if (*outFragShaderModule == VK_NULL_HANDLE) {
        return false;
    }

    *outVertexShaderModule =
            createVulkanShaderModule(sharedContext, vsSPIRV, VK_SHADER_STAGE_VERTEX_BIT);
    if (*outVertexShaderModule == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, VK_NULL_HANDLE, *outFragShaderModule);
        return false;
    }

    setup_shader_stage_info(VK_SHADER_STAGE_VERTEX_BIT,
                            *outVertexShaderModule,
                            &outShaderStageInfo[0]);

    setup_shader_stage_info(VK_SHADER_STAGE_FRAGMENT_BIT,
                            *outFragShaderModule,
                            &outShaderStageInfo[1]);

    // The load msaa pipeline takes no step or paint uniforms and no instance attributes. It only
    // references one input attachment texture (which does not require a sampler) and one vertex
    // attribute (NDC position)
    skia_private::TArray<DescriptorData> inputAttachmentDescriptors(1);
    inputAttachmentDescriptors.push_back(VulkanGraphicsPipeline::kInputAttachmentDescriptor);
    // TODO: Do we need to consider the potential usage of immutable YCbCr samplers here?
    *outPipelineLayout = setup_pipeline_layout(sharedContext,
                                               /*usesIntrinsicConstantUbo=*/false,
                                               /*useMSAALoadPushConstant=*/true,
                                               /*hasStepUniforms=*/false,
                                               /*hasPaintUniforms=*/false,
                                               /*hasGradientBuffer=*/false,
                                               /*numTextureSamplers=*/0,
                                               /*numInputAttachments=*/1,
                                               /*immutableSamplers=*/{});

    if (*outPipelineLayout == VK_NULL_HANDLE) {
        destroy_shader_modules(sharedContext, *outVertexShaderModule, *outFragShaderModule);
        return false;
    }
    return true;
}

sk_sp<VulkanGraphicsPipeline> VulkanGraphicsPipeline::MakeLoadMSAAPipeline(
        const VulkanSharedContext* sharedContext,
        VkShaderModule vsModule,
        VkShaderModule fsModule,
        VkPipelineShaderStageCreateInfo* pipelineShaderStages,
        VkPipelineLayout pipelineLayout,
        sk_sp<VulkanRenderPass> compatibleRenderPass,
        VkPipelineCache pipelineCache,
        const TextureInfo& dstColorAttachmentTexInfo) {

    int numSamples = dstColorAttachmentTexInfo.numSamples();

    // Create vertex attribute list
    SkSpan<const Attribute> loadMSAAVertexAttribs = {};

    VkPipelineVertexInputStateCreateInfo vertexInputInfo;
    skia_private::STArray<2, VkVertexInputBindingDescription, true> bindingDescs;
    skia_private::STArray<16, VkVertexInputAttributeDescription> attributeDescs;
    setup_vertex_input_state(loadMSAAVertexAttribs,
                             /*instanceAttrs=*/{}, // Load msaa pipeline takes no instance attribs
                             &vertexInputInfo,
                             &bindingDescs,
                             &attributeDescs);

    VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
    setup_input_assembly_state(PrimitiveType::kTriangleStrip, &inputAssemblyInfo);

    VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
    setup_depth_stencil_state(/*stencilSettings=*/{}, &depthStencilInfo);

    VkPipelineViewportStateCreateInfo viewportInfo;
    setup_viewport_scissor_state(&viewportInfo);

    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(numSamples, &multisampleInfo);

    // We will only have one color blend attachment per pipeline.
    VkPipelineColorBlendAttachmentState attachmentStates[1];
    VkPipelineColorBlendStateCreateInfo colorBlendInfo;
    setup_color_blend_state({}, &colorBlendInfo, attachmentStates);

    VkPipelineRasterizationStateCreateInfo rasterInfo;
    // TODO: Check for wire frame mode once that is an available context option within graphite.
    setup_raster_state(/*isWireframe=*/false, &rasterInfo);

    VkDynamicState dynamicStates[3];
    VkPipelineDynamicStateCreateInfo dynamicInfo;
    setup_dynamic_state(&dynamicInfo, dynamicStates);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = 2;
    pipelineCreateInfo.pStages = pipelineShaderStages;
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = pipelineLayout;
    pipelineCreateInfo.renderPass = compatibleRenderPass->renderPass();

    VkPipeline vkPipeline;
    VkResult result;
    {
        TRACE_EVENT0_ALWAYS("skia.shaders", "CreateGraphicsPipeline");
        SkASSERT(pipelineCache != VK_NULL_HANDLE);
        VULKAN_CALL_RESULT(sharedContext,
                           result,
                           CreateGraphicsPipelines(sharedContext->device(),
                                                   pipelineCache,
                                                   /*createInfoCount=*/1,
                                                   &pipelineCreateInfo,
                                                   /*pAllocator=*/nullptr,
                                                   &vkPipeline));
    }
    if (result != VK_SUCCESS) {
        SkDebugf("Failed to create pipeline. Error: %d\n", result);
        return nullptr;
    }

    // This is an internal shader, so don't bother filling in the shader code metadata
    PipelineInfo pipelineInfo{};
    return sk_sp<VulkanGraphicsPipeline>(
            new VulkanGraphicsPipeline(sharedContext,
                                       pipelineInfo,
                                       pipelineLayout,
                                       vkPipeline,
                                       /*ownsPipelineLayout=*/false,
                                       /*immutableSamplers=*/{}));
}

VulkanGraphicsPipeline::VulkanGraphicsPipeline(
        const VulkanSharedContext* sharedContext,
        const PipelineInfo& pipelineInfo,
        VkPipelineLayout pipelineLayout,
        VkPipeline pipeline,
        bool ownsPipelineLayout,
        skia_private::TArray<sk_sp<VulkanSampler>> immutableSamplers)
    : GraphicsPipeline(sharedContext, pipelineInfo)
    , fPipelineLayout(pipelineLayout)
    , fPipeline(pipeline)
    , fOwnsPipelineLayout(ownsPipelineLayout)
    , fImmutableSamplers(std::move(immutableSamplers)) {}

void VulkanGraphicsPipeline::freeGpuData() {
    auto sharedCtxt = static_cast<const VulkanSharedContext*>(this->sharedContext());
    if (fPipeline != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedCtxt->interface(),
                    DestroyPipeline(sharedCtxt->device(), fPipeline, nullptr));
    }
    if (fOwnsPipelineLayout && fPipelineLayout != VK_NULL_HANDLE) {
        VULKAN_CALL(sharedCtxt->interface(),
                    DestroyPipelineLayout(sharedCtxt->device(), fPipelineLayout, nullptr));
    }
}

} // namespace skgpu::graphite