/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnComputePipeline.h"

#include "src/gpu/SkSLToBackend.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ComputePipelineDesc.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/dawn/DawnAsyncWait.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphiteTypesPriv.h"
#include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnUtilsPriv.h"
#include "src/sksl/SkSLProgramSettings.h"

namespace skgpu::graphite {
namespace {

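// Groups a compiled WGSL shader module with the name of its compute entry point.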
struct ShaderInfo {
    wgpu::ShaderModule fModule;
    std::string fEntryPoint;

    bool isValid() const { return static_cast<bool>(fModule); }
};

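// Compiles the ComputeStep's program into a WGSL shader module. Steps that supply a native WGSL
// shader use it directly; otherwise the generated compute SkSL is cross-compiled to WGSL. Returns
// a ShaderInfo with no module if compilation fails.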
static ShaderInfo compile_shader_module(const DawnSharedContext* sharedContext,
                                        const ComputePipelineDesc& pipelineDesc) {
    SkASSERT(sharedContext);

    ShaderInfo info;

    const Caps* caps = sharedContext->caps();
    const ComputeStep* step = pipelineDesc.computeStep();
    ShaderErrorHandler* errorHandler = caps->shaderErrorHandler();

    if (step->supportsNativeShader()) {
        auto nativeShader = step->nativeShaderSource(ComputeStep::NativeShaderFormat::kWGSL);
        if (!DawnCompileWGSLShaderModule(sharedContext,
                                         step->name(),
                                         std::string(nativeShader.fSource),
                                         &info.fModule,
                                         errorHandler)) {
            return {};
        }
        info.fEntryPoint = std::move(nativeShader.fEntryPoint);
    } else {
        std::string wgsl;
        SkSL::Program::Interface interface;
        SkSL::ProgramSettings settings;

        std::string sksl = BuildComputeSkSL(caps, step);
        if (skgpu::SkSLToWGSL(caps->shaderCaps(),
                              sksl,
                              SkSL::ProgramKind::kCompute,
                              settings,
                              &wgsl,
                              &interface,
                              errorHandler)) {
            if (!DawnCompileWGSLShaderModule(sharedContext, step->name(), wgsl,
                                             &info.fModule, errorHandler)) {
                return {};
            }
            info.fEntryPoint = "main";
        }
    }

    return info;
}

}  // namespace

sk_sp<DawnComputePipeline> DawnComputePipeline::Make(const DawnSharedContext* sharedContext,
                                                     const ComputePipelineDesc& pipelineDesc) {
    auto [shaderModule, entryPointName] = compile_shader_module(sharedContext, pipelineDesc);
    if (!shaderModule) {
        return nullptr;
    }

    const ComputeStep* step = pipelineDesc.computeStep();

    // ComputeStep resources are listed in the order that they must be declared in the shader. This
    // order is then used for the index assignment using an "indexed by order" policy that has
    // backend-specific semantics. The semantics on Dawn is to assign the index number in increasing
    // order.
    //
    // All resources get assigned to a single bind group at index 0.
    SkASSERT(!sharedContext->caps()->resourceBindingRequirements().fDistinctIndexRanges);
    std::vector<wgpu::BindGroupLayoutEntry> bindGroupLayoutEntries;
    auto resources = step->resources();

    // Sampled textures count as 2 resources (1 texture and 1 sampler). All other types count as 1.
    size_t resourceCount = 0;
    for (const ComputeStep::ResourceDesc& r : resources) {
        resourceCount++;
        if (r.fType == ComputeStep::ResourceType::kSampledTexture) {
            resourceCount++;
        }
    }

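    // Reserve the full count up front so the emplace_back calls below never reallocate while
    // references into the vector are held (the kSampledTexture case appends a second entry).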
    bindGroupLayoutEntries.reserve(resourceCount);
    int declarationIndex = 0;
    for (const ComputeStep::ResourceDesc& r : resources) {
        bindGroupLayoutEntries.emplace_back();
        uint32_t bindingIndex = bindGroupLayoutEntries.size() - 1;

        wgpu::BindGroupLayoutEntry& entry = bindGroupLayoutEntries.back();
        entry.binding = bindingIndex;
        entry.visibility = wgpu::ShaderStage::Compute;
        switch (r.fType) {
            case ComputeStep::ResourceType::kUniformBuffer:
                entry.buffer.type = wgpu::BufferBindingType::Uniform;
                break;
            case ComputeStep::ResourceType::kStorageBuffer:
            case ComputeStep::ResourceType::kIndirectBuffer:
                entry.buffer.type = wgpu::BufferBindingType::Storage;
                break;
            case ComputeStep::ResourceType::kReadOnlyStorageBuffer:
                entry.buffer.type = wgpu::BufferBindingType::ReadOnlyStorage;
                break;
            case ComputeStep::ResourceType::kReadOnlyTexture:
                entry.texture.sampleType = wgpu::TextureSampleType::Float;
                entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
                break;
            case ComputeStep::ResourceType::kWriteOnlyStorageTexture: {
                entry.storageTexture.access = wgpu::StorageTextureAccess::WriteOnly;
                entry.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;

                auto [_, colorType] = step->calculateTextureParameters(declarationIndex, r);
                auto textureInfo = sharedContext->caps()->getDefaultStorageTextureInfo(colorType);
                entry.storageTexture.format = TextureInfos::GetDawnViewFormat(textureInfo);
                break;
            }
            case ComputeStep::ResourceType::kSampledTexture: {
                entry.sampler.type = wgpu::SamplerBindingType::Filtering;

                // Add an additional entry for the texture.
                bindGroupLayoutEntries.emplace_back();
                wgpu::BindGroupLayoutEntry& texEntry = bindGroupLayoutEntries.back();
                texEntry.binding = bindingIndex + 1;
                texEntry.visibility = wgpu::ShaderStage::Compute;
                texEntry.texture.sampleType = wgpu::TextureSampleType::Float;
                texEntry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
                break;
            }
        }
        declarationIndex++;
    }

    const wgpu::Device& device = sharedContext->device();

    // All resources of a ComputeStep currently get assigned to a single bind group at index 0.
    wgpu::BindGroupLayoutDescriptor bindGroupLayoutDesc;
    bindGroupLayoutDesc.entryCount = bindGroupLayoutEntries.size();
    bindGroupLayoutDesc.entries = bindGroupLayoutEntries.data();
    wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&bindGroupLayoutDesc);
    if (!bindGroupLayout) {
        return nullptr;
    }

    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc;
    if (sharedContext->caps()->setBackendLabels()) {
        pipelineLayoutDesc.label = step->name();
    }
    pipelineLayoutDesc.bindGroupLayoutCount = 1;
    pipelineLayoutDesc.bindGroupLayouts = &bindGroupLayout;
    wgpu::PipelineLayout layout = device.CreatePipelineLayout(&pipelineLayoutDesc);
    if (!layout) {
        return nullptr;
    }

    wgpu::ComputePipelineDescriptor descriptor;
    // Always set the label for pipelines, dawn may need it for tracing.
    descriptor.label = step->name();
    descriptor.compute.module = std::move(shaderModule);
    descriptor.compute.entryPoint = entryPointName.c_str();
    descriptor.layout = std::move(layout);

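    // When scoped error checks are allowed, wrap pipeline creation in a Dawn error scope so a
    // validation failure can be detected here and reported by returning a null pipeline.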
    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
    SkASSERT(pipeline);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return sk_sp<DawnComputePipeline>(new DawnComputePipeline(
            sharedContext, std::move(pipeline), std::move(bindGroupLayout)));
}

DawnComputePipeline::DawnComputePipeline(const SharedContext* sharedContext,
                                         wgpu::ComputePipeline pso,
                                         wgpu::BindGroupLayout groupLayout)
        : ComputePipeline(sharedContext)
        , fPipeline(std::move(pso))
        , fGroupLayout(std::move(groupLayout)) {}

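// Drops the pipeline handle; fGroupLayout is kept and released when this object is destroyed.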
void DawnComputePipeline::freeGpuData() { fPipeline = nullptr; }

}  // namespace skgpu::graphite