/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnResourceProvider.h"

#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/gpu/graphite/dawn/DawnTypes.h"
#include "include/private/base/SkAlign.h"
#include "src/gpu/graphite/ComputePipeline.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnCommandBuffer.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnGraphiteTypesPriv.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"
#include "src/sksl/SkSLCompiler.h"

namespace skgpu::graphite {

namespace {

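// Buffer binding sizes are padded to kBufferBindingSizeAlignment (which is also the size of
// the placeholder "null" buffer below). The two kMaxNumberOfCached* limits cap the LRU caches
// of bind groups kept by the resource provider.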
constexpr int kBufferBindingSizeAlignment = 16;
constexpr int kMaxNumberOfCachedBufferBindGroups = 1024;
constexpr int kMaxNumberOfCachedTextureBindGroups = 4096;

wgpu::ShaderModule create_shader_module(const wgpu::Device& device, const char* source) {
#ifdef WGPU_BREAKING_CHANGE_DROP_DESCRIPTOR
    wgpu::ShaderSourceWGSL wgslDesc;
#else
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
#endif
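    // Dawn uses a chained-struct pattern: the WGSL descriptor carrying the source is linked
    // into the base ShaderModuleDescriptor via nextInChain.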
    wgslDesc.code = source;
    wgpu::ShaderModuleDescriptor descriptor;
    descriptor.nextInChain = &wgslDesc;
    return device.CreateShaderModule(&descriptor);
}

wgpu::RenderPipeline create_blit_render_pipeline(const DawnSharedContext* sharedContext,
                                                 const char* label,
                                                 wgpu::ShaderModule vsModule,
                                                 wgpu::ShaderModule fsModule,
                                                 wgpu::TextureFormat renderPassColorFormat,
                                                 wgpu::TextureFormat renderPassDepthStencilFormat,
                                                 int numSamples) {
    wgpu::RenderPipelineDescriptor descriptor;
    descriptor.label = label;
    descriptor.layout = nullptr;

    wgpu::ColorTargetState colorTarget;
    colorTarget.format = renderPassColorFormat;
    colorTarget.blend = nullptr;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    wgpu::DepthStencilState depthStencil;
    if (renderPassDepthStencilFormat != wgpu::TextureFormat::Undefined) {
        depthStencil.format = renderPassDepthStencilFormat;
        depthStencil.depthWriteEnabled = false;
        depthStencil.depthCompare = wgpu::CompareFunction::Always;

        descriptor.depthStencil = &depthStencil;
    }

    wgpu::FragmentState fragment;
    fragment.module = std::move(fsModule);
    fragment.entryPoint = "main";
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    descriptor.vertex.module = std::move(vsModule);
    descriptor.vertex.entryPoint = "main";
    descriptor.vertex.constantCount = 0;
    descriptor.vertex.constants = nullptr;
    descriptor.vertex.bufferCount = 0;
    descriptor.vertex.buffers = nullptr;

    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;

    descriptor.multisample.count = numSamples;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    auto pipeline = sharedContext->device().CreateRenderPipeline(&descriptor);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return pipeline;
}

template <size_t NumEntries>
using BindGroupKey = typename DawnResourceProvider::BindGroupKey<NumEntries>;
using UniformBindGroupKey = BindGroupKey<DawnResourceProvider::kNumUniformEntries>;
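// A BindGroupKey<N> packs 2*N uint32s; the builders below fill in per-entry unique IDs (and,
// for buffers, binding sizes).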

UniformBindGroupKey make_ubo_bind_group_key(
        const std::array<std::pair<const DawnBuffer*, uint32_t>,
                         DawnResourceProvider::kNumUniformEntries>& boundBuffersAndSizes) {
    UniformBindGroupKey uniqueKey;
    {
        // Each entry in the bind group needs two uint32s in the key:
        // - the buffer's unique ID: 32 bits.
        // - the buffer's binding size: 32 bits.
        // We need a total of four entries in the uniform buffer bind group.
        // Unused entries are assigned zero values.
        UniformBindGroupKey::Builder builder(&uniqueKey);

        for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
            const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
            const uint32_t bindingSize = boundBuffersAndSizes[i].second;
            if (boundBuffer) {
                builder[2 * i] = boundBuffer->uniqueID().asUInt();
                builder[2 * i + 1] = bindingSize;
            } else {
                builder[2 * i] = 0;
                builder[2 * i + 1] = 0;
            }
        }

        builder.finish();
    }

    return uniqueKey;
}

BindGroupKey<1> make_texture_bind_group_key(const DawnSampler* sampler,
                                            const DawnTexture* texture) {
    BindGroupKey<1> uniqueKey;
    {
        BindGroupKey<1>::Builder builder(&uniqueKey);

        builder[0] = sampler->uniqueID().asUInt();
        builder[1] = texture->uniqueID().asUInt();

        builder.finish();
    }

    return uniqueKey;
}
} // namespace


// Wraps a Dawn buffer, and tracks the intrinsic blocks residing in this buffer.
class DawnResourceProvider::IntrinsicBuffer final {
public:
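    // Each buffer is divided into kNumSlots fixed-stride slots, each holding one unique set of
    // intrinsic uniform values (see IntrinsicConstantsManager::add()).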
    static constexpr int kNumSlots = 8;

    IntrinsicBuffer(sk_sp<DawnBuffer> dawnBuffer) : fDawnBuffer(std::move(dawnBuffer)) {}
    ~IntrinsicBuffer() = default;

    sk_sp<DawnBuffer> buffer() const { return fDawnBuffer; }

    // Track that 'intrinsicValues' is stored in the buffer at the 'offset'.
    void trackIntrinsic(UniformDataBlock intrinsicValues, uint32_t offset) {
        fCachedIntrinsicValues.set(UniformDataBlock::Make(intrinsicValues, &fUniformData), offset);
    }

    // Find the offset of 'intrinsicValues' in the buffer. If not found, return nullptr.
    uint32_t* findIntrinsic(UniformDataBlock intrinsicValues) const {
        return fCachedIntrinsicValues.find(intrinsicValues);
    }

    int slotsUsed() const { return fCachedIntrinsicValues.count(); }

    void updateAccessTime() {
        fLastAccess = skgpu::StdSteadyClock::now();
    }
    skgpu::StdSteadyClock::time_point lastAccessTime() const {
        return fLastAccess;
    }

private:
    skia_private::THashMap<UniformDataBlock, uint32_t, UniformDataBlock::Hash>
            fCachedIntrinsicValues;
    SkArenaAlloc fUniformData{0};

    sk_sp<DawnBuffer> fDawnBuffer;
    skgpu::StdSteadyClock::time_point fLastAccess;

    SK_DECLARE_INTERNAL_LLIST_INTERFACE(IntrinsicBuffer);
};

// DawnResourceProvider::IntrinsicConstantsManager
// ----------------------------------------------------------------------------

/**
 * Since Dawn does not currently provide push constants, this helper class manages rotating through
 * buffers and writing each new occurrence of a set of intrinsic uniforms into the current buffer.
 */
class DawnResourceProvider::IntrinsicConstantsManager {
public:
    explicit IntrinsicConstantsManager(DawnResourceProvider* resourceProvider)
            : fResourceProvider(resourceProvider) {}

    ~IntrinsicConstantsManager() {
        auto alwaysTrue = [](IntrinsicBuffer* buffer) { return true; };
        this->purgeBuffersIf(alwaysTrue);

        SkASSERT(fIntrinsicBuffersLRU.isEmpty());
    }

    // Find or create a bind buffer info for the given intrinsic values used in the given command
    // buffer.
    BindBufferInfo add(DawnCommandBuffer* cb, UniformDataBlock intrinsicValues);

    void purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
        auto bufferNotUsedSince = [purgeTime, this](IntrinsicBuffer* buffer) {
            // We always keep the current buffer as it is likely to be used again soon.
            return buffer != fCurrentBuffer && buffer->lastAccessTime() < purgeTime;
        };
        this->purgeBuffersIf(bufferNotUsedSince);
    }

    void freeGpuResources() { this->purgeResourcesNotUsedSince(skgpu::StdSteadyClock::now()); }

private:
    // The max number of intrinsic buffers to keep around in the cache.
    static constexpr uint32_t kMaxNumBuffers = 16;

    // Traverse the intrinsic buffers and purge the ones that match the 'pred'.
    template<typename T> void purgeBuffersIf(T pred);

    DawnResourceProvider* const fResourceProvider;
    // The current buffer being filled up; its slotsUsed() count tracks how much of it has been
    // written to.
    IntrinsicBuffer* fCurrentBuffer = nullptr;

    // All cached intrinsic buffers, in LRU order.
    SkTInternalLList<IntrinsicBuffer> fIntrinsicBuffersLRU;
    // The number of intrinsic buffers currently in the cache.
    uint32_t fNumBuffers = 0;
};

// Find or create a bind buffer info for the given intrinsic values used in the given command
// buffer.
BindBufferInfo DawnResourceProvider::IntrinsicConstantsManager::add(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    uint32_t* offset = nullptr;
    // Find the buffer that contains the given intrinsic values.
    while (curr != nullptr) {
        offset = curr->findIntrinsic(intrinsicValues);
        if (offset != nullptr) {
            break;
        }
        curr = iter.next();
    }
    // If we found the buffer, we can return the bind buffer info directly.
    if (curr != nullptr && offset != nullptr) {
        // Move the buffer to the head of the LRU list.
        fIntrinsicBuffersLRU.remove(curr);
        fIntrinsicBuffersLRU.addToHead(curr);
        // Track the dawn buffer's usage by the command buffer.
        cb->trackResource(curr->buffer());
        curr->updateAccessTime();
        return {curr->buffer().get(), *offset, SkTo<uint32_t>(intrinsicValues.size())};
    }

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating intrinsic constants faster. Metal has a setVertexBytes method that can
    // quickly send intrinsic constants to the vertex shader without any buffer. Dawn has no
    // similar capability, so we have to use WriteBuffer(), which cannot be called while a
    // render pass is active.
    SkASSERT(!cb->hasActivePassEncoder());

    const Caps* caps = fResourceProvider->dawnSharedContext()->caps();
    const uint32_t stride =
            SkAlignTo(intrinsicValues.size(), caps->requiredUniformBufferAlignment());
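    // Each slot in the buffer begins at a multiple of this stride, keeping every slot's offset
    // legal as a uniform-buffer binding offset.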
    // In any one of the following cases, we need to create a new buffer:
    // (1) There is no current buffer.
    // (2) The current buffer is full.
    if (!fCurrentBuffer || fCurrentBuffer->slotsUsed() == IntrinsicBuffer::kNumSlots) {
        // We can just replace the current buffer; any prior buffer was already tracked in the LRU
        // list and the intrinsic constants were written directly to the Dawn queue.
        DawnResourceProvider* resourceProvider = fResourceProvider;
        auto dawnBuffer =
                resourceProvider->findOrCreateDawnBuffer(stride * IntrinsicBuffer::kNumSlots,
                                                         BufferType::kUniform,
                                                         AccessPattern::kGpuOnly,
                                                         "IntrinsicConstantBuffer");
        if (!dawnBuffer) {
            // If we failed to create a GPU buffer to hold the intrinsic uniforms, we will fail the
            // Recording being inserted, so return an empty bind info.
            return {};
        }

        fCurrentBuffer = new IntrinsicBuffer(dawnBuffer);
        fIntrinsicBuffersLRU.addToHead(fCurrentBuffer);
        fNumBuffers++;
        // If we have too many buffers, remove the least recently used one.
        if (fNumBuffers > kMaxNumBuffers) {
            auto* tail = fIntrinsicBuffersLRU.tail();
            fIntrinsicBuffersLRU.remove(tail);
            delete tail;
            fNumBuffers--;
        }
    }

    SkASSERT(fCurrentBuffer && fCurrentBuffer->slotsUsed() < IntrinsicBuffer::kNumSlots);
    uint32_t newOffset = (fCurrentBuffer->slotsUsed()) * stride;
    fResourceProvider->dawnSharedContext()->queue().WriteBuffer(
            fCurrentBuffer->buffer()->dawnBuffer(),
            newOffset,
            intrinsicValues.data(),
            intrinsicValues.size());

    // Track the intrinsic values in the buffer.
    fCurrentBuffer->trackIntrinsic(intrinsicValues, newOffset);

    cb->trackResource(fCurrentBuffer->buffer());
    fCurrentBuffer->updateAccessTime();

    return {fCurrentBuffer->buffer().get(), newOffset, SkTo<uint32_t>(intrinsicValues.size())};
}

template <typename T> void DawnResourceProvider::IntrinsicConstantsManager::purgeBuffersIf(T pred) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    while (curr != nullptr) {
        auto* next = iter.next();
        if (pred(curr)) {
            fIntrinsicBuffersLRU.remove(curr);
            fNumBuffers--;
            delete curr;
        }
        curr = next;
    }
}

// DawnResourceProvider::IntrinsicConstantsManager
// ----------------------------------------------------------------------------


DawnResourceProvider::DawnResourceProvider(SharedContext* sharedContext,
                                           SingleOwner* singleOwner,
                                           uint32_t recorderID,
                                           size_t resourceBudget)
        : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget)
        , fUniformBufferBindGroupCache(kMaxNumberOfCachedBufferBindGroups)
        , fSingleTextureSamplerBindGroups(kMaxNumberOfCachedTextureBindGroups) {
    fIntrinsicConstantsManager = std::make_unique<IntrinsicConstantsManager>(this);
}

DawnResourceProvider::~DawnResourceProvider() = default;

wgpu::RenderPipeline DawnResourceProvider::findOrCreateBlitWithDrawPipeline(
        const RenderPassDesc& renderPassDesc) {
    uint32_t renderPassKey =
            this->dawnSharedContext()->dawnCaps()->getRenderPassDescKeyForPipeline(renderPassDesc);
    wgpu::RenderPipeline pipeline = fBlitWithDrawPipelines[renderPassKey];
    if (!pipeline) {
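        // The blit draws a single clip-space triangle that covers the whole viewport; the
        // fragment stage then loads texels 1:1 from the bound color map.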
        static constexpr char kVertexShaderText[] = R"(
            var<private> fullscreenTriPositions : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));

            @vertex
            fn main(@builtin(vertex_index) vertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4(fullscreenTriPositions[vertexIndex], 1.0, 1.0);
            }
        )";

        static constexpr char kFragmentShaderText[] = R"(
            @group(0) @binding(0) var colorMap: texture_2d<f32>;

            @fragment
            fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {
                var coords : vec2<i32> = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));
                return textureLoad(colorMap, coords, 0);
            }
        )";

        auto vsModule = create_shader_module(dawnSharedContext()->device(), kVertexShaderText);
        auto fsModule = create_shader_module(dawnSharedContext()->device(), kFragmentShaderText);

        pipeline = create_blit_render_pipeline(
                dawnSharedContext(),
                /*label=*/"BlitWithDraw",
                std::move(vsModule),
                std::move(fsModule),
                /*renderPassColorFormat=*/
                TextureInfos::GetDawnViewFormat(renderPassDesc.fColorAttachment.fTextureInfo),
                /*renderPassDepthStencilFormat=*/
                renderPassDesc.fDepthStencilAttachment.fTextureInfo.isValid()
                        ? TextureInfos::GetDawnViewFormat(
                                  renderPassDesc.fDepthStencilAttachment.fTextureInfo)
                        : wgpu::TextureFormat::Undefined,
                /*numSamples=*/renderPassDesc.fColorAttachment.fTextureInfo.numSamples());

        if (pipeline) {
            fBlitWithDrawPipelines.set(renderPassKey, pipeline);
        }
    }

    return pipeline;
}

sk_sp<Texture> DawnResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) {
    // Convert to smart pointers. The wgpu::Texture and wgpu::TextureView constructors increment
    // the ref count.
    wgpu::Texture dawnTexture = BackendTextures::GetDawnTexturePtr(texture);
    wgpu::TextureView dawnTextureView = BackendTextures::GetDawnTextureViewPtr(texture);
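    // A wrapped BackendTexture carries either a texture or a texture view, never both.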
    SkASSERT(!dawnTexture || !dawnTextureView);

    if (!dawnTexture && !dawnTextureView) {
        return {};
    }

    if (dawnTexture) {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTexture));
    } else {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTextureView));
    }
}

sk_sp<DawnTexture> DawnResourceProvider::findOrCreateDiscardableMSAALoadTexture(
        SkISize dimensions, const TextureInfo& msaaInfo) {
    SkASSERT(msaaInfo.isValid());

    // Derive the load texture's info from the MSAA texture's info.
    DawnTextureInfo dawnMsaaLoadTextureInfo;
    SkAssertResult(TextureInfos::GetDawnTextureInfo(msaaInfo, &dawnMsaaLoadTextureInfo));
    dawnMsaaLoadTextureInfo.fSampleCount = 1;
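    // The load texture is sampled by the blit-with-draw pipeline, so it needs TextureBinding
    // usage on top of what the MSAA texture declares.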
    dawnMsaaLoadTextureInfo.fUsage |= wgpu::TextureUsage::TextureBinding;

#if !defined(__EMSCRIPTEN__)
    // The MSAA texture can be a transient attachment (memoryless), but the load texture cannot,
    // because the load texture's content must be retained between the two passes used for
    // loading:
    // - First pass: the resolve texture is blitted to the load texture.
    // - Second pass: the actual render pass starts and the load texture is blitted to the MSAA
    //   texture.
    dawnMsaaLoadTextureInfo.fUsage &= (~wgpu::TextureUsage::TransientAttachment);
#endif

    auto texture = this->findOrCreateDiscardableMSAAAttachment(
            dimensions, TextureInfos::MakeDawn(dawnMsaaLoadTextureInfo));

    return sk_sp<DawnTexture>(static_cast<DawnTexture*>(texture.release()));
}

sk_sp<GraphicsPipeline> DawnResourceProvider::createGraphicsPipeline(
        const RuntimeEffectDictionary* runtimeDict,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc,
        SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags) {
    return DawnGraphicsPipeline::Make(this->dawnSharedContext(),
                                      this,
                                      runtimeDict,
                                      pipelineDesc,
                                      renderPassDesc,
                                      pipelineCreationFlags);
}

sk_sp<ComputePipeline> DawnResourceProvider::createComputePipeline(
        const ComputePipelineDesc& desc) {
    return DawnComputePipeline::Make(this->dawnSharedContext(), desc);
}

sk_sp<Texture> DawnResourceProvider::createTexture(SkISize dimensions,
                                                   const TextureInfo& info,
                                                   skgpu::Budgeted budgeted) {
    return DawnTexture::Make(this->dawnSharedContext(),
                             dimensions,
                             info,
                             budgeted);
}

sk_sp<Buffer> DawnResourceProvider::createBuffer(size_t size,
                                                 BufferType type,
                                                 AccessPattern accessPattern) {
    return DawnBuffer::Make(this->dawnSharedContext(), size, type, accessPattern);
}

sk_sp<Sampler> DawnResourceProvider::createSampler(const SamplerDesc& samplerDesc) {
    return DawnSampler::Make(this->dawnSharedContext(), samplerDesc);
}

BackendTexture DawnResourceProvider::onCreateBackendTexture(SkISize dimensions,
                                                            const TextureInfo& info) {
    wgpu::Texture texture = DawnTexture::MakeDawnTexture(this->dawnSharedContext(),
                                                         dimensions,
                                                         info);
    if (!texture) {
        return {};
    }

    return BackendTextures::MakeDawn(texture.MoveToCHandle());
}

void DawnResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) {
    SkASSERT(texture.isValid());
    SkASSERT(texture.backend() == BackendApi::kDawn);

    // Automatically release the pointers in wgpu::TextureView & wgpu::Texture's dtor.
    // Acquire() won't increment the ref count.
    wgpu::TextureView::Acquire(BackendTextures::GetDawnTextureViewPtr(texture));
    // We need to explicitly call Destroy() here since that is the recommended way to delete
    // a Dawn texture predictably, versus just dropping a ref and relying on garbage collection.
    //
    // Additionally this helps to work around an issue where Skia may have cached a BindGroup that
    // references the underlying texture. Skia currently doesn't destroy BindGroups when its use of
    // the texture goes away, thus a ref to the texture remains on the BindGroup and memory is
    // never cleaned up unless we call Destroy() here.
    wgpu::Texture::Acquire(BackendTextures::GetDawnTexturePtr(texture)).Destroy();
}

DawnSharedContext* DawnResourceProvider::dawnSharedContext() const {
    return static_cast<DawnSharedContext*>(fSharedContext);
}

sk_sp<DawnBuffer> DawnResourceProvider::findOrCreateDawnBuffer(size_t size,
                                                               BufferType type,
                                                               AccessPattern accessPattern,
                                                               std::string_view label) {
    sk_sp<Buffer> buffer = this->findOrCreateBuffer(size, type, accessPattern, std::move(label));
    DawnBuffer* ptr = static_cast<DawnBuffer*>(buffer.release());
    return sk_sp<DawnBuffer>(ptr);
}

const wgpu::BindGroupLayout& DawnResourceProvider::getOrCreateUniformBuffersBindGroupLayout() {
    if (fUniformBuffersBindGroupLayout) {
        return fUniformBuffersBindGroupLayout;
    }

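    // All four entries use dynamic offsets, so a cached bind group can be reused while the
    // bound ranges move within the underlying buffers.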
    std::array<wgpu::BindGroupLayoutEntry, 4> entries;
    entries[0].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;
    entries[0].buffer.hasDynamicOffset = true;
    entries[0].buffer.minBindingSize = 0;

    entries[1].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
    entries[1].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[1].buffer.hasDynamicOffset = true;
    entries[1].buffer.minBindingSize = 0;

    entries[2].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
    entries[2].visibility = wgpu::ShaderStage::Fragment;
    entries[2].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[2].buffer.hasDynamicOffset = true;
    entries[2].buffer.minBindingSize = 0;

    // The gradient buffer is only used when storage buffers are preferred; otherwise large
    // gradients use a texture fallback. When unused, set the binding type to uniform to
    // satisfy the binding-type restrictions of devices without SSBO support.
    entries[3].binding = DawnGraphicsPipeline::kGradientBufferIndex;
    entries[3].visibility = wgpu::ShaderStage::Fragment;
    entries[3].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[3].buffer.hasDynamicOffset = true;
    entries[3].buffer.minBindingSize = 0;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Uniform buffers bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fUniformBuffersBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fUniformBuffersBindGroupLayout;
}

const wgpu::BindGroupLayout&
DawnResourceProvider::getOrCreateSingleTextureSamplerBindGroupLayout() {
    if (fSingleTextureSamplerBindGroupLayout) {
        return fSingleTextureSamplerBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].sampler.type = wgpu::SamplerBindingType::Filtering;

    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].texture.sampleType = wgpu::TextureSampleType::Float;
    entries[1].texture.viewDimension = wgpu::TextureViewDimension::e2D;
    entries[1].texture.multisampled = false;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Single texture + sampler bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fSingleTextureSamplerBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fSingleTextureSamplerBindGroupLayout;
}

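// A tiny placeholder buffer bound to any uniform/storage slot that has no real data, since
// every entry in the bind group layout must be bound to some buffer.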
const wgpu::Buffer& DawnResourceProvider::getOrCreateNullBuffer() {
    if (!fNullBuffer) {
        wgpu::BufferDescriptor desc;
        if (fSharedContext->caps()->setBackendLabels()) {
            desc.label = "UnusedBufferSlot";
        }
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform |
                     wgpu::BufferUsage::Storage;
        desc.size = kBufferBindingSizeAlignment;
        desc.mappedAtCreation = false;

        fNullBuffer = this->dawnSharedContext()->device().CreateBuffer(&desc);
        SkASSERT(fNullBuffer);
    }

    return fNullBuffer;
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateUniformBuffersBindGroup(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, kNumUniformEntries>&
                boundBuffersAndSizes) {
    auto key = make_ubo_bind_group_key(boundBuffersAndSizes);
    auto* existingBindGroup = fUniformBufferBindGroupCache.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    // Translate to wgpu::BindGroupDescriptor.
    std::array<wgpu::BindGroupEntry, kNumUniformEntries> entries;

    constexpr uint32_t kBindingIndices[] = {
        DawnGraphicsPipeline::kIntrinsicUniformBufferIndex,
        DawnGraphicsPipeline::kRenderStepUniformBufferIndex,
        DawnGraphicsPipeline::kPaintUniformBufferIndex,
        DawnGraphicsPipeline::kGradientBufferIndex,
    };

    for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
        const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
        const uint32_t bindingSize = boundBuffersAndSizes[i].second;

        entries[i].binding = kBindingIndices[i];
        entries[i].offset = 0;
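        // Offsets within the buffer are supplied dynamically at bind time (the layout uses
        // dynamic offsets), so the bind group entry itself always starts at 0.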
        if (boundBuffer) {
            entries[i].buffer = boundBuffer->dawnBuffer();
            entries[i].size = SkAlignTo(bindingSize, kBufferBindingSizeAlignment);
        } else {
            entries[i].buffer = this->getOrCreateNullBuffer();
            entries[i].size = wgpu::kWholeSize;
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateUniformBuffersBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fUniformBufferBindGroupCache.insert(key, bindGroup);
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateSingleTextureSamplerBindGroup(
        const DawnSampler* sampler, const DawnTexture* texture) {
    auto key = make_texture_bind_group_key(sampler, texture);
    auto* existingBindGroup = fSingleTextureSamplerBindGroups.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    std::array<wgpu::BindGroupEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].sampler = sampler->dawnSampler();
    entries[1].binding = 1;
    entries[1].textureView = texture->sampleTextureView();

    wgpu::BindGroupDescriptor desc;
    desc.layout = getOrCreateSingleTextureSamplerBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fSingleTextureSamplerBindGroups.insert(key, bindGroup);
}

void DawnResourceProvider::onFreeGpuResources() {
    fIntrinsicConstantsManager->freeGpuResources();
}

void DawnResourceProvider::onPurgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    fIntrinsicConstantsManager->purgeResourcesNotUsedSince(purgeTime);
}

BindBufferInfo DawnResourceProvider::findOrCreateIntrinsicBindBufferInfo(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    return fIntrinsicConstantsManager->add(cb, intrinsicValues);
}

} // namespace skgpu::graphite