/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
9
10 #include "include/core/SkData.h"
11 #include "include/core/SkString.h"
12 #include "include/gpu/ganesh/GrDirectContext.h"
13 #include "include/gpu/ganesh/GrTypes.h"
14 #include "include/private/base/SkDebug.h"
15 #include "include/private/gpu/ganesh/GrTypesPriv.h"
16 #include "src/core/SkTaskGroup.h"
17 #include "src/core/SkTraceEvent.h"
18 #include "src/gpu/Blend.h"
19 #include "src/gpu/RefCntedCallback.h"
20 #include "src/gpu/ganesh/GrDirectContextPriv.h"
21 #include "src/gpu/ganesh/GrGeometryProcessor.h"
22 #include "src/gpu/ganesh/GrSamplerState.h"
23 #include "src/gpu/ganesh/GrStencilSettings.h"
24 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
25 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
26 #include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
27 #include "src/gpu/ganesh/vk/GrVkGpu.h"
28 #include "src/gpu/ganesh/vk/GrVkPipeline.h"
29 #include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
30 #include "src/gpu/ganesh/vk/GrVkUtil.h"
31
32 #include <cstring>
33
34 class GrProgramInfo;
35 class GrRenderTarget;
36 class GrVkDescriptorSet;
37
// Constructs the provider for the given GPU. The VkPipelineCache is created
// lazily on first use (see pipelineCache()); only the pipeline-state cache is
// allocated up front.
GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}
43
// destroyResources() must have run before destruction: all cached objects are
// expected to have been released and the VkPipelineCache handle destroyed.
GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(fRenderPassArray.empty());
    SkASSERT(fExternalRenderPasses.empty());
    SkASSERT(fMSAALoadPipelines.empty());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}
50
pipelineCache()51 VkPipelineCache GrVkResourceProvider::pipelineCache() {
52 if (fPipelineCache == VK_NULL_HANDLE) {
53 TRACE_EVENT0("skia.shaders", "CreatePipelineCache-GrVkResourceProvider");
54 VkPipelineCacheCreateInfo createInfo;
55 memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
56 createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
57 createInfo.pNext = nullptr;
58 createInfo.flags = 0;
59
60 auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
61 sk_sp<SkData> cached;
62 if (persistentCache) {
63 uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
64 sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
65 cached = persistentCache->load(*keyData);
66 }
67 bool usedCached = false;
68 if (cached) {
69 const uint32_t* cacheHeader = (const uint32_t*)cached->data();
70 if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
71 // For version one of the header, the total header size is 16 bytes plus
72 // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the vulkan spec to see
73 // the breakdown of these bytes.
74 SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
75 const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
76 const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
77 if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
78 !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
79 createInfo.initialDataSize = cached->size();
80 createInfo.pInitialData = cached->data();
81 usedCached = true;
82 }
83 }
84 }
85 if (!usedCached) {
86 createInfo.initialDataSize = 0;
87 createInfo.pInitialData = nullptr;
88 }
89
90 VkResult result;
91 GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
92 &fPipelineCache));
93 if (VK_SUCCESS != result) {
94 fPipelineCache = VK_NULL_HANDLE;
95 }
96 }
97 return fPipelineCache;
98 }
99
init()100 void GrVkResourceProvider::init() {
101 // Init uniform descriptor objects
102 GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
103 fDescriptorSetManagers.emplace_back(dsm);
104 SkASSERT(1 == fDescriptorSetManagers.size());
105 fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
106 dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
107 fDescriptorSetManagers.emplace_back(dsm);
108 SkASSERT(2 == fDescriptorSetManagers.size());
109 fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
110 }
111
makePipeline(const GrProgramInfo & programInfo,VkPipelineShaderStageCreateInfo * shaderStageInfo,int shaderStageCount,VkRenderPass compatibleRenderPass,VkPipelineLayout layout,uint32_t subpass)112 sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
113 const GrProgramInfo& programInfo,
114 VkPipelineShaderStageCreateInfo* shaderStageInfo,
115 int shaderStageCount,
116 VkRenderPass compatibleRenderPass,
117 VkPipelineLayout layout,
118 uint32_t subpass) {
119 return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
120 compatibleRenderPass, layout, this->pipelineCache(), subpass);
121 }
122
// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
126 const GrVkRenderPass*
findCompatibleRenderPass(GrVkRenderTarget * target,CompatibleRPHandle * compatibleHandle,bool withResolve,bool withStencil,SelfDependencyFlags selfDepFlags,LoadFromResolve loadFromResolve)127 GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
128 CompatibleRPHandle* compatibleHandle,
129 bool withResolve,
130 bool withStencil,
131 SelfDependencyFlags selfDepFlags,
132 LoadFromResolve loadFromResolve) {
133 // Get attachment information from render target. This includes which attachments the render
134 // target has (color, stencil) and the attachments format and sample count.
135 GrVkRenderPass::AttachmentFlags attachmentFlags;
136 GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
137 if (!target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags,
138 withResolve, withStencil)) {
139 return nullptr;
140 }
141
142 return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
143 loadFromResolve, compatibleHandle);
144 }
145
// Finds (or creates) the compatibility class matching the given attachment
// description and returns its representative render pass with a ref for the
// caller. Optionally reports the class index via compatibleHandle. Returns
// nullptr only if a new simple render pass could not be created.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    // Linear scan over the known compatibility classes; the number of distinct
    // attachment configurations in a context is expected to stay small.
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            // The caller receives its own ref on the returned pass.
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    // No existing class matched: create a basic load/store render pass that
    // seeds a new compatibility class at the end of the array.
    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.size() - 1);
    }
    return renderPass;
}
176
// Wraps (or reuses a wrapper for) a client-provided VkRenderPass. External
// passes are cached separately from internally created ones; the returned
// wrapper carries a ref for the caller.
const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            // A cached wrapper must agree with the caller about which
            // attachment is the color attachment.
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    // Not seen before: the new wrapper's initial ref is owned by the cache
    // vector; the extra ref() below belongs to the caller.
    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}
197
// Returns a render pass for the target with the exact load/store ops given,
// or nullptr if the target has no compatible render pass handle. Optionally
// reports the compatibility handle via compatibleHandle.
const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    // Use the caller's handle slot when provided so they learn the class index;
    // otherwise fall back to a local temporary.
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}
219
220 const GrVkRenderPass*
findRenderPass(const CompatibleRPHandle & compatibleHandle,const GrVkRenderPass::LoadStoreOps & colorOps,const GrVkRenderPass::LoadStoreOps & resolveOps,const GrVkRenderPass::LoadStoreOps & stencilOps)221 GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
222 const GrVkRenderPass::LoadStoreOps& colorOps,
223 const GrVkRenderPass::LoadStoreOps& resolveOps,
224 const GrVkRenderPass::LoadStoreOps& stencilOps) {
225 SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.size());
226 CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
227 const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
228 colorOps,
229 resolveOps,
230 stencilOps);
231 if (!renderPass) {
232 return nullptr;
233 }
234 renderPass->ref();
235 return renderPass;
236 }
237
findOrCreateCompatibleDescriptorPool(VkDescriptorType type,uint32_t count)238 GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
239 VkDescriptorType type, uint32_t count) {
240 return GrVkDescriptorPool::Create(fGpu, type, count);
241 }
242
findOrCreateCompatibleSampler(GrSamplerState params,const skgpu::VulkanYcbcrConversionInfo & ycbcrInfo)243 GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
244 GrSamplerState params, const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
245 GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
246 if (!sampler) {
247 sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
248 if (!sampler) {
249 return nullptr;
250 }
251 fSamplers.add(sampler);
252 }
253 SkASSERT(sampler);
254 sampler->ref();
255 return sampler;
256 }
257
findOrCreateCompatibleSamplerYcbcrConversion(const skgpu::VulkanYcbcrConversionInfo & ycbcrInfo)258 GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
259 const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
260 GrVkSamplerYcbcrConversion* ycbcrConversion =
261 fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
262 if (!ycbcrConversion) {
263 ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
264 if (!ycbcrConversion) {
265 return nullptr;
266 }
267 fYcbcrConversions.add(ycbcrConversion);
268 }
269 SkASSERT(ycbcrConversion);
270 ycbcrConversion->ref();
271 return ycbcrConversion;
272 }
273
findOrCreateCompatiblePipelineState(GrRenderTarget * renderTarget,const GrProgramInfo & programInfo,VkRenderPass compatibleRenderPass,bool overrideSubpassForResolveLoad)274 GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
275 GrRenderTarget* renderTarget,
276 const GrProgramInfo& programInfo,
277 VkRenderPass compatibleRenderPass,
278 bool overrideSubpassForResolveLoad) {
279 return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
280 compatibleRenderPass,
281 overrideSubpassForResolveLoad);
282 }
283
findOrCreateCompatiblePipelineState(const GrProgramDesc & desc,const GrProgramInfo & programInfo,VkRenderPass compatibleRenderPass,GrThreadSafePipelineBuilder::Stats::ProgramCacheResult * stat)284 GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
285 const GrProgramDesc& desc,
286 const GrProgramInfo& programInfo,
287 VkRenderPass compatibleRenderPass,
288 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {
289
290 auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
291 compatibleRenderPass, stat);
292 if (!tmp) {
293 fPipelineStateCache->stats()->incNumPreCompilationFailures();
294 } else {
295 fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
296 }
297
298 return tmp;
299 }
300
// Returns a pipeline for loading MSAA color from the resolve attachment,
// cached per compatible render pass. Returns nullptr if pipeline creation
// fails. The pipeline does not own its layout (ownsLayout=false below).
sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or Create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.size() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        // A full-screen triangle-strip draw with no vertex/instance attributes;
        // the two shader stages (vertex + fragment) are supplied by the caller.
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWantialiasState=*/false,
                skgpu::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        // Cache keyed by the render pass pointer; compatibility is checked on
        // lookup, so one entry serves all compatible passes.
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}
341
getZeroSamplerDescriptorSetHandle(GrVkDescriptorSetManager::Handle * handle)342 void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
343 GrVkDescriptorSetManager::Handle* handle) {
344 SkASSERT(handle);
345 for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
346 if (fDescriptorSetManagers[i]->isZeroSampler()) {
347 *handle = GrVkDescriptorSetManager::Handle(i);
348 return;
349 }
350 }
351
352 GrVkDescriptorSetManager* dsm =
353 GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
354 fDescriptorSetManagers.emplace_back(dsm);
355 *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
356 }
357
getSamplerDescriptorSetHandle(VkDescriptorType type,const GrVkUniformHandler & uniformHandler,GrVkDescriptorSetManager::Handle * handle)358 void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
359 const GrVkUniformHandler& uniformHandler,
360 GrVkDescriptorSetManager::Handle* handle) {
361 SkASSERT(handle);
362 SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
363 VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
364 for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
365 if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
366 *handle = GrVkDescriptorSetManager::Handle(i);
367 return;
368 }
369 }
370
371 GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
372 uniformHandler);
373 fDescriptorSetManagers.emplace_back(dsm);
374 *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
375 }
376
getUniformDSLayout() const377 VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
378 SkASSERT(fUniformDSHandle.isValid());
379 return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
380 }
381
getInputDSLayout() const382 VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
383 SkASSERT(fInputDSHandle.isValid());
384 return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
385 }
386
getSamplerDSLayout(const GrVkDescriptorSetManager::Handle & handle) const387 VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
388 const GrVkDescriptorSetManager::Handle& handle) const {
389 SkASSERT(handle.isValid());
390 return fDescriptorSetManagers[handle.toIndex()]->layout();
391 }
392
getUniformDescriptorSet()393 const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
394 SkASSERT(fUniformDSHandle.isValid());
395 return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
396 fUniformDSHandle);
397 }
398
getInputDescriptorSet()399 const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
400 SkASSERT(fInputDSHandle.isValid());
401 return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
402 }
403
getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle & handle)404 const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
405 const GrVkDescriptorSetManager::Handle& handle) {
406 SkASSERT(handle.isValid());
407 return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
408 }
409
recycleDescriptorSet(const GrVkDescriptorSet * descSet,const GrVkDescriptorSetManager::Handle & handle)410 void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
411 const GrVkDescriptorSetManager::Handle& handle) {
412 SkASSERT(descSet);
413 SkASSERT(handle.isValid());
414 int managerIdx = handle.toIndex();
415 SkASSERT(managerIdx < fDescriptorSetManagers.size());
416 fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
417 }
418
// Returns a command pool for recording, preferring a recycled pool from the
// available list and creating one otherwise. The pool is moved to the active
// list and the caller receives a ref. Returns nullptr if creation fails.
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    GrVkCommandPool* result;
    if (!fAvailableCommandPools.empty()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    // A pool handed out must have no outstanding external refs.
    SkASSERT(result->unique());
    // Debug-only: the pool must not already be tracked in either list.
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}
443
// Scans the active pools for command buffers that have finished on the GPU,
// resets them, and moves them to the available list for reuse. Robust against
// client callbacks (fired during reset) that abandon the context mid-loop.
void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    //
    // Iterate backwards so removeShuffle (which swaps in the last element) does
    // not disturb indices we have yet to visit.
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                SkASSERT(pool->unique());
                pool->reset(fGpu);
                // After resetting the pool (specifically releasing the pool's resources) we may
                // have called a client callback proc which may have disconnected the GrVkGpu. In
                // that case we do not want to push the pool back onto the cache, but instead just
                // drop the pool.
                if (fGpu->disconnected()) {
                    pool->unref();
                    return;
                }
                fAvailableCommandPools.push_back(pool);
            }
        }
    }
}
476
forceSyncAllCommandBuffers()477 void GrVkResourceProvider::forceSyncAllCommandBuffers() {
478 for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
479 GrVkCommandPool* pool = fActiveCommandPools[i];
480 if (!pool->isOpen()) {
481 GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
482 buffer->forceSync(fGpu);
483 }
484 }
485 }
486
addFinishedProcToActiveCommandBuffers(sk_sp<skgpu::RefCntedCallback> finishedCallback)487 void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
488 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
489 for (int i = 0; i < fActiveCommandPools.size(); ++i) {
490 GrVkCommandPool* pool = fActiveCommandPools[i];
491 GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
492 buffer->addFinishedProc(finishedCallback);
493 }
494 }
495
// Tears down every cached Vulkan object owned by the provider. The order
// below is deliberate: pipelines and command buffers must go before the
// descriptor-set managers they reference (see the comment near the end).
void GrVkResourceProvider::destroyResources() {
    // Wait on any async work (e.g. background shader compiles) that may still
    // be touching these resources.
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all msaa load pipelines
    fMSAALoadPipelines.clear();

    // loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.clear();

    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.clear();

    // Iterate through all store GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    // Destroying the VkPipelineCache restores the invariant the destructor
    // asserts (fPipelineCache == VK_NULL_HANDLE).
    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.clear();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.clear();

}
549
releaseUnlockedBackendObjects()550 void GrVkResourceProvider::releaseUnlockedBackendObjects() {
551 for (GrVkCommandPool* pool : fAvailableCommandPools) {
552 SkASSERT(pool->unique());
553 pool->unref();
554 }
555 fAvailableCommandPools.clear();
556 }
557
storePipelineCacheData()558 void GrVkResourceProvider::storePipelineCacheData() {
559 if (this->pipelineCache() == VK_NULL_HANDLE) {
560 return;
561 }
562 size_t dataSize = 0;
563 VkResult result;
564 GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
565 &dataSize, nullptr));
566 if (result != VK_SUCCESS) {
567 return;
568 }
569
570 std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);
571
572 GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
573 &dataSize, (void*)data.get()));
574 if (result != VK_SUCCESS) {
575 return;
576 }
577
578 uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
579 sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
580
581 fGpu->getContext()->priv().getPersistentCache()->store(
582 *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
583 }
584
585 ////////////////////////////////////////////////////////////////////////////////
586
CompatibleRenderPassSet(GrVkRenderPass * renderPass)587 GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
588 : fLastReturnedIndex(0) {
589 renderPass->ref();
590 fRenderPasses.push_back(renderPass);
591 }
592
isCompatible(const GrVkRenderPass::AttachmentsDescriptor & attachmentsDescriptor,GrVkRenderPass::AttachmentFlags attachmentFlags,SelfDependencyFlags selfDepFlags,LoadFromResolve loadFromResolve) const593 bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
594 const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
595 GrVkRenderPass::AttachmentFlags attachmentFlags,
596 SelfDependencyFlags selfDepFlags,
597 LoadFromResolve loadFromResolve) const {
598 // The first GrVkRenderpass should always exists since we create the basic load store
599 // render pass on create
600 SkASSERT(fRenderPasses[0]);
601 return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
602 loadFromResolve);
603 }
604
// Returns the pass in this class whose load/store ops match exactly, creating
// (by cloning the compatible pass with the new ops) and caching one if
// needed. Returns nullptr only on creation failure.
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    // Start the scan at the most recently returned index: consecutive requests
    // tend to repeat the same load/store ops.
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.size();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.size() - 1;
    return renderPass;
}
626
releaseResources()627 void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
628 for (int i = 0; i < fRenderPasses.size(); ++i) {
629 if (fRenderPasses[i]) {
630 fRenderPasses[i]->unref();
631 fRenderPasses[i] = nullptr;
632 }
633 }
634 }
635
636