/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkDescriptorSetManager.h"

#include "include/core/SkTypes.h"
#include "include/private/base/SkTo.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSampler.h"
#include "src/gpu/ganesh/vk/GrVkUniformHandler.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

#include <string.h>
#include <memory>

using namespace skia_private;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

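// Builds a manager for uniform buffer descriptor sets: a single binding visible to both the
// vertex and fragment stages.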
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
    STArray<1, uint32_t> visibilities;
    uint32_t stages = kVertex_GrShaderFlag | kFragment_GrShaderFlag;
    visibilities.push_back(stages);
    TArray<const GrVkSampler*> samplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities, samplers);
}

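// Builds a manager for combined image sampler descriptor sets, with one binding per sampler
// declared by the uniform handler. Any immutable samplers are carried along so the layout can
// reference them.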
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
        GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
    STArray<4, uint32_t> visibilities;
    STArray<4, const GrVkSampler*> immutableSamplers;
    SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    for (int i = 0; i < uniformHandler.numSamplers(); ++i) {
        visibilities.push_back(uniformHandler.samplerVisibility(i));
        immutableSamplers.push_back(uniformHandler.immutableSampler(i));
    }
    return Create(gpu, type, visibilities, immutableSamplers);
}

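// Builds a sampler manager whose layout has zero bindings, presumably for pipelines that
// declare no samplers.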
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateZeroSamplerManager(GrVkGpu* gpu) {
    TArray<uint32_t> visibilities;
    TArray<const GrVkSampler*> immutableSamplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, visibilities, immutableSamplers);
}

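// Builds a manager for input attachment descriptor sets: a single binding visible to the
// fragment stage only.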
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateInputManager(GrVkGpu* gpu) {
    STArray<1, uint32_t> visibilities;
    visibilities.push_back(kFragment_GrShaderFlag);
    TArray<const GrVkSampler*> samplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, visibilities, samplers);
}

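// Maps GrShaderFlag visibility bits onto the corresponding Vulkan shader stage flags.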
VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
    VkShaderStageFlags flags = 0;

    if (visibility & kVertex_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_VERTEX_BIT;
    }
    if (visibility & kFragment_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
    }
    return flags;
}

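// Creates the VkDescriptorSetLayout for the requested descriptor type and reports how many
// descriptors a single set consumes. A combined image sampler binding with an immutable
// sampler may count as more than one descriptor (ycbcrCombinedImageSamplerDescriptorCount()).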
static bool get_layout_and_desc_count(GrVkGpu* gpu,
                                      VkDescriptorType type,
                                      const TArray<uint32_t>& visibilities,
                                      const TArray<const GrVkSampler*>& immutableSamplers,
                                      VkDescriptorSetLayout* descSetLayout,
                                      uint32_t* descCountPerSet) {
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        uint32_t numBindings = visibilities.size();
        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
                new VkDescriptorSetLayoutBinding[numBindings]);
        *descCountPerSet = 0;
        for (uint32_t i = 0; i < numBindings; ++i) {
            uint32_t visibility = visibilities[i];
            dsSamplerBindings[i].binding = i;
            dsSamplerBindings[i].descriptorType = type;
            dsSamplerBindings[i].descriptorCount = 1;
            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
                if (immutableSamplers[i]) {
                    (*descCountPerSet) += gpu->vkCaps().ycbcrCombinedImageSamplerDescriptorCount();
                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
                } else {
                    (*descCountPerSet)++;
                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
                }
            }
        }

        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        dsSamplerLayoutCreateInfo.pNext = nullptr;
        dsSamplerLayoutCreateInfo.flags = 0;
        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
        // Setting pBindings to nullptr fixes an error in the param checker validation layer.
        // Even when bindingCount is 0 (which is valid), the layer still tries to validate
        // pBindings unless it is null.
        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result,
                          CreateDescriptorSetLayout(gpu->device(),
                                                    &dsSamplerLayoutCreateInfo,
                                                    nullptr,
                                                    descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }
    } else if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
        static constexpr int kUniformDescPerSet = 1;
        SkASSERT(kUniformDescPerSet == visibilities.size());
        // Create Uniform Buffer Descriptor
        VkDescriptorSetLayoutBinding dsUniBinding;
        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        dsUniBinding.descriptorCount = 1;
        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
        dsUniBinding.pImmutableSamplers = nullptr;

        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        uniformLayoutCreateInfo.pNext = nullptr;
        uniformLayoutCreateInfo.flags = 0;
        uniformLayoutCreateInfo.bindingCount = 1;
        uniformLayoutCreateInfo.pBindings = &dsUniBinding;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
                                                                 &uniformLayoutCreateInfo,
                                                                 nullptr,
                                                                 descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }

        *descCountPerSet = kUniformDescPerSet;
    } else {
        SkASSERT(type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
        static constexpr int kInputDescPerSet = 1;
        SkASSERT(kInputDescPerSet == visibilities.size());

        // Create Input Attachment Descriptor
        VkDescriptorSetLayoutBinding dsInputBinding;
        dsInputBinding.binding = 0;
        dsInputBinding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        dsInputBinding.descriptorCount = 1;
        SkASSERT(visibilities[0] == kFragment_GrShaderFlag);
        dsInputBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
        dsInputBinding.pImmutableSamplers = nullptr;

        VkDescriptorSetLayoutCreateInfo inputLayoutCreateInfo;
        inputLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        inputLayoutCreateInfo.pNext = nullptr;
        inputLayoutCreateInfo.flags = 0;
        inputLayoutCreateInfo.bindingCount = 1;
        inputLayoutCreateInfo.pBindings = &dsInputBinding;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
                                                                 &inputLayoutCreateInfo,
                                                                 nullptr, descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }

        *descCountPerSet = kInputDescPerSet;
    }
    return true;
}

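// Shared factory: builds the descriptor set layout and, on success, wraps it in a new manager.
// Returns nullptr if layout creation fails.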
GrVkDescriptorSetManager* GrVkDescriptorSetManager::Create(
        GrVkGpu* gpu, VkDescriptorType type,
        const TArray<uint32_t>& visibilities,
        const TArray<const GrVkSampler*>& immutableSamplers) {
#ifdef SK_DEBUG
    if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
        SkASSERT(visibilities.size() == immutableSamplers.size());
    } else {
        SkASSERT(immutableSamplers.empty());
    }
#endif

    VkDescriptorSetLayout descSetLayout;
    uint32_t descCountPerSet;
    if (!get_layout_and_desc_count(gpu, type, visibilities, immutableSamplers, &descSetLayout,
                                   &descCountPerSet)) {
        return nullptr;
    }
    return new GrVkDescriptorSetManager(gpu, type, descSetLayout, descCountPerSet, visibilities,
                                        immutableSamplers);
}

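// The constructor hands the layout to the pool manager (which destroys it in freeGPUResources)
// and refs any immutable samplers so they outlive the descriptor sets that reference them.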
GrVkDescriptorSetManager::GrVkDescriptorSetManager(
        GrVkGpu* gpu, VkDescriptorType type, VkDescriptorSetLayout descSetLayout,
        uint32_t descCountPerSet, const TArray<uint32_t>& visibilities,
        const TArray<const GrVkSampler*>& immutableSamplers)
        : fPoolManager(descSetLayout, type, descCountPerSet) {
    for (int i = 0; i < visibilities.size(); ++i) {
        fBindingVisibilities.push_back(visibilities[i]);
    }
    for (int i = 0; i < immutableSamplers.size(); ++i) {
        const GrVkSampler* sampler = immutableSamplers[i];
        if (sampler) {
            sampler->ref();
        }
        fImmutableSamplers.push_back(sampler);
    }
}

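// Hands out a descriptor set, preferring one from the free list; otherwise allocates a fresh
// set from the current pool.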
const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
                                                                    const Handle& handle) {
    const GrVkDescriptorSet* ds = nullptr;
    int count = fFreeSets.size();
    if (count > 0) {
        ds = fFreeSets[count - 1];
        fFreeSets.removeShuffle(count - 1);
    } else {
        VkDescriptorSet vkDS;
        if (!fPoolManager.getNewDescriptorSet(gpu, &vkDS)) {
            return nullptr;
        }

        ds = new GrVkDescriptorSet(gpu, vkDS, fPoolManager.fPool, handle);
    }
    SkASSERT(ds);
    return ds;
}

void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
    SkASSERT(descSet);
    fFreeSets.push_back(descSet);
}

void GrVkDescriptorSetManager::release(GrVkGpu* gpu) {
    fPoolManager.freeGPUResources(gpu);

    for (int i = 0; i < fFreeSets.size(); ++i) {
        fFreeSets[i]->unref();
    }
    fFreeSets.clear();

    for (int i = 0; i < fImmutableSamplers.size(); ++i) {
        if (fImmutableSamplers[i]) {
            fImmutableSamplers[i]->unref();
        }
    }
    fImmutableSamplers.clear();
}

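// A cached sampler manager can be reused only if the descriptor type and every per-binding
// visibility and immutable sampler match what the uniform handler now requires.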
bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
                                            const GrVkUniformHandler* uniHandler) const {
    SkASSERT(uniHandler);
    if (type != fPoolManager.fDescType) {
        return false;
    }

    SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    if (fBindingVisibilities.size() != uniHandler->numSamplers()) {
        return false;
    }
    for (int i = 0; i < uniHandler->numSamplers(); ++i) {
        if (uniHandler->samplerVisibility(i) != fBindingVisibilities[i] ||
            uniHandler->immutableSampler(i) != fImmutableSamplers[i]) {
            return false;
        }
    }
    return true;
}

bool GrVkDescriptorSetManager::isZeroSampler() const {
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER != fPoolManager.fDescType) {
        return false;
    }
    if (!fBindingVisibilities.empty()) {
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
        VkDescriptorSetLayout layout,
        VkDescriptorType type,
        uint32_t descCountPerSet)
        : fDescLayout(layout)
        , fDescType(type)
        , fDescCountPerSet(descCountPerSet)
        , fMaxDescriptors(kStartNumDescriptors)
        , fCurrentDescriptorCount(0)
        , fPool(nullptr) {
}

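// Drops the current pool (if any) and acquires a new one, growing the target descriptor
// capacity by roughly 1.5x each time, capped at kMaxDescriptors.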
bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
    if (fPool) {
        fPool->unref();
        uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
        if (newPoolSize < kMaxDescriptors) {
            fMaxDescriptors = newPoolSize;
        } else {
            fMaxDescriptors = kMaxDescriptors;
        }
    }
    fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                         fMaxDescriptors);
    return SkToBool(fPool);
}

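// Allocates one VkDescriptorSet from the current pool, first replacing the pool when there is
// none yet or when the pending allocation would exceed the pool's descriptor budget.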
bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                          VkDescriptorSet* ds) {
    if (!fMaxDescriptors) {
        return false;
    }
    fCurrentDescriptorCount += fDescCountPerSet;
    if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
        if (!this->getNewPool(gpu)) {
            return false;
        }
        fCurrentDescriptorCount = fDescCountPerSet;
    }

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, AllocateDescriptorSets(gpu->device(),
                                                          &dsAllocateInfo,
                                                          ds));
    return result == VK_SUCCESS;
}

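// Destroys the descriptor set layout and releases this manager's reference on the current pool.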
void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
    if (fDescLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
                                                                  nullptr));
        fDescLayout = VK_NULL_HANDLE;
    }

    if (fPool) {
        fPool->unref();
        fPool = nullptr;
    }
}