/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017 The Khronos Group Inc.
 * Copyright (c) 2018 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan descriptor set tests
 *//*--------------------------------------------------------------------*/

// These tests generate random descriptor set layouts, where each descriptor
// set has a random number of bindings, each binding has a random array size
// and random descriptor type. The descriptor types are all backed by buffers
// or buffer views, and each buffer is filled with a unique integer starting
// from zero. The shader fetches from each descriptor (possibly using dynamic
// indexing of the descriptor array) and compares against the expected value.
//
// The different test cases vary the maximum number of descriptors used of
// each type. "Low" limit tests use the spec minimum maximum limit, "high"
// limit tests use up to 4k descriptors of the corresponding type. Test cases
// also vary the type of indexing used, and the shader stage.
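
// As a rough illustration (the numbers here are made up), the code generated
// to check a single descriptor looks like:
//
//   temp = ubo1_2[3].val;
//   accum |= temp - 42;
//
// where 42 would be the unique integer backing that descriptor.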

#include "vktBindingDescriptorSetRandomTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkRayTracingUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <map>
#include <utility>
#include <memory>

namespace vkt
{
namespace BindingModel
{
namespace
{
using namespace vk;
using namespace std;

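// The output image is DIM x DIM texels; each texel is written by one shader invocation.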
static const uint32_t DIM = 8;

#ifndef CTS_USES_VULKANSC
static const VkFlags ALL_RAY_TRACING_STAGES = VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR |
                                              VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR |
                                              VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR;
#endif

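// How the generated shader indexes into each descriptor array: not at all,
// with constant literals, with values read from a push constant, with values
// derived from a previous descriptor read, or through runtime-sized arrays.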
typedef enum
{
    INDEX_TYPE_NONE = 0,
    INDEX_TYPE_CONSTANT,
    INDEX_TYPE_PUSHCONSTANT,
    INDEX_TYPE_DEPENDENT,
    INDEX_TYPE_RUNTIME_SIZE,
} IndexType;

typedef enum
{
    STAGE_COMPUTE = 0,
    STAGE_VERTEX,
    STAGE_FRAGMENT,
    STAGE_RAYGEN_NV,
    STAGE_RAYGEN,
    STAGE_INTERSECT,
    STAGE_ANY_HIT,
    STAGE_CLOSEST_HIT,
    STAGE_MISS,
    STAGE_CALLABLE,
    STAGE_TASK,
    STAGE_MESH,
} Stage;

typedef enum
{
    UPDATE_AFTER_BIND_DISABLED = 0,
    UPDATE_AFTER_BIND_ENABLED,
} UpdateAfterBind;

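// Identifies a single descriptor: its set, its binding within the set, and
// its array index within the binding.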
struct DescriptorId
{
    DescriptorId(uint32_t set_, uint32_t binding_, uint32_t number_) : set(set_), binding(binding_), number(number_)
    {
    }

    bool operator<(const DescriptorId &other) const
    {
        return (set < other.set ||
                (set == other.set && (binding < other.binding || (binding == other.binding && number < other.number))));
    }

    uint32_t set;
    uint32_t binding;
    uint32_t number;
};

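// State for a descriptor the shader writes instead of reads: a host pointer
// to its backing value, the value expected to be written, and whether write
// code was actually generated for it.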
struct WriteInfo
{
    WriteInfo() : ptr(nullptr), expected(0u), writeGenerated(false)
    {
    }

    int32_t *ptr;
    int32_t expected;
    bool writeGenerated;
};

bool isRayTracingStageKHR(const Stage stage)
{
    switch (stage)
    {
    case STAGE_COMPUTE:
    case STAGE_VERTEX:
    case STAGE_FRAGMENT:
    case STAGE_RAYGEN_NV:
    case STAGE_TASK:
    case STAGE_MESH:
        return false;

    case STAGE_RAYGEN:
    case STAGE_INTERSECT:
    case STAGE_ANY_HIT:
    case STAGE_CLOSEST_HIT:
    case STAGE_MISS:
    case STAGE_CALLABLE:
        return true;

    default:
        TCU_THROW(InternalError, "Unknown stage specified");
    }
}

bool isMeshStage(Stage stage)
{
    return (stage == STAGE_TASK || stage == STAGE_MESH);
}

bool isVertexPipelineStage(Stage stage)
{
    return (isMeshStage(stage) || stage == STAGE_VERTEX);
}

#ifndef CTS_USES_VULKANSC
VkShaderStageFlagBits getShaderStageFlag(const Stage stage)
{
    switch (stage)
    {
    case STAGE_RAYGEN:
        return VK_SHADER_STAGE_RAYGEN_BIT_KHR;
    case STAGE_ANY_HIT:
        return VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
    case STAGE_CLOSEST_HIT:
        return VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
    case STAGE_MISS:
        return VK_SHADER_STAGE_MISS_BIT_KHR;
    case STAGE_INTERSECT:
        return VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
    case STAGE_CALLABLE:
        return VK_SHADER_STAGE_CALLABLE_BIT_KHR;
    default:
        TCU_THROW(InternalError, "Unknown stage specified");
    }
}
#endif

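// Shader stage mask that descriptor bindings are made visible to for the given test stage.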
VkShaderStageFlags getAllShaderStagesFor(Stage stage)
{
#ifndef CTS_USES_VULKANSC
    if (stage == STAGE_RAYGEN_NV)
        return VK_SHADER_STAGE_RAYGEN_BIT_NV;

    if (isRayTracingStageKHR(stage))
        return ALL_RAY_TRACING_STAGES;

    if (isMeshStage(stage))
        return (VK_SHADER_STAGE_MESH_BIT_EXT | ((stage == STAGE_TASK) ? VK_SHADER_STAGE_TASK_BIT_EXT : 0));
#else
    DE_UNREF(stage);
#endif // CTS_USES_VULKANSC

    return (VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
}

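// Pipeline stage mask counterpart of getAllShaderStagesFor() for the given test stage.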
VkPipelineStageFlags getAllPipelineStagesFor(Stage stage)
{
#ifndef CTS_USES_VULKANSC
    if (stage == STAGE_RAYGEN_NV)
        return VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;

    if (isRayTracingStageKHR(stage))
        return VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;

    if (isMeshStage(stage))
        return (VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT |
                ((stage == STAGE_TASK) ? VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT : 0));
#else
    DE_UNREF(stage);
#endif // CTS_USES_VULKANSC

    return (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
}

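// All ray tracing stages except raygen and callable are reached by tracing
// rays, so they need the acceleration structure reserved at set 0, binding 1.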
bool usesAccelerationStructure(const Stage stage)
{
    return (isRayTracingStageKHR(stage) && stage != STAGE_RAYGEN && stage != STAGE_CALLABLE);
}

class RandomLayout
{
public:
    RandomLayout(uint32_t numSets)
        : layoutBindings(numSets)
        , layoutBindingFlags(numSets)
        , arraySizes(numSets)
        , variableDescriptorSizes(numSets)
    {
    }

    // These three are indexed by [set][binding]
    vector<vector<VkDescriptorSetLayoutBinding>> layoutBindings;
    vector<vector<VkDescriptorBindingFlags>> layoutBindingFlags;
    vector<vector<uint32_t>> arraySizes;
    // size of the variable descriptor (last) binding in each set
    vector<uint32_t> variableDescriptorSizes;

    // List of descriptors that will write the descriptor value instead of reading it.
    map<DescriptorId, WriteInfo> descriptorWrites;
};

struct CaseDef
{
    IndexType indexType;
    uint32_t numDescriptorSets;
    uint32_t maxPerStageUniformBuffers;
    uint32_t maxUniformBuffersDynamic;
    uint32_t maxPerStageStorageBuffers;
    uint32_t maxStorageBuffersDynamic;
    uint32_t maxPerStageSampledImages;
    uint32_t maxPerStageStorageImages;
    uint32_t maxPerStageStorageTexelBuffers;
    uint32_t maxInlineUniformBlocks;
    uint32_t maxInlineUniformBlockSize;
    uint32_t maxPerStageInputAttachments;
    Stage stage;
    UpdateAfterBind uab;
    uint32_t seed;
    VkFlags allShaderStages;
    VkFlags allPipelineStages;
    // Shared by the test case and the test instance.
    std::shared_ptr<RandomLayout> randomLayout;
};

class DescriptorSetRandomTestInstance : public TestInstance
{
public:
    DescriptorSetRandomTestInstance(Context &context, const std::shared_ptr<CaseDef> &data);
    ~DescriptorSetRandomTestInstance(void);
    tcu::TestStatus iterate(void);

private:
    // Shared pointer because the test case and the test instance need to share the random layout information. Specifically, the
    // descriptorWrites map, which is filled from the test case and used by the test instance.
    std::shared_ptr<CaseDef> m_data_ptr;
    CaseDef &m_data;
};

DescriptorSetRandomTestInstance::DescriptorSetRandomTestInstance(Context &context, const std::shared_ptr<CaseDef> &data)
    : vkt::TestInstance(context)
    , m_data_ptr(data)
    , m_data(*m_data_ptr.get())
{
}

DescriptorSetRandomTestInstance::~DescriptorSetRandomTestInstance(void)
{
}

class DescriptorSetRandomTestCase : public TestCase
{
public:
    DescriptorSetRandomTestCase(tcu::TestContext &context, const char *name, const CaseDef &data);
    ~DescriptorSetRandomTestCase(void);
    virtual void initPrograms(SourceCollections &programCollection) const;
    virtual TestInstance *createInstance(Context &context) const;
    virtual void checkSupport(Context &context) const;

private:
    // See DescriptorSetRandomTestInstance about the need for a shared pointer here.
    std::shared_ptr<CaseDef> m_data_ptr;
    CaseDef &m_data;
};

DescriptorSetRandomTestCase::DescriptorSetRandomTestCase(tcu::TestContext &context, const char *name,
                                                         const CaseDef &data)
    : vkt::TestCase(context, name)
    , m_data_ptr(std::make_shared<CaseDef>(data))
    , m_data(*reinterpret_cast<CaseDef *>(m_data_ptr.get()))
{
}

DescriptorSetRandomTestCase::~DescriptorSetRandomTestCase(void)
{
}

void DescriptorSetRandomTestCase::checkSupport(Context &context) const
{
    VkPhysicalDeviceProperties2 properties;
    deMemset(&properties, 0, sizeof(properties));
    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;

#ifndef CTS_USES_VULKANSC
    void **pNextTail = &properties.pNext;
    // Get needed properties.
    VkPhysicalDeviceInlineUniformBlockPropertiesEXT inlineUniformProperties;
    deMemset(&inlineUniformProperties, 0, sizeof(inlineUniformProperties));
    inlineUniformProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT;

    if (context.isDeviceFunctionalitySupported("VK_EXT_inline_uniform_block"))
    {
        *pNextTail = &inlineUniformProperties;
        pNextTail = &inlineUniformProperties.pNext;
    }
    *pNextTail = NULL;
#endif

    context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);

    // Get needed features.
    auto features = context.getDeviceFeatures2();
    auto indexingFeatures = context.getDescriptorIndexingFeatures();
#ifndef CTS_USES_VULKANSC
    auto inlineUniformFeatures = context.getInlineUniformBlockFeatures();
#endif

    // Check needed properties and features
    if (isVertexPipelineStage(m_data.stage) && !features.features.vertexPipelineStoresAndAtomics)
    {
        TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
    }
#ifndef CTS_USES_VULKANSC
    else if (m_data.stage == STAGE_RAYGEN_NV)
    {
        context.requireDeviceFunctionality("VK_NV_ray_tracing");
    }
    else if (isRayTracingStageKHR(m_data.stage))
    {
        context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
        context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");

        const VkPhysicalDeviceRayTracingPipelineFeaturesKHR &rayTracingPipelineFeaturesKHR =
            context.getRayTracingPipelineFeatures();
        if (rayTracingPipelineFeaturesKHR.rayTracingPipeline == false)
            TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipeline");

        const VkPhysicalDeviceAccelerationStructureFeaturesKHR &accelerationStructureFeaturesKHR =
            context.getAccelerationStructureFeatures();
        if (accelerationStructureFeaturesKHR.accelerationStructure == false)
            TCU_THROW(TestError, "VK_KHR_ray_tracing_pipeline requires "
                                 "VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure");
    }

    if (isMeshStage(m_data.stage))
    {
        const auto &meshFeatures = context.getMeshShaderFeaturesEXT();

        if (!meshFeatures.meshShader)
            TCU_THROW(NotSupportedError, "Mesh shaders not supported");

        if (m_data.stage == STAGE_TASK && !meshFeatures.taskShader)
            TCU_THROW(NotSupportedError, "Task shaders not supported");
    }
#endif

    // Note binding 0 in set 0 is the output storage image, always present and not subject to dynamic indexing.
    if ((m_data.indexType == INDEX_TYPE_PUSHCONSTANT || m_data.indexType == INDEX_TYPE_DEPENDENT ||
         m_data.indexType == INDEX_TYPE_RUNTIME_SIZE) &&
        ((m_data.maxPerStageUniformBuffers > 0u && !features.features.shaderUniformBufferArrayDynamicIndexing) ||
         (m_data.maxPerStageStorageBuffers > 0u && !features.features.shaderStorageBufferArrayDynamicIndexing) ||
         (m_data.maxPerStageStorageImages > 1u && !features.features.shaderStorageImageArrayDynamicIndexing) ||
         (m_data.stage == STAGE_FRAGMENT && m_data.maxPerStageInputAttachments > 0u &&
          (!indexingFeatures.shaderInputAttachmentArrayDynamicIndexing)) ||
         (m_data.maxPerStageSampledImages > 0u && !indexingFeatures.shaderUniformTexelBufferArrayDynamicIndexing) ||
         (m_data.maxPerStageStorageTexelBuffers > 0u &&
          !indexingFeatures.shaderStorageTexelBufferArrayDynamicIndexing)))
    {
        TCU_THROW(NotSupportedError, "Dynamic indexing not supported");
    }

    if (m_data.numDescriptorSets > properties.properties.limits.maxBoundDescriptorSets)
    {
        TCU_THROW(NotSupportedError, "Number of descriptor sets not supported");
    }

    if ((m_data.maxPerStageUniformBuffers + m_data.maxPerStageStorageBuffers + m_data.maxPerStageSampledImages +
         m_data.maxPerStageStorageImages + m_data.maxPerStageStorageTexelBuffers + m_data.maxPerStageInputAttachments) >
        properties.properties.limits.maxPerStageResources)
    {
        TCU_THROW(NotSupportedError, "Number of descriptors not supported");
    }

    if (m_data.maxPerStageUniformBuffers > properties.properties.limits.maxPerStageDescriptorUniformBuffers ||
        m_data.maxPerStageStorageBuffers > properties.properties.limits.maxPerStageDescriptorStorageBuffers ||
        m_data.maxUniformBuffersDynamic > properties.properties.limits.maxDescriptorSetUniformBuffersDynamic ||
        m_data.maxStorageBuffersDynamic > properties.properties.limits.maxDescriptorSetStorageBuffersDynamic ||
        m_data.maxPerStageSampledImages > properties.properties.limits.maxPerStageDescriptorSampledImages ||
        (m_data.maxPerStageStorageImages + m_data.maxPerStageStorageTexelBuffers) >
            properties.properties.limits.maxPerStageDescriptorStorageImages ||
        m_data.maxPerStageInputAttachments > properties.properties.limits.maxPerStageDescriptorInputAttachments)
    {
        TCU_THROW(NotSupportedError, "Number of descriptors not supported");
    }

#ifndef CTS_USES_VULKANSC
    if (m_data.maxInlineUniformBlocks != 0 && !inlineUniformFeatures.inlineUniformBlock)
    {
        TCU_THROW(NotSupportedError, "Inline uniform blocks not supported");
    }

    if (m_data.maxInlineUniformBlocks > inlineUniformProperties.maxPerStageDescriptorInlineUniformBlocks)
    {
        TCU_THROW(NotSupportedError, "Number of inline uniform blocks not supported");
    }

    if (m_data.maxInlineUniformBlocks != 0 &&
        m_data.maxInlineUniformBlockSize > inlineUniformProperties.maxInlineUniformBlockSize)
    {
        TCU_THROW(NotSupportedError, "Inline uniform block size not supported");
    }
#endif

    if (m_data.indexType == INDEX_TYPE_RUNTIME_SIZE && !indexingFeatures.runtimeDescriptorArray)
    {
        TCU_THROW(NotSupportedError, "runtimeDescriptorArray not supported");
    }
}

// Return a random value in the range [min, max]
int32_t randRange(deRandom *rnd, int32_t min, int32_t max)
{
    if (max < 0)
        return 0;

    return (deRandom_getUint32(rnd) % (max - min + 1)) + min;
}

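// Randomly mark roughly half of the descriptors in the given binding as write
// targets; the shader will store the expected value to them instead of
// reading and checking it.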
void chooseWritesRandomly(vk::VkDescriptorType type, RandomLayout &randomLayout, deRandom &rnd, uint32_t set,
                          uint32_t binding, uint32_t count)
{
    // Make sure the type supports writes.
    switch (type)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        break;
    default:
        DE_ASSERT(false);
        break;
    }

    for (uint32_t i = 0u; i < count; ++i)
    {
        // 1/2 chance of being a write.
        if (randRange(&rnd, 1, 2) == 1)
            randomLayout.descriptorWrites[DescriptorId(set, binding, i)] = {};
    }
}

void generateRandomLayout(RandomLayout &randomLayout, const CaseDef &caseDef, deRandom &rnd)
{
    // Count the number of each resource type, to avoid overflowing the limits.
    uint32_t numUBO = 0;
    uint32_t numUBODyn = 0;
    uint32_t numSSBO = 0;
    uint32_t numSSBODyn = 0;
    uint32_t numImage = 0;
    uint32_t numStorageTex = 0;
    uint32_t numTexBuffer = 0;
#ifndef CTS_USES_VULKANSC
    uint32_t numInlineUniformBlocks = 0;
#endif
    uint32_t numInputAttachments = 0;

    // TODO: Consider varying these
    uint32_t minBindings = 0;
    // Try to keep the workload roughly constant while exercising higher numbered sets.
    uint32_t maxBindings = 128u / caseDef.numDescriptorSets;
    // No larger than 32 elements for dynamic indexing tests, due to 128B limit
    // for push constants (used for the indices)
    uint32_t maxArray = caseDef.indexType == INDEX_TYPE_NONE ? 0 : 32;

    // Each set has a random number of bindings, each binding has a random
    // array size and a random descriptor type.
    for (uint32_t s = 0; s < caseDef.numDescriptorSets; ++s)
    {
        vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
        vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
        vector<uint32_t> &arraySizes = randomLayout.arraySizes[s];
        int numBindings = randRange(&rnd, minBindings, maxBindings);

        // Guarantee room for the output image
        if (s == 0 && numBindings == 0)
        {
            numBindings = 1;
        }
        // Guarantee room for the raytracing acceleration structure
        if (s == 0 && numBindings < 2 && usesAccelerationStructure(caseDef.stage))
        {
            numBindings = 2;
        }

        bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
        bindingsFlags = vector<VkDescriptorBindingFlags>(numBindings);
        arraySizes = vector<uint32_t>(numBindings);
    }

    // BUFFER_DYNAMIC descriptor types cannot be used with VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT bindings in one set
    bool allowDynamicBuffers = caseDef.uab != UPDATE_AFTER_BIND_ENABLED;

    // Iterate over bindings first, then over sets. This prevents the low-limit bindings
    // from getting clustered in low-numbered sets.
    for (uint32_t b = 0; b <= maxBindings; ++b)
    {
        for (uint32_t s = 0; s < caseDef.numDescriptorSets; ++s)
        {
            vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
            vector<uint32_t> &arraySizes = randomLayout.arraySizes[s];

            if (b >= bindings.size())
            {
                continue;
            }

            VkDescriptorSetLayoutBinding &binding = bindings[b];
            binding.binding = b;
            binding.pImmutableSamplers = NULL;
            binding.stageFlags = caseDef.allShaderStages;

            // Output image
            if (s == 0 && b == 0)
            {
                binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
                binding.descriptorCount = 1;
                binding.stageFlags = caseDef.allShaderStages;
                numImage++;
                arraySizes[b] = 0;
                continue;
            }

#ifndef CTS_USES_VULKANSC
            // Raytracing acceleration structure
            if (s == 0 && b == 1 && usesAccelerationStructure(caseDef.stage))
            {
                binding.descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
                binding.descriptorCount = 1;
                binding.stageFlags = caseDef.allShaderStages;
                arraySizes[b] = 0;
                continue;
            }
#endif

            binding.descriptorCount = 0;

            // Select a random type of descriptor.
            std::map<int, vk::VkDescriptorType> intToType;
            {
                int index = 0;
                intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
                intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
                intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
                intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
                intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
#ifndef CTS_USES_VULKANSC
                intToType[index++] = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
#endif
                if (caseDef.stage == STAGE_FRAGMENT)
                {
                    intToType[index++] = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
                }
                if (allowDynamicBuffers)
                {
                    intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                    intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }

            int r = randRange(&rnd, 0, static_cast<int>(intToType.size() - 1));
            DE_ASSERT(r >= 0 && static_cast<size_t>(r) < intToType.size());

            // Add a binding for that descriptor type if possible.
            binding.descriptorType = intToType[r];
            switch (binding.descriptorType)
            {
            default:
                DE_ASSERT(0); // Fallthrough
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                if (numUBO < caseDef.maxPerStageUniformBuffers)
                {
                    arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageUniformBuffers - numUBO));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numUBO += binding.descriptorCount;
                }
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                if (numSSBO < caseDef.maxPerStageStorageBuffers)
                {
                    arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageBuffers - numSSBO));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numSSBO += binding.descriptorCount;

                    chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
                }
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                if (numStorageTex < caseDef.maxPerStageStorageTexelBuffers)
                {
                    arraySizes[b] =
                        randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageTexelBuffers - numStorageTex));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numStorageTex += binding.descriptorCount;

                    chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
                }
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                if (numImage < caseDef.maxPerStageStorageImages)
                {
                    arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageImages - numImage));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numImage += binding.descriptorCount;

                    chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
                }
                break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                if (numTexBuffer < caseDef.maxPerStageSampledImages)
                {
                    arraySizes[b] =
                        randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageSampledImages - numTexBuffer));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numTexBuffer += binding.descriptorCount;
                }
                break;
#ifndef CTS_USES_VULKANSC
            case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
                if (caseDef.maxInlineUniformBlocks > 0)
                {
                    if (numInlineUniformBlocks < caseDef.maxInlineUniformBlocks)
                    {
                        arraySizes[b] = randRange(
                            &rnd, 1, (caseDef.maxInlineUniformBlockSize - 16) / 16); // subtract 16 for "ivec4 unused"
                        arraySizes[b] = de::min(maxArray, arraySizes[b]);
                        binding.descriptorCount =
                            (arraySizes[b] ? arraySizes[b] : 1) * 16 + 16; // add 16 for "ivec4 unused"
                        numInlineUniformBlocks++;
                    }
                    else
                    {
                        // The meaning of descriptorCount for inline uniform blocks is different from usual:
                        // (descriptorCount == 0) doesn't mean the binding will be discarded.
                        // So we use a similar trick to the one below, replacing it with a different descriptor type.
                        binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
                    }
                }
                else
                {
                    // Plug in an unused descriptor type, so validation layers that don't
                    // support inline_uniform_block don't crash.
                    binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
                }
                break;
#endif
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                if (numUBODyn < caseDef.maxUniformBuffersDynamic && numUBO < caseDef.maxPerStageUniformBuffers)
                {
                    arraySizes[b] = randRange(&rnd, 0,
                                              de::min(maxArray, de::min(caseDef.maxUniformBuffersDynamic - numUBODyn,
                                                                        caseDef.maxPerStageUniformBuffers - numUBO)));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numUBO += binding.descriptorCount;
                    numUBODyn += binding.descriptorCount;
                }
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                if (numSSBODyn < caseDef.maxStorageBuffersDynamic && numSSBO < caseDef.maxPerStageStorageBuffers)
                {
                    arraySizes[b] = randRange(&rnd, 0,
                                              de::min(maxArray, de::min(caseDef.maxStorageBuffersDynamic - numSSBODyn,
                                                                        caseDef.maxPerStageStorageBuffers - numSSBO)));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numSSBO += binding.descriptorCount;
                    numSSBODyn += binding.descriptorCount;

                    chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
                }
                break;
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                if (numInputAttachments < caseDef.maxPerStageInputAttachments)
                {
                    arraySizes[b] = randRange(
                        &rnd, 0, de::min(maxArray, caseDef.maxPerStageInputAttachments - numInputAttachments));
                    binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
                    numInputAttachments += binding.descriptorCount;
                }
                break;
            }

            binding.stageFlags = ((binding.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) ?
                                      (VkFlags)(VK_SHADER_STAGE_FRAGMENT_BIT) :
                                      caseDef.allShaderStages);
        }
    }

    for (uint32_t s = 0; s < caseDef.numDescriptorSets; ++s)
    {
        vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
        vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
        vector<uint32_t> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;

        // Choose a variable descriptor count size. If the feature is not supported, we'll just
        // allocate the whole thing later on.
        if (bindings.size() > 0 &&
            bindings[bindings.size() - 1].descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC &&
            bindings[bindings.size() - 1].descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC &&
            bindings[bindings.size() - 1].descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT &&
#ifndef CTS_USES_VULKANSC
            bindings[bindings.size() - 1].descriptorType != VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR &&
#endif
            bindings[bindings.size() - 1].descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
            !(s == 0 && bindings.size() == 1) && // Don't cut out the output image binding
            randRange(&rnd, 1, 4) == 1) // 1 in 4 chance
        {

            bindingsFlags[bindings.size() - 1] |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
            variableDescriptorSizes[s] = randRange(&rnd, 0, bindings[bindings.size() - 1].descriptorCount);
#ifndef CTS_USES_VULKANSC
            if (bindings[bindings.size() - 1].descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
            {
                // keep a multiple of 16B
                variableDescriptorSizes[s] &= ~0xF;
            }
#endif
        }
    }
}

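// Decides which array elements of a binding get checked in the generated
// shader. Always checks the first three and the last element, plus roughly
// kRandomChecksPerBinding random ones, so shader size stays bounded for
// large arrays.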
class CheckDecider
{
public:
    CheckDecider(deRandom &rnd, uint32_t descriptorCount)
        : m_rnd(rnd)
        , m_count(descriptorCount)
        , m_remainder(0u)
        , m_have_remainder(false)
    {
    }

    bool shouldCheck(uint32_t arrayIndex)
    {
        // Always check the first 3 and the last one, at least.
        if (arrayIndex <= 2u || arrayIndex == m_count - 1u)
            return true;

        if (!m_have_remainder)
        {
            // Find a random remainder for this set and binding.
            DE_ASSERT(m_count >= kRandomChecksPerBinding);

            // Because the divisor will be m_count/kRandomChecksPerBinding and the remainder will be chosen randomly for the
            // divisor, we expect to check around kRandomChecksPerBinding descriptors per binding randomly, no matter the amount of
            // descriptors in the binding.
            m_remainder = static_cast<uint32_t>(
                randRange(&m_rnd, 0, static_cast<int32_t>((m_count / kRandomChecksPerBinding) - 1)));
            m_have_remainder = true;
        }

        return (arrayIndex % (m_count / kRandomChecksPerBinding) == m_remainder);
    }

private:
    static constexpr uint32_t kRandomChecksPerBinding = 4u;

    deRandom &m_rnd;
    uint32_t m_count;
    uint32_t m_remainder;
    bool m_have_remainder;
};

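// Generates the random layout for this case and emits shader source that
// checks (or, for chosen write targets, writes) each selected descriptor.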
void DescriptorSetRandomTestCase::initPrograms(SourceCollections &programCollection) const
{
    const vk::ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);

    deRandom rnd;
    deRandom_init(&rnd, m_data.seed);

    m_data.randomLayout.reset(new RandomLayout(m_data.numDescriptorSets));
    RandomLayout &randomLayout = *m_data.randomLayout.get();
    generateRandomLayout(randomLayout, m_data, rnd);

    std::stringstream decls, checks;

    uint32_t inputAttachments = 0;
    uint32_t descriptor = 0;

    for (uint32_t s = 0; s < m_data.numDescriptorSets; ++s)
    {
        vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
        vector<VkDescriptorBindingFlags> bindingsFlags = randomLayout.layoutBindingFlags[s];
        vector<uint32_t> &arraySizes = randomLayout.arraySizes[s];
        vector<uint32_t> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;

        for (size_t b = 0; b < bindings.size(); ++b)
        {
            VkDescriptorSetLayoutBinding &binding = bindings[b];
            uint32_t descriptorIncrement = 1;
#ifndef CTS_USES_VULKANSC
            if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
                descriptorIncrement = 16;
#endif

            // Construct the declaration for the binding
            if (binding.descriptorCount > 0)
            {
                std::stringstream array;
                if (m_data.indexType == INDEX_TYPE_RUNTIME_SIZE
#ifndef CTS_USES_VULKANSC
                    && binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
#endif
                )
                {
                    if (arraySizes[b])
                    {
                        array << "[]";
                    }
                }
                else
                {
                    if (arraySizes[b])
                    {
                        array << "[" << arraySizes[b] << "]";
                    }
                }

                switch (binding.descriptorType)
                {
#ifndef CTS_USES_VULKANSC
                case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
                    decls << "layout(set = " << s << ", binding = " << b << ") uniform inlineubodef" << s << "_" << b
                          << " { ivec4 unused; int val" << array.str() << "; } inlineubo" << s << "_" << b << ";\n";
                    break;
#endif
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                    decls << "layout(set = " << s << ", binding = " << b << ") uniform ubodef" << s << "_" << b
                          << " { int val; } ubo" << s << "_" << b << array.str() << ";\n";
                    break;
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                    decls << "layout(set = " << s << ", binding = " << b << ") buffer sbodef" << s << "_" << b
                          << " { int val; } ssbo" << s << "_" << b << array.str() << ";\n";
                    break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                    decls << "layout(set = " << s << ", binding = " << b << ") uniform itextureBuffer texbo" << s << "_"
                          << b << array.str() << ";\n";
                    break;
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                    decls << "layout(r32i, set = " << s << ", binding = " << b << ") uniform iimageBuffer image" << s
                          << "_" << b << array.str() << ";\n";
                    break;
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                    decls << "layout(r32i, set = " << s << ", binding = " << b << ") uniform iimage2D simage" << s
                          << "_" << b << array.str() << ";\n";
                    break;
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                    decls << "layout(input_attachment_index = " << inputAttachments << ", set = " << s
                          << ", binding = " << b << ") uniform isubpassInput attachment" << s << "_" << b << array.str()
                          << ";\n";
                    inputAttachments += binding.descriptorCount;
                    break;
#ifndef CTS_USES_VULKANSC
                case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
                    DE_ASSERT(s == 0 && b == 1);
                    DE_ASSERT(bindings.size() >= 2);
                    decls << "layout(set = " << s << ", binding = " << b << ") uniform accelerationStructureEXT as" << s
                          << "_" << b << ";\n";
                    break;
#endif
                default:
                    DE_ASSERT(0);
                }

                const uint32_t arraySize = de::max(1u, arraySizes[b]);
                CheckDecider checkDecider(rnd, arraySize);

                for (uint32_t ai = 0; ai < arraySize; ++ai, descriptor += descriptorIncrement)
                {
                    // Don't access descriptors past the end of the allocated range for
                    // variable descriptor count
                    if (b == bindings.size() - 1 &&
                        (bindingsFlags[b] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT))
                    {
#ifndef CTS_USES_VULKANSC
                        if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
                        {
                            // Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
                            const uint32_t uboRange = ai * 16 + 16;
                            if (uboRange >= variableDescriptorSizes[s])
                                continue;
                        }
                        else
#endif
                        {
                            if (ai >= variableDescriptorSizes[s])
                                continue;
                        }
                    }

                    if (s == 0 && b == 0)
                    {
                        // This is the output image, skip.
                        continue;
                    }

                    if (s == 0 && b == 1 && usesAccelerationStructure(m_data.stage))
                    {
                        // This is the raytracing acceleration structure, skip.
                        continue;
                    }

                    if (checkDecider.shouldCheck(ai))
                    {
                        // Check that the value in the descriptor equals its descriptor number.
                        // i.e. check "ubo[c].val == descriptor" or "ubo[pushconst[c]].val == descriptor"
                        // When doing a write check, write the descriptor number in the value.

                        // First, construct the index. This can be a constant literal, a value
                        // from a push constant, or a function of the previous descriptor value.
                        std::stringstream ind;
                        switch (m_data.indexType)
                        {
                        case INDEX_TYPE_NONE:
                        case INDEX_TYPE_CONSTANT:
                            // The index is just the constant literal
                            if (arraySizes[b])
                            {
                                ind << "[" << ai << "]";
                            }
                            break;
                        case INDEX_TYPE_PUSHCONSTANT:
                            // identity is an int[], directly index it
                            if (arraySizes[b])
                            {
                                ind << "[pc.identity[" << ai << "]]";
                            }
                            break;
                        case INDEX_TYPE_RUNTIME_SIZE:
                        case INDEX_TYPE_DEPENDENT:
                            // Index is a function of the previous return value (which is reset to zero)
                            if (arraySizes[b])
                            {
                                ind << "[accum + " << ai << "]";
                            }
                            break;
                        default:
                            DE_ASSERT(0);
                        }

                        const DescriptorId descriptorId(s, static_cast<uint32_t>(b), ai);
                        auto writesItr = randomLayout.descriptorWrites.find(descriptorId);

                        if (writesItr == randomLayout.descriptorWrites.end())
                        {
                            // Fetch from the descriptor.
                            switch (binding.descriptorType)
                            {
#ifndef CTS_USES_VULKANSC
                            case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
                                checks << " temp = inlineubo" << s << "_" << b << ".val" << ind.str() << ";\n";
                                break;
#endif
                            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                                checks << " temp = ubo" << s << "_" << b << ind.str() << ".val;\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                                checks << " temp = ssbo" << s << "_" << b << ind.str() << ".val;\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                                checks << " temp = texelFetch(texbo" << s << "_" << b << ind.str() << ", 0).x;\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                                checks << " temp = imageLoad(image" << s << "_" << b << ind.str() << ", 0).x;\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                                checks << " temp = imageLoad(simage" << s << "_" << b << ind.str()
                                       << ", ivec2(0, 0)).x;\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                checks << " temp = subpassLoad(attachment" << s << "_" << b << ind.str() << ").r;\n";
                                break;
                            default:
                                DE_ASSERT(0);
                            }

                            // Accumulate any incorrect values.
                            checks << " accum |= temp - " << descriptor << ";\n";
                        }
                        else
                        {
                            // Check descriptor write. We need to confirm we are actually generating write code for this descriptor.
                            writesItr->second.writeGenerated = true;

                            // Assign each write operation to a single invocation to avoid race conditions.
                            const auto expectedInvocationID = descriptor % (DIM * DIM);
                            const std::string writeCond =
                                "if (" + de::toString(expectedInvocationID) + " == invocationID)";

                            switch (binding.descriptorType)
                            {
                            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                                checks << " " << writeCond << " ssbo" << s << "_" << b << ind.str()
                                       << ".val = " << descriptor << ";\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                                checks << " " << writeCond << " imageStore(image" << s << "_" << b << ind.str()
                                       << ", 0, ivec4(" << descriptor << ", 0, 0, 0));\n";
                                break;
                            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                                checks << " " << writeCond << " imageStore(simage" << s << "_" << b << ind.str()
                                       << ", ivec2(0, 0), ivec4(" << descriptor << ", 0, 0, 0));\n";
                                break;
                            default:
                                DE_ASSERT(0);
                            }
                        }
                    }
                }
            }
        }
    }

    std::stringstream pushdecl;
    switch (m_data.indexType)
    {
    case INDEX_TYPE_PUSHCONSTANT:
        pushdecl << "layout (push_constant, std430) uniform Block { int identity[32]; } pc;\n";
        break;
    default:
        DE_ASSERT(0);
    case INDEX_TYPE_NONE:
    case INDEX_TYPE_CONSTANT:
    case INDEX_TYPE_DEPENDENT:
    case INDEX_TYPE_RUNTIME_SIZE:
        break;
    }

    switch (m_data.stage)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case STAGE_COMPUTE:
    {
        std::stringstream css;
        css << "#version 450 core\n"
               "#extension GL_EXT_nonuniform_qualifier : enable\n"
            << pushdecl.str() << decls.str()
            << "layout(local_size_x = 1, local_size_y = 1) in;\n"
               "void main()\n"
               "{\n"
               " const int invocationID = int(gl_GlobalInvocationID.y) * "
            << DIM
            << " + int(gl_GlobalInvocationID.x);\n"
               " int accum = 0, temp;\n"
            << checks.str()
            << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
               " imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
               "}\n";

        programCollection.glslSources.add("test") << glu::ComputeSource(css.str());
        break;
    }
#ifndef CTS_USES_VULKANSC
    case STAGE_RAYGEN_NV:
    {
        std::stringstream css;
        css << "#version 460 core\n"
               "#extension GL_EXT_nonuniform_qualifier : enable\n"
               "#extension GL_NV_ray_tracing : require\n"
            << pushdecl.str() << decls.str()
            << "void main()\n"
               "{\n"
               " const int invocationID = int(gl_LaunchIDNV.y) * "
            << DIM
            << " + int(gl_LaunchIDNV.x);\n"
               " int accum = 0, temp;\n"
            << checks.str()
            << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
               " imageStore(simage0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
               "}\n";

        programCollection.glslSources.add("test") << glu::RaygenSource(css.str());
        break;
    }
    case STAGE_RAYGEN:
    {
        std::stringstream css;
        css << "#version 460 core\n"
               "#extension GL_EXT_nonuniform_qualifier : enable\n"
               "#extension GL_EXT_ray_tracing : require\n"
            << pushdecl.str() << decls.str()
            << "void main()\n"
               "{\n"
               " const int invocationID = int(gl_LaunchIDEXT.y) * "
            << DIM
            << " + int(gl_LaunchIDEXT.x);\n"
               " int accum = 0, temp;\n"
            << checks.str()
            << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
               " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
               "}\n";

        programCollection.glslSources.add("test") << glu::RaygenSource(updateRayTracingGLSL(css.str())) << buildOptions;
        break;
    }
    case STAGE_INTERSECT:
    {
        {
            programCollection.glslSources.add("rgen")
                << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
        }

        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "hitAttributeEXT vec3 hitAttribute;\n"
                << pushdecl.str() << decls.str()
                << "void main()\n"
                   "{\n"
                   " const int invocationID = int(gl_LaunchIDEXT.y) * "
                << DIM
                << " + int(gl_LaunchIDEXT.x);\n"
                   " int accum = 0, temp;\n"
                << checks.str()
                << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
                   " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
                   " hitAttribute = vec3(0.0f, 0.0f, 0.0f);\n"
                   " reportIntersectionEXT(1.0f, 0);\n"
                   "}\n";

            programCollection.glslSources.add("test")
                << glu::IntersectionSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }

        break;
    }
    case STAGE_ANY_HIT:
    {
        {
            programCollection.glslSources.add("rgen")
                << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
        }

        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
                   "hitAttributeEXT vec3 attribs;\n"
                << pushdecl.str() << decls.str()
                << "void main()\n"
                   "{\n"
                   " const int invocationID = int(gl_LaunchIDEXT.y) * "
                << DIM
                << " + int(gl_LaunchIDEXT.x);\n"
                   " int accum = 0, temp;\n"
                << checks.str()
                << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
                   " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
                   "}\n";

            programCollection.glslSources.add("test")
                << glu::AnyHitSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }

        break;
    }
    case STAGE_CLOSEST_HIT:
    {
        {
            programCollection.glslSources.add("rgen")
                << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
        }

        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
                   "hitAttributeEXT vec3 attribs;\n"
                << pushdecl.str() << decls.str()
                << "void main()\n"
                   "{\n"
                   " const int invocationID = int(gl_LaunchIDEXT.y) * "
                << DIM
                << " + int(gl_LaunchIDEXT.x);\n"
                   " int accum = 0, temp;\n"
                << checks.str()
                << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
                   " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
                   "}\n";

            programCollection.glslSources.add("test")
                << glu::ClosestHitSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }

        break;
    }
    case STAGE_MISS:
    {
        {
            programCollection.glslSources.add("rgen")
                << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
        }

        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
                << pushdecl.str() << decls.str()
                << "void main()\n"
                   "{\n"
                   " const int invocationID = int(gl_LaunchIDEXT.y) * "
                << DIM
                << " + int(gl_LaunchIDEXT.x);\n"
                   " int accum = 0, temp;\n"
                << checks.str()
                << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
                   " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
                   "}\n";

            programCollection.glslSources.add("test")
                << glu::MissSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }

        break;
    }
    case STAGE_CALLABLE:
    {
        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "layout(location = 0) callableDataEXT float dummy;"
                   "layout(set = 0, binding = 1) uniform accelerationStructureEXT topLevelAS;\n"
                   "\n"
                   "void main()\n"
                   "{\n"
                   " executeCallableEXT(0, 0);\n"
                   "}\n";

            programCollection.glslSources.add("rgen")
                << glu::RaygenSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }

        {
            std::stringstream css;
            css << "#version 460 core\n"
                   "#extension GL_EXT_nonuniform_qualifier : enable\n"
                   "#extension GL_EXT_ray_tracing : require\n"
                   "layout(location = 0) callableDataInEXT float dummy;"
                << pushdecl.str() << decls.str()
                << "void main()\n"
                   "{\n"
                   " const int invocationID = int(gl_LaunchIDEXT.y) * "
                << DIM
                << " + int(gl_LaunchIDEXT.x);\n"
                   " int accum = 0, temp;\n"
                << checks.str()
                << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
                   " imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
                   "}\n";

            programCollection.glslSources.add("test")
                << glu::CallableSource(updateRayTracingGLSL(css.str())) << buildOptions;
        }
        break;
    }
#endif
    case STAGE_VERTEX:
    {
        std::stringstream vss;
        vss << "#version 450 core\n"
               "#extension GL_EXT_nonuniform_qualifier : enable\n"
            << pushdecl.str() << decls.str()
            << "void main()\n"
               "{\n"
               " const int invocationID = gl_VertexIndex;\n"
               " int accum = 0, temp;\n"
            << checks.str()
            << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
               " imageStore(simage0_0, ivec2(gl_VertexIndex % "
            << DIM << ", gl_VertexIndex / " << DIM
            << "), color);\n"
               " gl_PointSize = 1.0f;\n"
               " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
               "}\n";

        programCollection.glslSources.add("test") << glu::VertexSource(vss.str());
        break;
    }
    case STAGE_TASK:
    {
        std::stringstream task;
        task << "#version 450\n"
             << "#extension GL_EXT_mesh_shader : enable\n"
             << "#extension GL_EXT_nonuniform_qualifier : enable\n"
             << pushdecl.str() << decls.str() << "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
             << "void main()\n"
             << "{\n"
             << " const int invocationID = int(gl_GlobalInvocationID.y) * " << DIM
             << " + int(gl_GlobalInvocationID.x);\n"
             << " int accum = 0, temp;\n"
             << checks.str() << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
             << " imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
             << " EmitMeshTasksEXT(0, 0, 0);\n"
             << "}\n";
        programCollection.glslSources.add("test") << glu::TaskSource(task.str()) << buildOptions;

        std::stringstream mesh;
        mesh << "#version 450\n"
             << "#extension GL_EXT_mesh_shader : enable\n"
             << "#extension GL_EXT_nonuniform_qualifier : enable\n"
             << "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
             << "layout(triangles) out;\n"
             << "layout(max_vertices=3, max_primitives=1) out;\n"
             << "void main()\n"
             << "{\n"
             << " SetMeshOutputsEXT(0, 0);\n"
             << "}\n";
        programCollection.glslSources.add("mesh") << glu::MeshSource(mesh.str()) << buildOptions;

        break;
    }
    case STAGE_MESH:
    {
        std::stringstream mesh;
        mesh << "#version 450\n"
             << "#extension GL_EXT_mesh_shader : enable\n"
             << "#extension GL_EXT_nonuniform_qualifier : enable\n"
             << pushdecl.str() << decls.str() << "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
             << "layout(triangles) out;\n"
             << "layout(max_vertices=3, max_primitives=1) out;\n"
             << "void main()\n"
             << "{\n"
             << " const int invocationID = int(gl_GlobalInvocationID.y) * " << DIM
             << " + int(gl_GlobalInvocationID.x);\n"
             << " int accum = 0, temp;\n"
             << checks.str() << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
             << " imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
             << "}\n";
        programCollection.glslSources.add("test") << glu::MeshSource(mesh.str()) << buildOptions;

        break;
    }
    case STAGE_FRAGMENT:
    {
        std::stringstream vss;
        vss << "#version 450 core\n"
               "void main()\n"
               "{\n"
               // full-viewport quad
               " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * "
               "float(gl_VertexIndex&1), 1);\n"
               "}\n";

        programCollection.glslSources.add("vert") << glu::VertexSource(vss.str());

        std::stringstream fss;
        fss << "#version 450 core\n"
               "#extension GL_EXT_nonuniform_qualifier : enable\n"
            << pushdecl.str() << decls.str()
            << "void main()\n"
               "{\n"
               " const int invocationID = int(gl_FragCoord.y) * "
            << DIM
            << " + int(gl_FragCoord.x);\n"
               " int accum = 0, temp;\n"
            << checks.str()
            << " ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
               " imageStore(simage0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
               "}\n";

        programCollection.glslSources.add("test") << glu::FragmentSource(fss.str());
        break;
    }
    }
}

TestInstance *DescriptorSetRandomTestCase::createInstance(Context &context) const
{
    return new DescriptorSetRandomTestInstance(context, m_data_ptr);
}

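// Appends a shader stage create info for the given module to a pipeline stage list.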
void appendShaderStageCreateInfo(std::vector<VkPipelineShaderStageCreateInfo> &vec, VkShaderModule module,
                                 VkShaderStageFlagBits stage)
{
    const VkPipelineShaderStageCreateInfo info = {
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                             // const void* pNext;
        0u,                                                  // VkPipelineShaderStageCreateFlags flags;
        stage,                                               // VkShaderStageFlagBits stage;
        module,                                              // VkShaderModule module;
        "main",                                              // const char* pName;
        nullptr,                                             // const VkSpecializationInfo* pSpecializationInfo;
    };

    vec.push_back(info);
}

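// Builds descriptor set layouts, pools and sets for the generated random
// layout, then runs the selected shader stage and verifies the output image.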
iterate(void)1466 tcu::TestStatus DescriptorSetRandomTestInstance::iterate(void)
1467 {
1468 const InstanceInterface &vki = m_context.getInstanceInterface();
1469 const DeviceInterface &vk = m_context.getDeviceInterface();
1470 const VkDevice device = m_context.getDevice();
1471 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
1472 Allocator &allocator = m_context.getDefaultAllocator();
1473 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1474
1475 deRandom rnd;
1476 VkPhysicalDeviceProperties2 properties = getPhysicalDeviceExtensionProperties(vki, physicalDevice);
1477 #ifndef CTS_USES_VULKANSC
1478 uint32_t shaderGroupHandleSize = 0;
1479 uint32_t shaderGroupBaseAlignment = 1;
1480 #endif
1481
1482 deRandom_init(&rnd, m_data.seed);
1483 RandomLayout &randomLayout = *m_data.randomLayout.get();
1484
1485 #ifndef CTS_USES_VULKANSC
1486 if (m_data.stage == STAGE_RAYGEN_NV)
1487 {
1488 const VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties =
1489 getPhysicalDeviceExtensionProperties(vki, physicalDevice);
1490
1491 shaderGroupHandleSize = rayTracingProperties.shaderGroupHandleSize;
1492 }
1493
1494 if (isRayTracingStageKHR(m_data.stage))
1495 {
1496 de::MovePtr<RayTracingProperties> rayTracingPropertiesKHR;
1497
1498 rayTracingPropertiesKHR = makeRayTracingProperties(vki, physicalDevice);
1499 shaderGroupHandleSize = rayTracingPropertiesKHR->getShaderGroupHandleSize();
1500 shaderGroupBaseAlignment = rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
1501 }
1502 #endif
1503
1504 // Get needed features.
1505 auto descriptorIndexingSupported = m_context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing");
1506 auto indexingFeatures = m_context.getDescriptorIndexingFeatures();
1507 #ifndef CTS_USES_VULKANSC
1508 auto inlineUniformFeatures = m_context.getInlineUniformBlockFeatures();
1509 #endif
1510
1511 VkPipelineBindPoint bindPoint;
1512
1513 switch (m_data.stage)
1514 {
1515 case STAGE_COMPUTE:
1516 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1517 break;
1518 #ifndef CTS_USES_VULKANSC
1519 case STAGE_RAYGEN_NV:
1520 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1521 break;
1522 #endif
1523 default:
1524 bindPoint =
1525 #ifndef CTS_USES_VULKANSC
1526 isRayTracingStageKHR(m_data.stage) ? VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR :
1527 #endif
1528 VK_PIPELINE_BIND_POINT_GRAPHICS;
1529 break;
1530 }
1531
1532 DE_ASSERT(m_data.numDescriptorSets <= 32);
1533 Move<vk::VkDescriptorSetLayout> descriptorSetLayouts[32];
1534 Move<vk::VkDescriptorPool> descriptorPools[32];
1535 Move<vk::VkDescriptorSet> descriptorSets[32];
1536
1537 uint32_t numDescriptors = 0;
1538 for (uint32_t s = 0; s < m_data.numDescriptorSets; ++s)
1539 {
1540 vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
1541 vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
1542 vector<uint32_t> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
1543
1544 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1545 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1546
1547 for (size_t b = 0; b < bindings.size(); ++b)
1548 {
1549 VkDescriptorSetLayoutBinding &binding = bindings[b];
1550 numDescriptors += binding.descriptorCount;
1551
1552 // Randomly choose some bindings to use update-after-bind, if it is supported
1553 if (descriptorIndexingSupported && m_data.uab == UPDATE_AFTER_BIND_ENABLED &&
1554 randRange(&rnd, 1, 8) == 1 && // 1 in 8 chance
1555 (binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1556 indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind) &&
1557 (binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1558 indexingFeatures.descriptorBindingStorageImageUpdateAfterBind) &&
1559 (binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1560 indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind) &&
1561 (binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
1562 indexingFeatures.descriptorBindingUniformTexelBufferUpdateAfterBind) &&
1563 (binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1564 indexingFeatures.descriptorBindingStorageTexelBufferUpdateAfterBind) &&
1565 #ifndef CTS_USES_VULKANSC
1566 (binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT ||
1567 inlineUniformFeatures.descriptorBindingInlineUniformBlockUpdateAfterBind) &&
1568 #endif
1569 (binding.descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) &&
1570 (binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) &&
1571 (binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1572 #ifndef CTS_USES_VULKANSC
1573 && (binding.descriptorType != VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
1574 #endif
1575 )
1576 {
1577 bindingsFlags[b] |= VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT;
1578 layoutCreateFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
1579 poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
1580 }
1581
1582 if (!indexingFeatures.descriptorBindingVariableDescriptorCount)
1583 {
1584 bindingsFlags[b] &= ~VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
1585 }
1586 }
1587
1588 // Create a layout and allocate a descriptor set for it.
1589
1590 const VkDescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsInfo = {
1591 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, // VkStructureType sType;
1592 DE_NULL, // const void* pNext;
1593 (uint32_t)bindings.size(), // uint32_t bindingCount;
1594 bindings.empty() ? DE_NULL : bindingsFlags.data(), // const VkDescriptorBindingFlags* pBindingFlags;
1595 };
1596
1597 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo = {
1598 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
1599 (descriptorIndexingSupported ? &bindingFlagsInfo : DE_NULL), // const void* pNext;
1600 layoutCreateFlags, // VkDescriptorSetLayoutCreateFlags flags;
1601 (uint32_t)bindings.size(), // uint32_t bindingCount;
1602 bindings.empty() ? DE_NULL : bindings.data() // const VkDescriptorSetLayoutBinding* pBindings;
1603 };
1604
1605 descriptorSetLayouts[s] = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
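
// Size the pool using the test's per-stage maxima so that any layout
// generated under those limits is guaranteed to fit.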
1606
1607 vk::DescriptorPoolBuilder poolBuilder;
1608 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, m_data.maxPerStageUniformBuffers);
1609 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, m_data.maxUniformBuffersDynamic);
1610 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_data.maxPerStageStorageBuffers);
1611 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, m_data.maxStorageBuffersDynamic);
1612 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, m_data.maxPerStageSampledImages);
1613 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, m_data.maxPerStageStorageTexelBuffers);
1614 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_data.maxPerStageStorageImages);
1615 if (m_data.maxPerStageInputAttachments > 0u)
1616 {
1617 poolBuilder.addType(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, m_data.maxPerStageInputAttachments);
1618 }
1619 #ifndef CTS_USES_VULKANSC
1620 if (m_data.maxInlineUniformBlocks > 0u)
1621 {
1622 poolBuilder.addType(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT,
1623 m_data.maxInlineUniformBlocks * m_data.maxInlineUniformBlockSize);
1624 }
1625 if (usesAccelerationStructure(m_data.stage))
1626 {
1627 poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, 1u);
1628 }
1629
1630 VkDescriptorPoolInlineUniformBlockCreateInfoEXT inlineUniformBlockPoolCreateInfo = {
1631 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT, // VkStructureType sType;
1632 DE_NULL, // const void* pNext;
1633 m_data.maxInlineUniformBlocks, // uint32_t maxInlineUniformBlockBindings;
1634 };
1635 #endif
1636 descriptorPools[s] = poolBuilder.build(vk, device, poolCreateFlags, 1u,
1637 #ifndef CTS_USES_VULKANSC
1638 m_data.maxInlineUniformBlocks ? &inlineUniformBlockPoolCreateInfo :
1639 #endif
1640 DE_NULL);
1641
1642 VkDescriptorSetVariableDescriptorCountAllocateInfo variableCountInfo = {
1643 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, // VkStructureType sType;
1644 DE_NULL, // const void* pNext;
1645 0, // uint32_t descriptorSetCount;
1646 DE_NULL, // const uint32_t* pDescriptorCounts;
1647 };
1648
1649 const void *pNext = DE_NULL;
1650 if (bindings.size() > 0 &&
1651 bindingsFlags[bindings.size() - 1] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)
1652 {
1653 variableCountInfo.descriptorSetCount = 1;
1654 variableCountInfo.pDescriptorCounts = &variableDescriptorSizes[s];
1655 pNext = &variableCountInfo;
1656 }
1657
1658 descriptorSets[s] = makeDescriptorSet(vk, device, *descriptorPools[s], *descriptorSetLayouts[s], pNext);
1659 }
1660
1661 // Create a buffer to hold data for all descriptors.
1662 VkDeviceSize align =
1663 std::max({properties.properties.limits.minTexelBufferOffsetAlignment,
1664 properties.properties.limits.minUniformBufferOffsetAlignment,
1665 properties.properties.limits.minStorageBufferOffsetAlignment, (VkDeviceSize)sizeof(uint32_t)});
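// Descriptor number 'd' is backed by the align-sized slice at offset
// align * d, so every descriptor reads a distinct uint32_t.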
1666
1667 de::MovePtr<BufferWithMemory> buffer;
1668
1669 buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1670 vk, device, allocator,
1671 makeBufferCreateInfo(align * numDescriptors,
1672 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1673 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1674 MemoryRequirement::HostVisible));
1675 uint8_t *bufferPtr = (uint8_t *)buffer->getAllocation().getHostPtr();
1676
1677 // Create storage images separately.
1678 uint32_t storageImageCount = 0u;
1679 vector<Move<VkImage>> storageImages;
1680
1681 const VkImageCreateInfo storageImgCreateInfo = {
1682 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1683 DE_NULL, // const void* pNext;
1684 0u, // VkImageCreateFlags flags;
1685 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1686 VK_FORMAT_R32_SINT, // VkFormat format;
1687 {1u, 1u, 1u}, // VkExtent3D extent;
1688 1u, // uint32_t mipLevels;
1689 1u, // uint32_t arrayLayers;
1690 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1691 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1692 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1693 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1694 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1695 1u, // uint32_t queueFamilyIndexCount;
1696 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
1697 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1698 };
1699
1700 // Create storage images.
1701 for (const auto &bindings : randomLayout.layoutBindings)
1702 for (const auto &binding : bindings)
1703 {
1704 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1705 {
1706 storageImageCount += binding.descriptorCount;
1707 for (uint32_t d = 0; d < binding.descriptorCount; ++d)
1708 {
1709 storageImages.push_back(createImage(vk, device, &storageImgCreateInfo));
1710 }
1711 }
1712 }
1713
1714 // Allocate memory for them.
1715 vk::VkMemoryRequirements storageImageMemReqs;
1716 vk.getImageMemoryRequirements(device, *storageImages.front(), &storageImageMemReqs);
1717
1718 de::MovePtr<Allocation> storageImageAlloc;
1719 VkDeviceSize storageImageBlockSize = 0u;
1720 {
1721 VkDeviceSize mod = (storageImageMemReqs.size % storageImageMemReqs.alignment);
1722 storageImageBlockSize = storageImageMemReqs.size + ((mod == 0u) ? 0u : storageImageMemReqs.alignment - mod);
1723 }
1724 storageImageMemReqs.size = storageImageBlockSize * storageImageCount;
1725 storageImageAlloc = allocator.allocate(storageImageMemReqs, MemoryRequirement::Any);
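// One allocation backs all storage images; image i is bound at offset
// i * storageImageBlockSize below.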
1726
1727 // Allocate buffer to copy storage images to.
1728 auto storageImgBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1729 vk, device, allocator,
1730 makeBufferCreateInfo(storageImageCount * sizeof(int32_t), VK_BUFFER_USAGE_TRANSFER_DST_BIT),
1731 MemoryRequirement::HostVisible));
1732 int32_t *storageImgBufferPtr = reinterpret_cast<int32_t *>(storageImgBuffer->getAllocation().getHostPtr());
1733
1734 // Create image views.
1735 vector<Move<VkImageView>> storageImageViews;
1736 {
1737 VkImageViewCreateInfo storageImageViewCreateInfo = {
1738 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1739 DE_NULL, // const void* pNext;
1740 0u, // VkImageViewCreateFlags flags;
1741 DE_NULL, // VkImage image;
1742 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1743 VK_FORMAT_R32_SINT, // VkFormat format;
{ // VkComponentMapping components;
1745 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
1746 VK_COMPONENT_SWIZZLE_IDENTITY},
1747 {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u} // VkImageSubresourceRange subresourceRange;
1748 };
1749
1750 for (uint32_t i = 0; i < static_cast<uint32_t>(storageImages.size()); ++i)
1751 {
1752 // Bind image memory.
1753 vk::VkImage img = *storageImages[i];
1754 VK_CHECK(vk.bindImageMemory(device, img, storageImageAlloc->getMemory(),
1755 storageImageAlloc->getOffset() + i * storageImageBlockSize));
1756
1757 // Create view.
1758 storageImageViewCreateInfo.image = img;
1759 storageImageViews.push_back(createImageView(vk, device, &storageImageViewCreateInfo));
1760 }
1761 }
1762
1763 // Create input attachment images.
1764 vector<Move<VkImage>> inputAttachments;
1765 const VkImageCreateInfo imgCreateInfo = {
1766 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1767 DE_NULL, // const void* pNext;
1768 0u, // VkImageCreateFlags flags;
1769 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1770 VK_FORMAT_R32_SINT, // VkFormat format;
1771 {DIM, DIM, 1u}, // VkExtent3D extent;
1772 1u, // uint32_t mipLevels;
1773 1u, // uint32_t arrayLayers;
1774 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1775 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1776 (VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT), // VkImageUsageFlags usage;
1777 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1778 1u, // uint32_t queueFamilyIndexCount;
1779 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout
};
1783
1784 uint32_t inputAttachmentCount = 0u;
1785 for (const auto &bindings : randomLayout.layoutBindings)
1786 for (const auto &binding : bindings)
1787 {
1788 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
1789 {
1790 inputAttachmentCount += binding.descriptorCount;
1791 for (uint32_t d = 0; d < binding.descriptorCount; ++d)
1792 {
1793 inputAttachments.push_back(createImage(vk, device, &imgCreateInfo));
1794 }
1795 }
1796 }
1797
1798 de::MovePtr<Allocation> inputAttachmentAlloc;
1799 VkDeviceSize imageBlockSize = 0u;
1800
1801 if (inputAttachmentCount > 0u)
1802 {
1803 VkMemoryRequirements imageReqs = getImageMemoryRequirements(vk, device, inputAttachments.back().get());
1804 VkDeviceSize mod = imageReqs.size % imageReqs.alignment;
1805
1806 // Create memory for every input attachment image.
1807 imageBlockSize = imageReqs.size + ((mod == 0u) ? 0u : (imageReqs.alignment - mod));
1808 imageReqs.size = imageBlockSize * inputAttachmentCount;
1809 inputAttachmentAlloc = allocator.allocate(imageReqs, MemoryRequirement::Any);
1810 }
1811
1812 // Bind memory to each input attachment and create an image view.
1813 VkImageViewCreateInfo inputAttachmentViewParams = {
1814 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1815 DE_NULL, // const void* pNext;
1816 0u, // VkImageViewCreateFlags flags;
1817 DE_NULL, // VkImage image;
1818 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1819 VK_FORMAT_R32_SINT, // VkFormat format;
{ // VkComponentMapping components;
1821 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
1822 VK_COMPONENT_SWIZZLE_IDENTITY},
1823 {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u} // VkImageSubresourceRange subresourceRange;
1824 };
1825 vector<Move<VkImageView>> inputAttachmentViews;
1826
1827 for (uint32_t i = 0; i < static_cast<uint32_t>(inputAttachments.size()); ++i)
1828 {
1829 vk::VkImage img = *inputAttachments[i];
1830 VK_CHECK(vk.bindImageMemory(device, img, inputAttachmentAlloc->getMemory(),
1831 inputAttachmentAlloc->getOffset() + i * imageBlockSize));
1832
1833 inputAttachmentViewParams.image = img;
1834 inputAttachmentViews.push_back(createImageView(vk, device, &inputAttachmentViewParams));
1835 }
1836
1837 // Create a view for each descriptor. Fill descriptor 'd' with an integer value equal to 'd'. In case the descriptor would be
1838 // written to from the shader, store a -1 in it instead. Skip inline uniform blocks and use images for input attachments and
1839 // storage images.
1840
1841 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
1842 const VkQueue queue = m_context.getUniversalQueue();
1843 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1844
1845 const VkImageSubresourceRange clearRange = {
1846 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1847 0u, // uint32_t baseMipLevel;
1848 1u, // uint32_t levelCount;
1849 0u, // uint32_t baseArrayLayer;
1850 1u // uint32_t layerCount;
1851 };
1852
1853 VkImageMemoryBarrier preInputAttachmentBarrier = {
1854 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
1855 DE_NULL, // const void* pNext
1856 0u, // VkAccessFlags srcAccessMask
1857 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
1858 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
1859 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
1860 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
1861 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
1862 DE_NULL, // VkImage image
1863 {
1864 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
1865 0u, // uint32_t baseMipLevel
1866 1u, // uint32_t mipLevels,
1867 0u, // uint32_t baseArray
1868 1u, // uint32_t arraySize
1869 }};
1870
1871 VkImageMemoryBarrier postInputAttachmentBarrier = {
1872 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1873 DE_NULL, // const void* pNext;
1874 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1875 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, // VkAccessFlags dstAccessMask;
1876 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1877 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1878 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1879 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1880 DE_NULL, // VkImage image;
1881 clearRange, // VkImageSubresourceRange subresourceRange;
1882 };
1883
1884 VkImageMemoryBarrier preStorageImageBarrier = {
1885 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
1886 DE_NULL, // const void* pNext
1887 0u, // VkAccessFlags srcAccessMask
1888 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
1889 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
1890 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
1891 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
1892 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
1893 DE_NULL, // VkImage image
1894 {
1895 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
1896 0u, // uint32_t baseMipLevel
1897 1u, // uint32_t mipLevels,
1898 0u, // uint32_t baseArray
1899 1u, // uint32_t arraySize
1900 }};
1901
1902 VkImageMemoryBarrier postStorageImageBarrier = {
1903 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1904 DE_NULL, // const void* pNext;
1905 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1906 (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT), // VkAccessFlags dstAccessMask;
1907 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1908 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
1909 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1910 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1911 DE_NULL, // VkImage image;
1912 clearRange, // VkImageSubresourceRange subresourceRange;
1913 };
1914
1915 vk::VkClearColorValue clearValue;
1916 clearValue.uint32[0] = 0u;
1917 clearValue.uint32[1] = 0u;
1918 clearValue.uint32[2] = 0u;
1919 clearValue.uint32[3] = 0u;
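
// Record the image clears (and, later, any acceleration structure builds)
// into the same command buffer that will run the test dispatch/draw/trace.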
1920
1921 beginCommandBuffer(vk, *cmdBuffer, 0u);
1922
1923 int descriptor = 0;
1924 uint32_t attachmentIndex = 0;
1925 uint32_t storageImgIndex = 0;
1926
1927 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
1928 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
1929
1930 vector<BufferViewHandleSp> bufferViews(de::max(1u, numDescriptors));
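// A view is created for every buffer-backed descriptor (indexed by global
// descriptor number), though only texel buffer bindings actually consume it.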
1931
1932 for (uint32_t s = 0; s < m_data.numDescriptorSets; ++s)
1933 {
1934 vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
1935 for (size_t b = 0; b < bindings.size(); ++b)
1936 {
1937 VkDescriptorSetLayoutBinding &binding = bindings[b];
1938
1939 if (binding.descriptorCount == 0)
1940 {
1941 continue;
1942 }
1943 #ifndef CTS_USES_VULKANSC
1944 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
1945 {
1946 descriptor++;
1947 }
else
#endif
	if (binding.descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT &&
1950 #ifndef CTS_USES_VULKANSC
1951 binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
1952 #endif
1953 binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1954 {
1955 for (uint32_t d = descriptor; d < descriptor + binding.descriptorCount; ++d)
1956 {
1957 DescriptorId descriptorId(s, static_cast<uint32_t>(b), d - descriptor);
1958 auto writeInfoItr = randomLayout.descriptorWrites.find(descriptorId);
1959 int32_t *ptr = (int32_t *)(bufferPtr + align * d);
1960
1961 if (writeInfoItr == randomLayout.descriptorWrites.end())
1962 {
1963 *ptr = static_cast<int32_t>(d);
1964 }
1965 else
1966 {
1967 *ptr = -1;
1968 writeInfoItr->second.ptr = ptr;
1969 writeInfoItr->second.expected = d;
1970 }
1971
1972 const vk::VkBufferViewCreateInfo viewCreateInfo = {
1973 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
1974 DE_NULL,
1975 (vk::VkBufferViewCreateFlags)0,
1976 **buffer, // buffer
1977 VK_FORMAT_R32_SINT, // format
1978 (vk::VkDeviceSize)align * d, // offset
1979 (vk::VkDeviceSize)sizeof(uint32_t) // range
1980 };
1981 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
1982 bufferViews[d] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
1983 }
1984 descriptor += binding.descriptorCount;
1985 }
1986 #ifndef CTS_USES_VULKANSC
1987 else if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
1988 {
1989 // subtract 16 for "ivec4 unused"
1990 DE_ASSERT(binding.descriptorCount >= 16);
1991 descriptor += binding.descriptorCount - 16;
1992 }
1993 #endif
1994 else if (binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1995 {
1996 // Storage image.
1997 for (uint32_t d = descriptor; d < descriptor + binding.descriptorCount; ++d)
1998 {
1999 VkImage img = *storageImages[storageImgIndex];
2000 DescriptorId descriptorId(s, static_cast<uint32_t>(b), d - descriptor);
2001 int32_t *ptr = storageImgBufferPtr + storageImgIndex;
2002
2003 auto writeInfoItr = randomLayout.descriptorWrites.find(descriptorId);
2004 const bool isWrite = (writeInfoItr != randomLayout.descriptorWrites.end());
2005
2006 if (isWrite)
2007 {
2008 writeInfoItr->second.ptr = ptr;
2009 writeInfoItr->second.expected = static_cast<int32_t>(d);
2010 }
2011
2012 preStorageImageBarrier.image = img;
2013 clearValue.int32[0] = (isWrite ? -1 : static_cast<int32_t>(d));
2014 postStorageImageBarrier.image = img;
2015
2016 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2017 VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0,
2018 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL,
2019 1, &preStorageImageBarrier);
2020 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1,
2021 &clearRange);
2022 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
2023 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2024 (const VkBufferMemoryBarrier *)DE_NULL, 1, &postStorageImageBarrier);
2025
2026 ++storageImgIndex;
2027 }
2028 descriptor += binding.descriptorCount;
2029 }
2030 else
2031 {
2032 // Input attachment.
2033 for (uint32_t d = descriptor; d < descriptor + binding.descriptorCount; ++d)
2034 {
2035 VkImage img = *inputAttachments[attachmentIndex];
2036
2037 preInputAttachmentBarrier.image = img;
2038 clearValue.int32[0] = static_cast<int32_t>(d);
2039 postInputAttachmentBarrier.image = img;
2040
2041 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2042 VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0,
2043 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL,
2044 1, &preInputAttachmentBarrier);
2045 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1,
2046 &clearRange);
2047 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
2048 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0,
2049 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL,
2050 1, &postInputAttachmentBarrier);
2051
2052 ++attachmentIndex;
2053 }
2054 descriptor += binding.descriptorCount;
2055 }
2056 }
2057 }
2058
2059 // Flush modified memory.
2060 flushAlloc(vk, device, buffer->getAllocation());
2061
2062 // Push constants are used for dynamic indexing. PushConstant[i] = i.
2063 const VkPushConstantRange pushConstRange = {
2064 m_data.allShaderStages, // VkShaderStageFlags stageFlags
2065 0, // uint32_t offset
2066 128 // uint32_t size
2067 };
2068
2069 vector<vk::VkDescriptorSetLayout> descriptorSetLayoutsRaw(m_data.numDescriptorSets);
2070 for (size_t i = 0; i < m_data.numDescriptorSets; ++i)
2071 {
2072 descriptorSetLayoutsRaw[i] = descriptorSetLayouts[i].get();
2073 }
2074
2075 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
2076 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
2077 DE_NULL, // const void* pNext;
2078 (VkPipelineLayoutCreateFlags)0, // VkPipelineLayoutCreateFlags flags;
2079 m_data.numDescriptorSets, // uint32_t setLayoutCount;
2080 &descriptorSetLayoutsRaw[0], // const VkDescriptorSetLayout* pSetLayouts;
2081 m_data.indexType == INDEX_TYPE_PUSHCONSTANT ? 1u : 0u, // uint32_t pushConstantRangeCount;
2082 &pushConstRange, // const VkPushConstantRange* pPushConstantRanges;
2083 };
2084
2085 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2086
2087 if (m_data.indexType == INDEX_TYPE_PUSHCONSTANT)
2088 {
2089 // PushConstant[i] = i
2090 for (uint32_t i = 0; i < (uint32_t)(128 / sizeof(uint32_t)); ++i)
2091 {
2092 vk.cmdPushConstants(*cmdBuffer, *pipelineLayout, m_data.allShaderStages, (uint32_t)(i * sizeof(uint32_t)),
2093 (uint32_t)sizeof(uint32_t), &i);
2094 }
2095 }
2096
2097 de::MovePtr<BufferWithMemory> copyBuffer;
2098 copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2099 vk, device, allocator, makeBufferCreateInfo(DIM * DIM * sizeof(uint32_t), VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2100 MemoryRequirement::HostVisible));
2101
2102 // Special case for the output storage image.
2103 const VkImageCreateInfo imageCreateInfo = {
2104 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2105 DE_NULL, // const void* pNext;
2106 (VkImageCreateFlags)0u, // VkImageCreateFlags flags;
2107 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2108 VK_FORMAT_R32_SINT, // VkFormat format;
2109 {
2110 DIM, // uint32_t width;
2111 DIM, // uint32_t height;
2112 1u // uint32_t depth;
2113 }, // VkExtent3D extent;
2114 1u, // uint32_t mipLevels;
2115 1u, // uint32_t arrayLayers;
2116 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2117 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2118 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2119 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2120 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2121 0u, // uint32_t queueFamilyIndexCount;
2122 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2123 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2124 };
2125
2126 VkImageViewCreateInfo imageViewCreateInfo = {
2127 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2128 DE_NULL, // const void* pNext;
2129 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
2130 DE_NULL, // VkImage image;
2131 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2132 VK_FORMAT_R32_SINT, // VkFormat format;
2133 {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
2134 VK_COMPONENT_SWIZZLE_IDENTITY}, // VkComponentMapping components;
2135 {
2136 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2137 0u, // uint32_t baseMipLevel;
2138 1u, // uint32_t levelCount;
2139 0u, // uint32_t baseArrayLayer;
2140 1u // uint32_t layerCount;
2141 } // VkImageSubresourceRange subresourceRange;
2142 };
2143
2144 de::MovePtr<ImageWithMemory> image;
2145 Move<VkImageView> imageView;
2146
2147 image = de::MovePtr<ImageWithMemory>(
2148 new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2149 imageViewCreateInfo.image = **image;
2150 imageView = createImageView(vk, device, &imageViewCreateInfo, NULL);
2151
2152 #ifndef CTS_USES_VULKANSC
2153 // Create ray tracing structures
2154 de::MovePtr<vk::BottomLevelAccelerationStructure> bottomLevelAccelerationStructure;
2155 de::MovePtr<vk::TopLevelAccelerationStructure> topLevelAccelerationStructure;
2156 VkStridedDeviceAddressRegionKHR raygenShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2157 VkStridedDeviceAddressRegionKHR missShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2158 VkStridedDeviceAddressRegionKHR hitShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2159 VkStridedDeviceAddressRegionKHR callableShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2160
2161 if (usesAccelerationStructure(m_data.stage))
2162 {
2163 // Create bottom level acceleration structure
2164 {
2165 bottomLevelAccelerationStructure = makeBottomLevelAccelerationStructure();
2166
2167 bottomLevelAccelerationStructure->setDefaultGeometryData(getShaderStageFlag(m_data.stage));
2168
2169 bottomLevelAccelerationStructure->createAndBuild(vk, device, *cmdBuffer, allocator);
2170 }
2171
2172 // Create top level acceleration structure
2173 {
2174 topLevelAccelerationStructure = makeTopLevelAccelerationStructure();
2175
2176 topLevelAccelerationStructure->setInstanceCount(1);
2177 topLevelAccelerationStructure->addInstance(
2178 de::SharedPtr<BottomLevelAccelerationStructure>(bottomLevelAccelerationStructure.release()));
2179
2180 topLevelAccelerationStructure->createAndBuild(vk, device, *cmdBuffer, allocator);
2181 }
2182 }
2183 #endif
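
// Second pass over the random layout: fill in descriptor infos, then write
// and bind each descriptor set.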
2184
2185 descriptor = 0;
2186 attachmentIndex = 0;
2187 storageImgIndex = 0;
2188
2189 for (uint32_t s = 0; s < m_data.numDescriptorSets; ++s)
2190 {
2191 vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
2192 vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
2193 vector<uint32_t> &arraySizes = randomLayout.arraySizes[s];
2194 vector<uint32_t> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
2195
2196 vector<VkDescriptorBufferInfo> bufferInfoVec(numDescriptors);
2197 vector<VkDescriptorImageInfo> imageInfoVec(numDescriptors);
2198 vector<VkBufferView> bufferViewVec(numDescriptors);
2199 #ifndef CTS_USES_VULKANSC
2200 vector<VkWriteDescriptorSetInlineUniformBlockEXT> inlineInfoVec(numDescriptors);
2201 vector<VkWriteDescriptorSetAccelerationStructureKHR> accelerationInfoVec(numDescriptors);
2202 #endif
2203 vector<uint32_t> descriptorNumber(numDescriptors);
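// Split the writes into those applied before vkCmdBindDescriptorSets and
// those applied afterwards (bindings flagged update-after-bind).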
2204 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2205 vector<VkWriteDescriptorSet> writesAfterBindVec(0);
2206 int vecIndex = 0;
2207 int numDynamic = 0;
2208
2209 #ifndef CTS_USES_VULKANSC
2210 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore, imgTemplateEntriesAfter,
2211 bufTemplateEntriesBefore, bufTemplateEntriesAfter, texelBufTemplateEntriesBefore,
2212 texelBufTemplateEntriesAfter, inlineTemplateEntriesBefore, inlineTemplateEntriesAfter;
2213 #endif
2214 for (size_t b = 0; b < bindings.size(); ++b)
2215 {
2216 VkDescriptorSetLayoutBinding &binding = bindings[b];
2217 uint32_t descriptorIncrement = 1;
2218 #ifndef CTS_USES_VULKANSC
2219 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2220 descriptorIncrement = 16;
2221 #endif
2222
// Construct the descriptor writes and update-template entries for the binding
2224 if (binding.descriptorCount > 0)
2225 {
2226 bool updateAfterBind = !!(bindingsFlags[b] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT);
2227 for (uint32_t ai = 0; ai < de::max(1u, arraySizes[b]); ++ai, descriptor += descriptorIncrement)
2228 {
2229 // Don't access descriptors past the end of the allocated range for
2230 // variable descriptor count
2231 if (b == bindings.size() - 1 &&
2232 (bindingsFlags[b] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT))
2233 {
2234 #ifndef CTS_USES_VULKANSC
2235 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2236 {
2237 // Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
2238 const uint32_t uboRange = ai * 16 + 16;
2239 if (uboRange >= variableDescriptorSizes[s])
2240 continue;
2241 }
2242 else
2243 #endif
2244 {
2245 if (ai >= variableDescriptorSizes[s])
2246 continue;
2247 }
2248 }
2249
// Fill in the descriptor info for this array element.
2251 switch (binding.descriptorType)
2252 {
2253 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2254 // Output image. Special case.
2255 if (s == 0 && b == 0)
2256 {
2257 imageInfoVec[vecIndex] =
2258 makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
2259 }
2260 else
2261 {
2262 imageInfoVec[vecIndex] = makeDescriptorImageInfo(
2263 DE_NULL, storageImageViews[storageImgIndex].get(), VK_IMAGE_LAYOUT_GENERAL);
2264 }
2265 ++storageImgIndex;
2266 break;
2267 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2268 imageInfoVec[vecIndex] =
2269 makeDescriptorImageInfo(DE_NULL, inputAttachmentViews[attachmentIndex].get(),
2270 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
2271 ++attachmentIndex;
2272 break;
2273 #ifndef CTS_USES_VULKANSC
2274 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2275 // Handled below.
2276 break;
2277 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2278 // Handled below.
2279 break;
2280 #endif
2281 default:
2282 // Other descriptor types.
2283 bufferInfoVec[vecIndex] =
2284 makeDescriptorBufferInfo(**buffer, descriptor * align, sizeof(uint32_t));
2285 bufferViewVec[vecIndex] = **bufferViews[descriptor];
2286 break;
2287 }
2288
2289 descriptorNumber[descriptor] = descriptor;
2290
2291 VkWriteDescriptorSet w = {
2292 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
2293 DE_NULL, // const void* pNext;
2294 *descriptorSets[s], // VkDescriptorSet dstSet;
2295 (uint32_t)b, // uint32_t dstBinding;
2296 ai, // uint32_t dstArrayElement;
2297 1u, // uint32_t descriptorCount;
2298 binding.descriptorType, // VkDescriptorType descriptorType;
2299 &imageInfoVec[vecIndex], // const VkDescriptorImageInfo* pImageInfo;
2300 &bufferInfoVec[vecIndex], // const VkDescriptorBufferInfo* pBufferInfo;
2301 &bufferViewVec[vecIndex], // const VkBufferView* pTexelBufferView;
2302 };
2303
2304 #ifndef CTS_USES_VULKANSC
2305 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2306 {
2307 VkWriteDescriptorSetInlineUniformBlockEXT iuBlock = {
2308 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT, // VkStructureType sType;
2309 DE_NULL, // const void* pNext;
2310 sizeof(uint32_t), // uint32_t dataSize;
2311 &descriptorNumber[descriptor], // const void* pData;
2312 };
2313
2314 inlineInfoVec[vecIndex] = iuBlock;
2315 w.dstArrayElement = ai * 16 + 16; // add 16 to skip "ivec4 unused"
2316 w.pNext = &inlineInfoVec[vecIndex];
2317 w.descriptorCount = sizeof(uint32_t);
2318 }
2319
2320 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
2321 {
2322 const TopLevelAccelerationStructure *topLevelAccelerationStructurePtr =
2323 topLevelAccelerationStructure.get();
2324 VkWriteDescriptorSetAccelerationStructureKHR accelerationStructureWriteDescriptorSet = {
2325 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, // VkStructureType sType;
2326 DE_NULL, // const void* pNext;
2327 w.descriptorCount, // uint32_t accelerationStructureCount;
2328 topLevelAccelerationStructurePtr
2329 ->getPtr(), // const VkAccelerationStructureKHR* pAccelerationStructures;
2330 };
2331
2332 accelerationInfoVec[vecIndex] = accelerationStructureWriteDescriptorSet;
2333 w.dstArrayElement = 0;
2334 w.pNext = &accelerationInfoVec[vecIndex];
2335 }
2336
2337 VkDescriptorUpdateTemplateEntry templateEntry = {
2338 (uint32_t)b, // uint32_t dstBinding;
2339 ai, // uint32_t dstArrayElement;
2340 1u, // uint32_t descriptorCount;
2341 binding.descriptorType, // VkDescriptorType descriptorType;
2342 0, // size_t offset;
2343 0, // size_t stride;
2344 };
2345
2346 switch (binding.descriptorType)
2347 {
2348 default:
2349 TCU_THROW(InternalError, "Unknown descriptor type");
2350
2351 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2352 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2353 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2354 (updateAfterBind ? imgTemplateEntriesAfter : imgTemplateEntriesBefore).push_back(templateEntry);
2355 break;
2356 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2357 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2358 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2359 (updateAfterBind ? texelBufTemplateEntriesAfter : texelBufTemplateEntriesBefore)
2360 .push_back(templateEntry);
2361 break;
2362 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2363 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2364 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2365 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2366 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2367 (updateAfterBind ? bufTemplateEntriesAfter : bufTemplateEntriesBefore).push_back(templateEntry);
2368 break;
2369 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2370 templateEntry.offset = descriptor * sizeof(uint32_t);
templateEntry.dstArrayElement = ai * 16 + 16; // add 16 to skip "ivec4 unused"
2372 templateEntry.descriptorCount = sizeof(uint32_t);
2373 (updateAfterBind ? inlineTemplateEntriesAfter : inlineTemplateEntriesBefore)
2374 .push_back(templateEntry);
2375 break;
2376 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2377 DE_ASSERT(!updateAfterBind);
2378 DE_ASSERT(usesAccelerationStructure(m_data.stage));
2379 break;
2380 }
2381 #endif
2382
2383 vecIndex++;
2384
2385 (updateAfterBind ? writesAfterBindVec : writesBeforeBindVec).push_back(w);
2386
2387 // Count the number of dynamic descriptors in this set.
2388 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2389 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2390 {
2391 numDynamic++;
2392 }
2393 }
2394 }
2395 }
2396
2397 // Make zeros have at least one element so &zeros[0] works
2398 vector<uint32_t> zeros(de::max(1, numDynamic));
2399 deMemset(&zeros[0], 0, numDynamic * sizeof(uint32_t));
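// All dynamic buffer offsets are zero: the descriptor buffer infos above
// already point at each descriptor's final offset.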
2400
2401 #ifndef CTS_USES_VULKANSC
2402 // Randomly select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate
2403 if (randRange(&rnd, 1, 2) == 1 && m_context.contextSupports(vk::ApiVersion(0, 1, 1, 0)) &&
2404 !usesAccelerationStructure(m_data.stage))
2405 {
2406 DE_ASSERT(!usesAccelerationStructure(m_data.stage));
2407
2408 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo = {
2409 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2410 NULL, // void* pNext;
2411 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2412 0, // uint32_t descriptorUpdateEntryCount;
DE_NULL, // const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
2414 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2415 descriptorSetLayouts[s].get(), // VkDescriptorSetLayout descriptorSetLayout;
2416 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2417 0, // VkPipelineLayout pipelineLayout;
2418 0, // uint32_t set;
2419 };
2420
2421 void *templateVectorData[] = {
2422 imageInfoVec.data(),
2423 bufferInfoVec.data(),
2424 bufferViewVec.data(),
2425 descriptorNumber.data(),
2426 };
2427
2428 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] = {
2429 &imgTemplateEntriesBefore,
2430 &bufTemplateEntriesBefore,
2431 &texelBufTemplateEntriesBefore,
2432 &inlineTemplateEntriesBefore,
2433 };
2434
2435 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsAfter[] = {
2436 &imgTemplateEntriesAfter,
2437 &bufTemplateEntriesAfter,
2438 &texelBufTemplateEntriesAfter,
2439 &inlineTemplateEntriesAfter,
2440 };
2441
2442 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2443 {
2444 if (templateVectorsBefore[i]->size())
2445 {
2446 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsBefore[i]->size();
2447 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2448 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2449 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2450 vk.updateDescriptorSetWithTemplate(device, descriptorSets[s].get(), *descriptorUpdateTemplate,
2451 templateVectorData[i]);
2452 }
2453 }
2454
2455 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, s, 1, &descriptorSets[s].get(), numDynamic,
2456 &zeros[0]);
2457
2458 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsAfter); ++i)
2459 {
2460 if (templateVectorsAfter[i]->size())
2461 {
2462 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsAfter[i]->size();
2463 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsAfter[i]->data();
2464 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2465 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2466 vk.updateDescriptorSetWithTemplate(device, descriptorSets[s].get(), *descriptorUpdateTemplate,
2467 templateVectorData[i]);
2468 }
2469 }
2470 }
2471 else
2472 #endif
2473 {
2474 if (writesBeforeBindVec.size())
2475 {
2476 vk.updateDescriptorSets(device, (uint32_t)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2477 }
2478
2479 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, s, 1, &descriptorSets[s].get(), numDynamic,
2480 &zeros[0]);
2481
2482 if (writesAfterBindVec.size())
2483 {
2484 vk.updateDescriptorSets(device, (uint32_t)writesAfterBindVec.size(), &writesAfterBindVec[0], 0, NULL);
2485 }
2486 }
2487 }
2488
2489 Move<VkPipeline> pipeline;
2490 Move<VkRenderPass> renderPass;
2491 Move<VkFramebuffer> framebuffer;
2492
2493 #ifndef CTS_USES_VULKANSC
2494 de::MovePtr<BufferWithMemory> sbtBuffer;
2495 de::MovePtr<BufferWithMemory> raygenShaderBindingTable;
2496 de::MovePtr<BufferWithMemory> missShaderBindingTable;
2497 de::MovePtr<BufferWithMemory> hitShaderBindingTable;
2498 de::MovePtr<BufferWithMemory> callableShaderBindingTable;
2499 de::MovePtr<RayTracingPipeline> rayTracingPipeline;
2500 #endif
2501
// Disable the watchdog interval timer during long shader compilations, which
// can happen when the number of descriptor sets reaches 32 or more.
2504 if (m_data.numDescriptorSets >= 32)
2505 {
2506 m_context.getTestContext().touchWatchdogAndDisableIntervalTimeLimit();
2507 }
2508
2509 if (m_data.stage == STAGE_COMPUTE)
2510 {
2511 const Unique<VkShaderModule> shader(
2512 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2513
2514 const VkPipelineShaderStageCreateInfo shaderCreateInfo = {
2515 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2516 DE_NULL,
2517 (VkPipelineShaderStageCreateFlags)0,
2518 VK_SHADER_STAGE_COMPUTE_BIT, // stage
2519 *shader, // shader
2520 "main",
2521 DE_NULL, // pSpecializationInfo
2522 };
2523
2524 const VkComputePipelineCreateInfo pipelineCreateInfo = {
2525 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
2526 DE_NULL,
2527 0u, // flags
2528 shaderCreateInfo, // cs
2529 *pipelineLayout, // layout
2530 (vk::VkPipeline)0, // basePipelineHandle
2531 0u, // basePipelineIndex
2532 };
2533 pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2534 }
2535 #ifndef CTS_USES_VULKANSC
2536 else if (m_data.stage == STAGE_RAYGEN_NV)
2537 {
2538 const Unique<VkShaderModule> shader(
2539 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2540
2541 const VkPipelineShaderStageCreateInfo shaderCreateInfo = {
2542 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2543 DE_NULL, // const void* pNext;
2544 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
2545 VK_SHADER_STAGE_RAYGEN_BIT_NV, // VkShaderStageFlagBits stage;
2546 *shader, // VkShaderModule module;
2547 "main", // const char* pName;
2548 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
2549 };
2550
2551 VkRayTracingShaderGroupCreateInfoNV group = {
2552 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, // VkStructureType sType;
2553 DE_NULL, // const void* pNext;
2554 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, // VkRayTracingShaderGroupTypeNV type;
2555 0, // uint32_t generalShader;
2556 VK_SHADER_UNUSED_KHR, // uint32_t closestHitShader;
2557 VK_SHADER_UNUSED_KHR, // uint32_t anyHitShader;
2558 VK_SHADER_UNUSED_KHR, // uint32_t intersectionShader;
2559 };
2560
2561 VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2562 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, // VkStructureType sType;
2563 DE_NULL, // const void* pNext;
2564 0, // VkPipelineCreateFlags flags;
2565 1, // uint32_t stageCount;
2566 &shaderCreateInfo, // const VkPipelineShaderStageCreateInfo* pStages;
2567 1, // uint32_t groupCount;
2568 &group, // const VkRayTracingShaderGroupCreateInfoNV* pGroups;
2569 0, // uint32_t maxRecursionDepth;
2570 *pipelineLayout, // VkPipelineLayout layout;
2571 (vk::VkPipeline)0, // VkPipeline basePipelineHandle;
2572 0u, // int32_t basePipelineIndex;
2573 };
2574
2575 pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2576
2577 const auto allocSize = de::roundUp(static_cast<VkDeviceSize>(shaderGroupHandleSize),
2578 properties.properties.limits.nonCoherentAtomSize);
2579
2580 sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2581 vk, device, allocator,
2582 makeBufferCreateInfo(allocSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV),
2583 MemoryRequirement::HostVisible));
2584
2585 const auto &alloc = sbtBuffer->getAllocation();
2586 const auto ptr = reinterpret_cast<uint32_t *>(alloc.getHostPtr());
2587
2588 invalidateAlloc(vk, device, alloc);
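// Copy the handle of shader group 0 (the raygen group) into the SBT buffer.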
2589 vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, static_cast<uintptr_t>(allocSize), ptr);
2590 }
2591 else if (m_data.stage == STAGE_RAYGEN)
2592 {
2593 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2594
2595 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2596 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2597 0);
2598
2599 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2600
2601 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2602 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2603 raygenShaderBindingTableRegion =
2604 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2605 shaderGroupHandleSize, shaderGroupHandleSize);
2606 }
2607 else if (m_data.stage == STAGE_INTERSECT)
2608 {
2609 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2610
2611 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2612 createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0),
2613 0);
2614 rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
2615 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2616 1);
2617
2618 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2619
2620 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2621 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2622 raygenShaderBindingTableRegion =
2623 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2624 shaderGroupHandleSize, shaderGroupHandleSize);
2625
2626 hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2627 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
2628 hitShaderBindingTableRegion =
2629 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0),
2630 shaderGroupHandleSize, shaderGroupHandleSize);
2631 }
2632 else if (m_data.stage == STAGE_ANY_HIT)
2633 {
2634 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2635
2636 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2637 createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0),
2638 0);
2639 rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
2640 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2641 1);
2642
2643 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2644
2645 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2646 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2647 raygenShaderBindingTableRegion =
2648 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2649 shaderGroupHandleSize, shaderGroupHandleSize);
2650
2651 hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2652 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
2653 hitShaderBindingTableRegion =
2654 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0),
2655 shaderGroupHandleSize, shaderGroupHandleSize);
2656 }
2657 else if (m_data.stage == STAGE_CLOSEST_HIT)
2658 {
2659 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2660
2661 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2662 createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0),
2663 0);
2664 rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,
2665 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2666 1);
2667
2668 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2669
2670 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2671 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2672 raygenShaderBindingTableRegion =
2673 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2674 shaderGroupHandleSize, shaderGroupHandleSize);
2675
2676 hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2677 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
2678 hitShaderBindingTableRegion =
2679 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0),
2680 shaderGroupHandleSize, shaderGroupHandleSize);
2681 }
2682 else if (m_data.stage == STAGE_MISS)
2683 {
2684 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2685
2686 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2687 createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0),
2688 0);
2689 rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR,
2690 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2691 1);
2692
2693 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2694
2695 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2696 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2697 raygenShaderBindingTableRegion =
2698 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2699 shaderGroupHandleSize, shaderGroupHandleSize);
2700
2701 missShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2702 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
2703 missShaderBindingTableRegion =
2704 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, missShaderBindingTable->get(), 0),
2705 shaderGroupHandleSize, shaderGroupHandleSize);
2706 }
2707 else if (m_data.stage == STAGE_CALLABLE)
2708 {
2709 rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2710
2711 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,
2712 createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0),
2713 0);
2714 rayTracingPipeline->addShader(VK_SHADER_STAGE_CALLABLE_BIT_KHR,
2715 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0),
2716 1);
2717
2718 pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2719
2720 raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2721 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2722 raygenShaderBindingTableRegion =
2723 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0),
2724 shaderGroupHandleSize, shaderGroupHandleSize);
2725
2726 callableShaderBindingTable = rayTracingPipeline->createShaderBindingTable(
2727 vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
2728 callableShaderBindingTableRegion =
2729 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, callableShaderBindingTable->get(), 0),
2730 shaderGroupHandleSize, shaderGroupHandleSize);
2731 }
2732 #endif
2733 else
2734 {
2735 const VkAttachmentDescription attachmentDescription = {
2736 // Input attachment
2737 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags
2738 VK_FORMAT_R32_SINT, // VkFormat format
2739 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
2740 VK_ATTACHMENT_LOAD_OP_LOAD, // VkAttachmentLoadOp loadOp
2741 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp
2742 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp
2743 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp
2744 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout initialLayout
2745 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL // VkImageLayout finalLayout
2746 };
2747
2748 vector<VkAttachmentDescription> attachmentDescriptions(inputAttachments.size(), attachmentDescription);
2749 vector<VkAttachmentReference> attachmentReferences;
2750
2751 attachmentReferences.reserve(inputAttachments.size());
2752 VkAttachmentReference attachmentReference = {0u, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
2753 for (size_t i = 0; i < inputAttachments.size(); ++i)
2754 {
2755 attachmentReference.attachment = static_cast<uint32_t>(i);
2756 attachmentReferences.push_back(attachmentReference);
2757 }
2758
2759 const VkSubpassDescription subpassDesc = {
2760 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2761 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2762 static_cast<uint32_t>(attachmentReferences.size()), // uint32_t inputAttachmentCount
2763 de::dataOrNull(attachmentReferences), // const VkAttachmentReference* pInputAttachments
2764 0u, // uint32_t colorAttachmentCount
2765 DE_NULL, // const VkAttachmentReference* pColorAttachments
2766 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2767 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2768 0u, // uint32_t preserveAttachmentCount
2769 DE_NULL // const uint32_t* pPreserveAttachments
2770 };
2771
2772 const VkSubpassDependency subpassDependency = {
2773 VK_SUBPASS_EXTERNAL, // uint32_t srcSubpass
2774 0, // uint32_t dstSubpass
2775 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2776 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
2777 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2778 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
2779 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
2780 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2781 };
2782
2783 const VkRenderPassCreateInfo renderPassParams = {
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
2785 DE_NULL, // const void* pNext
2786 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2787 static_cast<uint32_t>(attachmentDescriptions.size()), // uint32_t attachmentCount
2788 de::dataOrNull(attachmentDescriptions), // const VkAttachmentDescription* pAttachments
2789 1u, // uint32_t subpassCount
2790 &subpassDesc, // const VkSubpassDescription* pSubpasses
2791 1u, // uint32_t dependencyCount
2792 &subpassDependency // const VkSubpassDependency* pDependencies
2793 };
2794
2795 renderPass = createRenderPass(vk, device, &renderPassParams);
2796
2797 vector<VkImageView> rawInputAttachmentViews;
2798 rawInputAttachmentViews.reserve(inputAttachmentViews.size());
2799 transform(begin(inputAttachmentViews), end(inputAttachmentViews), back_inserter(rawInputAttachmentViews),
2800 [](const Move<VkImageView> &ptr) { return ptr.get(); });
2801
2802 const vk::VkFramebufferCreateInfo framebufferParams = {
2803 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2804 DE_NULL, // pNext
2805 (vk::VkFramebufferCreateFlags)0,
2806 *renderPass, // renderPass
2807 static_cast<uint32_t>(rawInputAttachmentViews.size()), // attachmentCount
2808 de::dataOrNull(rawInputAttachmentViews), // pAttachments
2809 DIM, // width
2810 DIM, // height
2811 1u, // layers
2812 };
2813
2814 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2815
2816 // Note: vertex input state and input assembly state will not be used for mesh pipelines.
2817
2818 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
2819 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2820 DE_NULL, // const void* pNext;
2821 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
2822 0u, // uint32_t vertexBindingDescriptionCount;
2823 DE_NULL, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2824 0u, // uint32_t vertexAttributeDescriptionCount;
2825 DE_NULL // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2826 };
2827
2828 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
2829 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2830 DE_NULL, // const void* pNext;
2831 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
2832 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST :
2833 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
2834 VK_FALSE // VkBool32 primitiveRestartEnable;
2835 };
2836
        const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
            VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
            DE_NULL, // const void* pNext;
            (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
            VK_FALSE, // VkBool32 depthClampEnable;
            (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
            VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
            VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
            VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
            VK_FALSE, // VkBool32 depthBiasEnable;
            0.0f, // float depthBiasConstantFactor;
            0.0f, // float depthBiasClamp;
            0.0f, // float depthBiasSlopeFactor;
            1.0f // float lineWidth;
        };

        const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
            VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
            DE_NULL, // const void* pNext
            0u, // VkPipelineMultisampleStateCreateFlags flags
            VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
            VK_FALSE, // VkBool32 sampleShadingEnable
            1.0f, // float minSampleShading
            DE_NULL, // const VkSampleMask* pSampleMask
            VK_FALSE, // VkBool32 alphaToCoverageEnable
            VK_FALSE // VkBool32 alphaToOneEnable
        };

        VkViewport viewport = makeViewport(DIM, DIM);
        VkRect2D scissor = makeRect2D(DIM, DIM);

        const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
            VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
            DE_NULL, // const void* pNext
            (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
            1u, // uint32_t viewportCount
            &viewport, // const VkViewport* pViewports
            1u, // uint32_t scissorCount
            &scissor // const VkRect2D* pScissors
        };

        Move<VkShaderModule> fs;
        Move<VkShaderModule> vs;
#ifndef CTS_USES_VULKANSC
        Move<VkShaderModule> ms;
        Move<VkShaderModule> ts;
#endif // CTS_USES_VULKANSC

        const auto &binaries = m_context.getBinaryCollection();

        std::vector<VkPipelineShaderStageCreateInfo> stageCreateInfos;
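        // The "test" binary holds the shader for the stage under test; any extra stages
        // needed to complete the pipeline come from the helper binaries built alongside it.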

        if (m_data.stage == STAGE_VERTEX)
        {
            vs = createShaderModule(vk, device, binaries.get("test"));
            appendShaderStageCreateInfo(stageCreateInfos, vs.get(), VK_SHADER_STAGE_VERTEX_BIT);
        }
        else if (m_data.stage == STAGE_FRAGMENT)
        {
            vs = createShaderModule(vk, device, binaries.get("vert"));
            fs = createShaderModule(vk, device, binaries.get("test"));
            appendShaderStageCreateInfo(stageCreateInfos, vs.get(), VK_SHADER_STAGE_VERTEX_BIT);
            appendShaderStageCreateInfo(stageCreateInfos, fs.get(), VK_SHADER_STAGE_FRAGMENT_BIT);
        }
#ifndef CTS_USES_VULKANSC
        else if (m_data.stage == STAGE_TASK)
        {
            ts = createShaderModule(vk, device, binaries.get("test"));
            ms = createShaderModule(vk, device, binaries.get("mesh"));
            appendShaderStageCreateInfo(stageCreateInfos, ts.get(), vk::VK_SHADER_STAGE_TASK_BIT_EXT);
            appendShaderStageCreateInfo(stageCreateInfos, ms.get(), VK_SHADER_STAGE_MESH_BIT_EXT);
        }
        else if (m_data.stage == STAGE_MESH)
        {
            ms = createShaderModule(vk, device, binaries.get("test"));
            appendShaderStageCreateInfo(stageCreateInfos, ms.get(), VK_SHADER_STAGE_MESH_BIT_EXT);
        }
#endif // CTS_USES_VULKANSC

        const VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo = {
            VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
            DE_NULL, // const void* pNext;
            (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
            static_cast<uint32_t>(stageCreateInfos.size()), // uint32_t stageCount;
            de::dataOrNull(stageCreateInfos), // const VkPipelineShaderStageCreateInfo* pStages;
            &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
            &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
            DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
            &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
            &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
            &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
            DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
            DE_NULL, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
            DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
            pipelineLayout.get(), // VkPipelineLayout layout;
            renderPass.get(), // VkRenderPass renderPass;
            0u, // uint32_t subpass;
            DE_NULL, // VkPipeline basePipelineHandle;
            0 // int basePipelineIndex;
        };

        pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
    }

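    // Transition the output image to the GENERAL layout so it can be cleared and then
    // written by the test stages.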
    const VkImageMemoryBarrier imageBarrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
                                               DE_NULL, // const void* pNext
                                               0u, // VkAccessFlags srcAccessMask
                                               VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
                                               VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
                                               VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
                                               VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
                                               VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
                                               **image, // VkImage image
                                               {
                                                   VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
                                                   0u, // uint32_t baseMipLevel
                                                   1u, // uint32_t levelCount
                                                   0u, // uint32_t baseArrayLayer
                                                   1u, // uint32_t layerCount
                                               }};

    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                          (const VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);

    vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);

    VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
    VkClearValue clearColor = makeClearValueColorU32(0, 0, 0, 0);

    VkMemoryBarrier memBarrier = {
        VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
        DE_NULL, // pNext
        0u, // srcAccessMask
        0u, // dstAccessMask
    };

    vk.cmdClearColorImage(*cmdBuffer, **image, VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);

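    // Make the cleared contents available and visible to the shader stages used by the test.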
    memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages, 0, 1, &memBarrier, 0,
                          DE_NULL, 0, DE_NULL);

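    // Launch the test workload, covering the full DIM x DIM output.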
    if (m_data.stage == STAGE_COMPUTE)
    {
        vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
    }
#ifndef CTS_USES_VULKANSC
    else if (m_data.stage == STAGE_RAYGEN_NV)
    {
        vk.cmdTraceRaysNV(*cmdBuffer, **sbtBuffer, 0, DE_NULL, 0, 0, DE_NULL, 0, 0, DE_NULL, 0, 0, DIM, DIM, 1);
    }
    else if (isRayTracingStageKHR(m_data.stage))
    {
        cmdTraceRays(vk, *cmdBuffer, &raygenShaderBindingTableRegion, &missShaderBindingTableRegion,
                     &hitShaderBindingTableRegion, &callableShaderBindingTableRegion, DIM, DIM, 1);
    }
#endif
    else
    {
        beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(DIM, DIM), 0, DE_NULL,
                        VK_SUBPASS_CONTENTS_INLINE);
        // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
        if (m_data.stage == STAGE_VERTEX)
        {
            vk.cmdDraw(*cmdBuffer, DIM * DIM, 1u, 0u, 0u);
        }
        else if (m_data.stage == STAGE_FRAGMENT)
        {
            vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
        }
#ifndef CTS_USES_VULKANSC
        else if (isMeshStage(m_data.stage))
        {
            vk.cmdDrawMeshTasksEXT(*cmdBuffer, DIM, DIM, 1u);
        }
#endif // CTS_USES_VULKANSC
        endRenderPass(vk, *cmdBuffer);
    }

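    // Wait for all shader accesses to finish before reading the results back with transfer operations.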
    memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
    vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &memBarrier, 0,
                          DE_NULL, 0, DE_NULL);

    const VkBufferImageCopy copyRegion = makeBufferImageCopy(
        makeExtent3D(DIM, DIM, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
    vk.cmdCopyImageToBuffer(*cmdBuffer, **image, VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);

    const VkBufferMemoryBarrier copyBufferBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
        **copyBuffer, // VkBuffer buffer;
        0u, // VkDeviceSize offset;
        VK_WHOLE_SIZE, // VkDeviceSize size;
    };

    // Add a barrier to read the copy buffer after copying.
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0u, DE_NULL, 1u,
                          &copyBufferBarrier, 0u, DE_NULL);

    // Copy all storage images to the storage image buffer.
    VkBufferImageCopy storageImgCopyRegion = {
        0u, // VkDeviceSize bufferOffset;
        0u, // uint32_t bufferRowLength;
        0u, // uint32_t bufferImageHeight;
        makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u), // VkImageSubresourceLayers imageSubresource;
        makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
        makeExtent3D(1u, 1u, 1u), // VkExtent3D imageExtent;
    };

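    // Each storage image holds a single texel; copy each one into its own 4-byte slot in the buffer.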
    for (uint32_t i = 0; i < storageImageCount; ++i)
    {
        storageImgCopyRegion.bufferOffset = sizeof(int32_t) * i;
        vk.cmdCopyImageToBuffer(*cmdBuffer, storageImages[i].get(), VK_IMAGE_LAYOUT_GENERAL, **storageImgBuffer, 1u,
                                &storageImgCopyRegion);
    }

    const VkBufferMemoryBarrier storageImgBufferBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
        **storageImgBuffer, // VkBuffer buffer;
        0u, // VkDeviceSize offset;
        VK_WHOLE_SIZE, // VkDeviceSize size;
    };

    // Add a barrier to read the storage image buffer after copying.
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0u, DE_NULL, 1u,
                          &storageImgBufferBarrier, 0u, DE_NULL);

    const VkBufferMemoryBarrier descriptorBufferBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT), // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
        **buffer, // VkBuffer buffer;
        0u, // VkDeviceSize offset;
        VK_WHOLE_SIZE, // VkDeviceSize size;
    };

    // Add a barrier so the host can read data stored by shader writes to descriptor memory
    // (the descriptor types not handled by the storage image copies above).
    vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, nullptr, 1u,
                          &descriptorBufferBarrier, 0u, nullptr);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

    // Re-enable the watchdog interval timer here, to accommodate virtualized Vulkan
    // implementations that create the pipeline asynchronously on the host.
    if (m_data.numDescriptorSets >= 32)
    {
        m_context.getTestContext().touchWatchdogAndEnableIntervalTimeLimit();
    }

    // Verify output image.
    uint32_t *ptr = (uint32_t *)copyBuffer->getAllocation().getHostPtr();
    invalidateAlloc(vk, device, copyBuffer->getAllocation());

    uint32_t failures = 0;
    auto &log = m_context.getTestContext().getLog();

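    // Each output texel should contain 1, which the test shader stores when all of its
    // descriptor reads matched the expected values.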
    for (uint32_t i = 0; i < DIM * DIM; ++i)
    {
        if (ptr[i] != 1)
        {
            failures++;
            log << tcu::TestLog::Message << "Failure in copy buffer, ptr[" << i << "] = " << ptr[i]
                << tcu::TestLog::EndMessage;
        }
    }

    // Verify descriptors with writes.
    invalidateMappedMemoryRange(vk, device, buffer->getAllocation().getMemory(), buffer->getAllocation().getOffset(),
                                VK_WHOLE_SIZE);
    invalidateMappedMemoryRange(vk, device, storageImgBuffer->getAllocation().getMemory(),
                                storageImgBuffer->getAllocation().getOffset(), VK_WHOLE_SIZE);

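    // Each generated write records the host location it targets and the value the shader
    // should have stored there, so a mismatch points at a failed descriptor write.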
    for (const auto &descIdWriteInfo : randomLayout.descriptorWrites)
    {
        const auto &writeInfo = descIdWriteInfo.second;
        if (writeInfo.writeGenerated && *writeInfo.ptr != writeInfo.expected)
        {
            failures++;
            log << tcu::TestLog::Message << "Failure in write operation; expected " << writeInfo.expected
                << " and found " << *writeInfo.ptr << tcu::TestLog::EndMessage;
        }
    }

    if (failures == 0)
        return tcu::TestStatus::pass("Pass");
    else
        return tcu::TestStatus::fail("failures=" + de::toString(failures));
}

} // namespace

tcu::TestCaseGroup *createDescriptorSetRandomTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "descriptorset_random"));

    uint32_t seed = 0;

    typedef struct
    {
        uint32_t count;
        const char *name;
    } TestGroupCase;

    TestGroupCase setsCases[] = {
        // 4 descriptor sets
        {4, "sets4"},
        // 8 descriptor sets
        {8, "sets8"},
        // 16 descriptor sets
        {16, "sets16"},
        // 32 descriptor sets
        {32, "sets32"},
    };

    TestGroupCase indexCases[] = {
        // no descriptor declarations are arrays
        {INDEX_TYPE_NONE, "noarray"},
        // constant indexing of descriptor arrays
        {INDEX_TYPE_CONSTANT, "constant"},
        // indexing descriptor arrays with push constants
        {INDEX_TYPE_PUSHCONSTANT, "unifindexed"},
        // dynamically uniform indexing of descriptor arrays
        {INDEX_TYPE_DEPENDENT, "dynindexed"},
        // runtime-size declarations of descriptor arrays
        {INDEX_TYPE_RUNTIME_SIZE, "runtimesize"},
    };

    TestGroupCase uboCases[] = {
        // no ubos
        {0, "noubo"},
        // spec minmax ubo limit
        {12, "ubolimitlow"},
        // high ubo limit
        {4096, "ubolimithigh"},
    };

    TestGroupCase sboCases[] = {
        // no ssbos
        {0, "nosbo"},
        // spec minmax ssbo limit
        {4, "sbolimitlow"},
        // high ssbo limit
        {4096, "sbolimithigh"},
    };

    TestGroupCase iaCases[] = {
        // no input attachments
        {0, "noia"},
        // spec minmax input attachment limit
        {4, "ialimitlow"},
        // high input attachment limit
        {64, "ialimithigh"},
    };

    TestGroupCase sampledImgCases[] = {
        // no sampled images
        {0, "nosampledimg"},
        // spec minmax image limit
        {16, "sampledimglow"},
        // high image limit
        {4096, "sampledimghigh"},
    };

    const struct
    {
        uint32_t sImgCount;
        uint32_t sTexCount;
        const char *name;
    } sImgTexCases[] = {
        // output storage image only
        {1, 0, "outimgonly"},
        // output image low storage tex limit
        {1, 3, "outimgtexlow"},
        // minmax storage images and no storage tex
        {4, 0, "lowimgnotex"},
        // low storage image single storage texel
        {3, 1, "lowimgsingletex"},
        // high limit of storage images and texel buffers
        {2048, 2048, "storageimghigh"},
    };

    const struct
    {
        uint32_t iubCount;
        uint32_t iubSize;
        const char *name;
    } iubCases[] = {
        // no inline uniform blocks
        {0, 0, "noiub"},
        // inline uniform blocks low limit
        {4, 256, "iublimitlow"},
        // inline uniform blocks high limit
        {8, 4096, "iublimithigh"},
    };

    TestGroupCase stageCases[] = {
        // compute
        {STAGE_COMPUTE, "comp"},
        // fragment
        {STAGE_FRAGMENT, "frag"},
        // vertex
        {STAGE_VERTEX, "vert"},
#ifndef CTS_USES_VULKANSC
        // raygen_nv
        {STAGE_RAYGEN_NV, "rgnv"},
        // raygen
        {STAGE_RAYGEN, "rgen"},
        // intersect
        {STAGE_INTERSECT, "sect"},
        // any_hit
        {STAGE_ANY_HIT, "ahit"},
        // closest_hit
        {STAGE_CLOSEST_HIT, "chit"},
        // miss
        {STAGE_MISS, "miss"},
        // callable
        {STAGE_CALLABLE, "call"},
        // task
        {STAGE_TASK, "task"},
        // mesh
        {STAGE_MESH, "mesh"},
#endif
    };

    TestGroupCase uabCases[] = {
        // no update after bind
        {UPDATE_AFTER_BIND_DISABLED, "nouab"},
        // enable update after bind
        {UPDATE_AFTER_BIND_ENABLED, "uab"},
    };

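    // The groups below form the cartesian product of the dimensions above, pruned by the
    // "continue" filters inside the innermost loop to keep the number of cases manageable.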
    for (int setsNdx = 0; setsNdx < DE_LENGTH_OF_ARRAY(setsCases); setsNdx++)
    {
        de::MovePtr<tcu::TestCaseGroup> setsGroup(new tcu::TestCaseGroup(testCtx, setsCases[setsNdx].name));
        for (int indexNdx = 0; indexNdx < DE_LENGTH_OF_ARRAY(indexCases); indexNdx++)
        {
            de::MovePtr<tcu::TestCaseGroup> indexGroup(new tcu::TestCaseGroup(testCtx, indexCases[indexNdx].name));
            for (int uboNdx = 0; uboNdx < DE_LENGTH_OF_ARRAY(uboCases); uboNdx++)
            {
                de::MovePtr<tcu::TestCaseGroup> uboGroup(new tcu::TestCaseGroup(testCtx, uboCases[uboNdx].name));
                for (int sboNdx = 0; sboNdx < DE_LENGTH_OF_ARRAY(sboCases); sboNdx++)
                {
                    de::MovePtr<tcu::TestCaseGroup> sboGroup(new tcu::TestCaseGroup(testCtx, sboCases[sboNdx].name));
                    for (int sampledImgNdx = 0; sampledImgNdx < DE_LENGTH_OF_ARRAY(sampledImgCases); sampledImgNdx++)
                    {
                        de::MovePtr<tcu::TestCaseGroup> sampledImgGroup(
                            new tcu::TestCaseGroup(testCtx, sampledImgCases[sampledImgNdx].name));
                        for (int storageImgNdx = 0; storageImgNdx < DE_LENGTH_OF_ARRAY(sImgTexCases); ++storageImgNdx)
                        {
                            de::MovePtr<tcu::TestCaseGroup> storageImgGroup(
                                new tcu::TestCaseGroup(testCtx, sImgTexCases[storageImgNdx].name));
                            for (int iubNdx = 0; iubNdx < DE_LENGTH_OF_ARRAY(iubCases); iubNdx++)
                            {
                                de::MovePtr<tcu::TestCaseGroup> iubGroup(
                                    new tcu::TestCaseGroup(testCtx, iubCases[iubNdx].name));
                                for (int uabNdx = 0; uabNdx < DE_LENGTH_OF_ARRAY(uabCases); uabNdx++)
                                {
                                    de::MovePtr<tcu::TestCaseGroup> uabGroup(
                                        new tcu::TestCaseGroup(testCtx, uabCases[uabNdx].name));
                                    for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
                                    {
                                        const Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
                                        const auto shaderStages = getAllShaderStagesFor(currentStage);
                                        const auto pipelineStages = getAllPipelineStagesFor(currentStage);

                                        de::MovePtr<tcu::TestCaseGroup> stageGroup(
                                            new tcu::TestCaseGroup(testCtx, stageCases[stageNdx].name));
                                        for (int iaNdx = 0; iaNdx < DE_LENGTH_OF_ARRAY(iaCases); ++iaNdx)
                                        {
                                            // Input attachments can only be used in the fragment stage.
                                            if (currentStage != STAGE_FRAGMENT && iaCases[iaNdx].count > 0u)
                                                continue;

                                            // Allow only one high limit or all of them.
                                            uint32_t highLimitCount = 0u;
                                            if (uboNdx == DE_LENGTH_OF_ARRAY(uboCases) - 1)
                                                ++highLimitCount;
                                            if (sboNdx == DE_LENGTH_OF_ARRAY(sboCases) - 1)
                                                ++highLimitCount;
                                            if (sampledImgNdx == DE_LENGTH_OF_ARRAY(sampledImgCases) - 1)
                                                ++highLimitCount;
                                            if (storageImgNdx == DE_LENGTH_OF_ARRAY(sImgTexCases) - 1)
                                                ++highLimitCount;
                                            if (iaNdx == DE_LENGTH_OF_ARRAY(iaCases) - 1)
                                                ++highLimitCount;

                                            if (highLimitCount > 1 && highLimitCount < 5)
                                                continue;

                                            // Allow only all, all-but-one, none, or exactly one "zero limit" at the
                                            // same time (inline uniform blocks are not counted here).
                                            uint32_t zeroLimitCount = 0u;
                                            if (uboNdx == 0)
                                                ++zeroLimitCount;
                                            if (sboNdx == 0)
                                                ++zeroLimitCount;
                                            if (sampledImgNdx == 0)
                                                ++zeroLimitCount;
                                            if (storageImgNdx == 0)
                                                ++zeroLimitCount;
                                            if (iaNdx == 0)
                                                ++zeroLimitCount;

                                            if (zeroLimitCount > 1 && zeroLimitCount < 4)
                                                continue;

                                            // Avoid using multiple storage images if no dynamic indexing is being used.
                                            if (storageImgNdx >= 2 && indexNdx < 2)
                                                continue;

                                            // Skip the case of no UBOs, SSBOs or sampled images when no dynamic indexing is being used.
                                            if ((uboNdx == 0 || sboNdx == 0 || sampledImgNdx == 0) && indexNdx < 2)
                                                continue;

                                            de::MovePtr<tcu::TestCaseGroup> iaGroup(
                                                new tcu::TestCaseGroup(testCtx, iaCases[iaNdx].name));

                                            // Generate 10 random cases when working with only 4 sets and the number of descriptors is low. Otherwise just one case.
                                            // Exception: the case of no descriptors of any kind only needs one case.
                                            const uint32_t numSeeds =
                                                (setsCases[setsNdx].count == 4 && uboNdx < 2 && sboNdx < 2 &&
                                                 sampledImgNdx < 2 && storageImgNdx < 4 && iubNdx == 0 && iaNdx < 2 &&
                                                 (uboNdx != 0 || sboNdx != 0 || sampledImgNdx != 0 ||
                                                  storageImgNdx != 0 || iaNdx != 0)) ?
                                                    10 :
                                                    1;

                                            for (uint32_t rnd = 0; rnd < numSeeds; ++rnd)
                                            {
                                                CaseDef c = {
                                                    (IndexType)indexCases[indexNdx].count, // IndexType indexType;
                                                    setsCases[setsNdx].count, // uint32_t numDescriptorSets;
                                                    uboCases[uboNdx].count, // uint32_t maxPerStageUniformBuffers;
                                                    8, // uint32_t maxUniformBuffersDynamic;
                                                    sboCases[sboNdx].count, // uint32_t maxPerStageStorageBuffers;
                                                    4, // uint32_t maxStorageBuffersDynamic;
                                                    sampledImgCases[sampledImgNdx].count, // uint32_t maxPerStageSampledImages;
                                                    sImgTexCases[storageImgNdx].sImgCount, // uint32_t maxPerStageStorageImages;
                                                    sImgTexCases[storageImgNdx].sTexCount, // uint32_t maxPerStageStorageTexelBuffers;
                                                    iubCases[iubNdx].iubCount, // uint32_t maxInlineUniformBlocks;
                                                    iubCases[iubNdx].iubSize, // uint32_t maxInlineUniformBlockSize;
                                                    iaCases[iaNdx].count, // uint32_t maxPerStageInputAttachments;
                                                    currentStage, // Stage stage;
                                                    (UpdateAfterBind)uabCases[uabNdx].count, // UpdateAfterBind uab;
                                                    seed++, // uint32_t seed;
                                                    shaderStages, // VkFlags allShaderStages;
                                                    pipelineStages, // VkFlags allPipelineStages;
                                                    nullptr, // std::shared_ptr<RandomLayout> randomLayout;
                                                };

                                                string name = de::toString(rnd);
                                                iaGroup->addChild(
                                                    new DescriptorSetRandomTestCase(testCtx, name.c_str(), c));
                                            }
                                            stageGroup->addChild(iaGroup.release());
                                        }
                                        uabGroup->addChild(stageGroup.release());
                                    }
                                    iubGroup->addChild(uabGroup.release());
                                }
                                storageImgGroup->addChild(iubGroup.release());
                            }
                            sampledImgGroup->addChild(storageImgGroup.release());
                        }
                        sboGroup->addChild(sampledImgGroup.release());
                    }
                    uboGroup->addChild(sboGroup.release());
                }
                indexGroup->addChild(uboGroup.release());
            }
            setsGroup->addChild(indexGroup.release());
        }
        group->addChild(setsGroup.release());
    }
    return group.release();
}

} // namespace BindingModel
} // namespace vkt