/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Valve Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Tests for VK_VALVE_mutable_descriptor_type and VK_EXT_mutable_descriptor_type.
 *//*--------------------------------------------------------------------*/
#include "vktBindingMutableTests.hpp"
#include "vktTestCase.hpp"
#include "vktTestGroupUtil.hpp"
#include "vktCustomInstancesDevices.hpp"

#include "tcuCommandLine.hpp"

#include "vkDefs.hpp"
#include "vkRefUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageWithMemory.hpp"
#include "vkBufferWithMemory.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkRayTracingUtil.hpp"

#include "deUniquePtr.hpp"
#include "deSTLUtil.hpp"
#include "deStringUtil.hpp"

#include <vector>
#include <algorithm>
#include <iterator>
#include <set>
#include <sstream>
#include <limits>

namespace vkt
{
namespace BindingModel
{

namespace
{

using namespace vk;

de::SharedPtr<Move<vk::VkDevice>> g_singletonDevice;

VkDevice getDevice(Context &context)
{
    if (!g_singletonDevice)
    {
        const float queuePriority = 1.0f;

        // Create a universal queue that supports graphics and compute.
        const VkDeviceQueueCreateInfo queueParams{
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                    // const void* pNext;
            0u,                                         // VkDeviceQueueCreateFlags flags;
            context.getUniversalQueueFamilyIndex(),     // uint32_t queueFamilyIndex;
            1u,                                         // uint32_t queueCount;
            &queuePriority                              // const float* pQueuePriorities;
        };

        // \note Extensions in core are not explicitly enabled even though
        //       they are in the extension list advertised to tests.
        const auto &extensionPtrs = context.getDeviceCreationExtensions();

        VkPhysicalDeviceAccelerationStructureFeaturesKHR accelerationStructureFeatures = initVulkanStructure();
        VkPhysicalDeviceBufferDeviceAddressFeatures bufferDeviceAddressFeatures        = initVulkanStructure();
        VkPhysicalDeviceRayTracingPipelineFeaturesKHR rayTracingPipelineFeatures       = initVulkanStructure();
        VkPhysicalDeviceRayQueryFeaturesKHR rayQueryFeatures                           = initVulkanStructure();
        VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT mutableDescriptorTypeFeatures = initVulkanStructure();
        VkPhysicalDeviceDescriptorIndexingFeatures descriptorIndexingFeatures          = initVulkanStructure();
        VkPhysicalDeviceFeatures2 features2                                            = initVulkanStructure();

        const auto addFeatures = makeStructChainAdder(&features2);

        if (context.isDeviceFunctionalitySupported("VK_KHR_acceleration_structure"))
            addFeatures(&accelerationStructureFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
            addFeatures(&bufferDeviceAddressFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
            addFeatures(&rayTracingPipelineFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_query"))
            addFeatures(&rayQueryFeatures);

        if (context.isDeviceFunctionalitySupported("VK_VALVE_mutable_descriptor_type") ||
            context.isDeviceFunctionalitySupported("VK_EXT_mutable_descriptor_type"))
            addFeatures(&mutableDescriptorTypeFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing"))
            addFeatures(&descriptorIndexingFeatures);

        context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
        features2.features.robustBufferAccess = VK_FALSE; // Disable robustness features.

        const VkDeviceCreateInfo deviceCreateInfo{
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
            &features2,                           // const void* pNext;
            (VkDeviceCreateFlags)0u,              // VkDeviceCreateFlags flags;
            1,                                    // uint32_t queueCreateInfoCount;
            &queueParams,                         // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
            0,                                    // uint32_t enabledLayerCount;
            nullptr,                              // const char* const* ppEnabledLayerNames;
            de::sizeU32(extensionPtrs),           // uint32_t enabledExtensionCount;
            de::dataOrNull(extensionPtrs),        // const char* const* ppEnabledExtensionNames;
            DE_NULL,                              // const VkPhysicalDeviceFeatures* pEnabledFeatures;
        };

        Move<VkDevice> device = createCustomDevice(
            context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(),
            context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &deviceCreateInfo);
        g_singletonDevice = de::SharedPtr<Move<VkDevice>>(new Move<VkDevice>(device));
    }

    return g_singletonDevice->get();
}
uint32_t getDescriptorNumericValue(uint32_t iteration, uint32_t bindingIdx, uint32_t descriptorIdx = 0u)
{
    // When assigning numeric values for the descriptor contents, each descriptor will get 0x5aIIBBDD. II is an octet
    // containing the iteration index, BB is an octet containing the binding index and DD is the descriptor index
    // inside that binding.
    constexpr uint32_t kNumericValueBase = 0x5a000000u;

    return (kNumericValueBase | ((iteration & 0xFFu) << 16) | ((bindingIdx & 0xFFu) << 8) | (descriptorIdx & 0xFFu));
}
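
// Example: iteration 1, binding 2, descriptor 3 yields
// (0x5a000000u | (1u << 16) | (2u << 8) | 3u) == 0x5a010203u.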

uint16_t getAccelerationStructureOffsetX(uint32_t descriptorNumericValue)
{
    // Keep the lowest 16 bits (binding and descriptor idx) as the offset.
    return static_cast<uint16_t>(descriptorNumericValue);
}
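
// E.g. the descriptor value 0x5a010203u maps to acceleration structure offset 0x0203u (515).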

// Value that will be stored in the output buffer to signal success when reading values.
uint32_t getExpectedOutputBufferValue()
{
    return 2u;
}

// This value will be stored in an image to be sampled when checking descriptors containing samplers alone.
uint32_t getExternalSampledImageValue()
{
    return 0x41322314u;
}

// Value that will be ORed with the descriptor value before writing.
uint32_t getStoredValueMask()
{
    return 0xFF000000u;
}

VkFormat getDescriptorImageFormat()
{
    return VK_FORMAT_R32_UINT;
}

VkExtent3D getDefaultExtent()
{
    return makeExtent3D(1u, 1u, 1u);
}

// Convert value to hexadecimal.
std::string toHex(uint32_t val)
{
    std::ostringstream s;
    s << "0x" << std::hex << val << "u";
    return s.str();
}

// Returns the list of descriptor types that cannot be part of a mutable descriptor.
std::vector<VkDescriptorType> getForbiddenMutableTypes()
{
    return std::vector<VkDescriptorType>{
        VK_DESCRIPTOR_TYPE_MUTABLE_EXT,
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
        VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT,
    };
}

// Returns the list of descriptor types that are mandatory for the extension.
std::vector<VkDescriptorType> getMandatoryMutableTypes()
{
    return std::vector<VkDescriptorType>{
        VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,       VK_DESCRIPTOR_TYPE_STORAGE_BUFFER};
}

// This helps quickly transform a vector of descriptor types into a bitmask, which makes it easier to check some conditions.
enum DescriptorTypeFlagBits
{
    DTFB_SAMPLER                    = (1 << 0),
    DTFB_COMBINED_IMAGE_SAMPLER     = (1 << 1),
    DTFB_SAMPLED_IMAGE              = (1 << 2),
    DTFB_STORAGE_IMAGE              = (1 << 3),
    DTFB_UNIFORM_TEXEL_BUFFER       = (1 << 4),
    DTFB_STORAGE_TEXEL_BUFFER       = (1 << 5),
    DTFB_UNIFORM_BUFFER             = (1 << 6),
    DTFB_STORAGE_BUFFER             = (1 << 7),
    DTFB_UNIFORM_BUFFER_DYNAMIC     = (1 << 8),
    DTFB_STORAGE_BUFFER_DYNAMIC     = (1 << 9),
    DTFB_INPUT_ATTACHMENT           = (1 << 10),
    DTFB_INLINE_UNIFORM_BLOCK_EXT   = (1 << 11),
    DTFB_ACCELERATION_STRUCTURE_KHR = (1 << 12),
    DTFB_ACCELERATION_STRUCTURE_NV  = (1 << 13),
    DTFB_MUTABLE                    = (1 << 14),
};

using DescriptorTypeFlags = uint32_t;

// Convert type to its corresponding flag bit.
DescriptorTypeFlagBits toDescriptorTypeFlagBit(VkDescriptorType descriptorType)
{
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        return DTFB_SAMPLER;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        return DTFB_COMBINED_IMAGE_SAMPLER;
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        return DTFB_SAMPLED_IMAGE;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        return DTFB_STORAGE_IMAGE;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        return DTFB_UNIFORM_TEXEL_BUFFER;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        return DTFB_STORAGE_TEXEL_BUFFER;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        return DTFB_UNIFORM_BUFFER;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        return DTFB_STORAGE_BUFFER;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        return DTFB_UNIFORM_BUFFER_DYNAMIC;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        return DTFB_STORAGE_BUFFER_DYNAMIC;
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        return DTFB_INPUT_ATTACHMENT;
    case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
        return DTFB_INLINE_UNIFORM_BLOCK_EXT;
    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
        return DTFB_ACCELERATION_STRUCTURE_KHR;
    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
        return DTFB_ACCELERATION_STRUCTURE_NV;
    case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
        return DTFB_MUTABLE;
    default:
        break;
    }

    // Unreachable.
    DE_ASSERT(false);
    return DTFB_SAMPLER;
}

// Convert vector of descriptor types to a bitfield.
DescriptorTypeFlags toDescriptorTypeFlags(const std::vector<VkDescriptorType> &types)
{
    DescriptorTypeFlags result = 0u;
    for (const auto &t : types)
        result |= toDescriptorTypeFlagBit(t);
    return result;
}
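
// E.g. toDescriptorTypeFlags({VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE}) returns
// (DTFB_SAMPLED_IMAGE | DTFB_STORAGE_IMAGE) == 0xCu; toDescriptorTypeVector() below reverses the mapping,
// returning the types in flag-bit order.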

// Convert bitfield to vector of descriptor types.
std::vector<VkDescriptorType> toDescriptorTypeVector(DescriptorTypeFlags bitfield)
{
    std::vector<VkDescriptorType> result;

    if (bitfield & DTFB_SAMPLER)
        result.push_back(VK_DESCRIPTOR_TYPE_SAMPLER);
    if (bitfield & DTFB_COMBINED_IMAGE_SAMPLER)
        result.push_back(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    if (bitfield & DTFB_SAMPLED_IMAGE)
        result.push_back(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
    if (bitfield & DTFB_STORAGE_IMAGE)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
    if (bitfield & DTFB_UNIFORM_TEXEL_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
    if (bitfield & DTFB_STORAGE_TEXEL_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
    if (bitfield & DTFB_UNIFORM_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
    if (bitfield & DTFB_STORAGE_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    if (bitfield & DTFB_UNIFORM_BUFFER_DYNAMIC)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
    if (bitfield & DTFB_STORAGE_BUFFER_DYNAMIC)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
    if (bitfield & DTFB_INPUT_ATTACHMENT)
        result.push_back(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
    if (bitfield & DTFB_INLINE_UNIFORM_BLOCK_EXT)
        result.push_back(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
    if (bitfield & DTFB_ACCELERATION_STRUCTURE_KHR)
        result.push_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
    if (bitfield & DTFB_ACCELERATION_STRUCTURE_NV)
        result.push_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV);
    if (bitfield & DTFB_MUTABLE)
        result.push_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT);

    return result;
}

// How to create the source set when copying descriptors from another set.
// * MUTABLE means to transform bindings into mutable bindings.
// * NONMUTABLE means to transform bindings into non-mutable bindings.
// * NO_SOURCE means no source set is created (descriptors are written directly to the destination set).
enum class SourceSetStrategy
{
    MUTABLE = 0,
    NONMUTABLE,
    NO_SOURCE,
};

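// How mutable type lists are declared when creating the descriptor pool (summary inferred from the enumerator
// names: keep the layout's type lists, expand them to a wider set, or pass no type lists at all).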
enum class PoolMutableStrategy
{
    KEEP_TYPES = 0,
    EXPAND_TYPES,
    NO_TYPES,
};

// Type of information that's present in VkWriteDescriptorSet.
enum class WriteType
{
    IMAGE_INFO = 0,
    BUFFER_INFO,
    BUFFER_VIEW,
    ACCELERATION_STRUCTURE_INFO,
};

struct WriteInfo
{
    WriteType writeType;
    union
    {
        VkDescriptorImageInfo imageInfo;
        VkDescriptorBufferInfo bufferInfo;
        VkBufferView bufferView;
        VkWriteDescriptorSetAccelerationStructureKHR asInfo;
    };

    explicit WriteInfo(const VkDescriptorImageInfo &info_) : writeType(WriteType::IMAGE_INFO), imageInfo(info_)
    {
    }

    explicit WriteInfo(const VkDescriptorBufferInfo &info_) : writeType(WriteType::BUFFER_INFO), bufferInfo(info_)
    {
    }

    explicit WriteInfo(VkBufferView view_) : writeType(WriteType::BUFFER_VIEW), bufferView(view_)
    {
    }

    explicit WriteInfo(const VkWriteDescriptorSetAccelerationStructureKHR &asInfo_)
        : writeType(WriteType::ACCELERATION_STRUCTURE_INFO)
        , asInfo(asInfo_)
    {
    }
};

// Resource backing up a single binding.
enum class ResourceType
{
    SAMPLER = 0,
    IMAGE,
    COMBINED_IMAGE_SAMPLER,
    BUFFER,
    BUFFER_VIEW,
    ACCELERATION_STRUCTURE,
};

// Type of resource backing up a particular descriptor type.
ResourceType toResourceType(VkDescriptorType descriptorType)
{
    ResourceType resourceType = ResourceType::SAMPLER;
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        resourceType = ResourceType::SAMPLER;
        break;

    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        resourceType = ResourceType::COMBINED_IMAGE_SAMPLER;
        break;

    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        resourceType = ResourceType::IMAGE;
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        resourceType = ResourceType::BUFFER_VIEW;
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        resourceType = ResourceType::BUFFER;
        break;

    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
        resourceType = ResourceType::ACCELERATION_STRUCTURE;
        break;

    default:
        DE_ASSERT(false);
        break;
    }

    return resourceType;
}

bool isShaderWritable(VkDescriptorType descriptorType)
{
    return (descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
            descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
}
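
// Note: these are exactly the descriptor types written to by the GLSL code generated in glslCheckStatements below.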

Move<VkSampler> makeDefaultSampler(const DeviceInterface &vkd, VkDevice device)
{
    const VkSamplerCreateInfo samplerCreateInfo = {
        VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
        nullptr,                               // const void* pNext;
        0u,                                    // VkSamplerCreateFlags flags;
        VK_FILTER_NEAREST,                     // VkFilter magFilter;
        VK_FILTER_NEAREST,                     // VkFilter minFilter;
        VK_SAMPLER_MIPMAP_MODE_NEAREST,        // VkSamplerMipmapMode mipmapMode;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeU;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeV;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeW;
        0.f,                                   // float mipLodBias;
        VK_FALSE,                              // VkBool32 anisotropyEnable;
        1.f,                                   // float maxAnisotropy;
        VK_FALSE,                              // VkBool32 compareEnable;
        VK_COMPARE_OP_ALWAYS,                  // VkCompareOp compareOp;
        0.f,                                   // float minLod;
        0.f,                                   // float maxLod;
        VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
        VK_FALSE,                              // VkBool32 unnormalizedCoordinates;
    };

    return createSampler(vkd, device, &samplerCreateInfo);
}

de::MovePtr<ImageWithMemory> makeDefaultImage(const DeviceInterface &vkd, VkDevice device, Allocator &alloc)
{
    const auto extent = makeExtent3D(1u, 1u, 1u);
    const VkImageUsageFlags usageFlags =
        (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
         VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                             // const void* pNext;
        0u,                                  // VkImageCreateFlags flags;
        VK_IMAGE_TYPE_2D,                    // VkImageType imageType;
        getDescriptorImageFormat(),          // VkFormat format;
        extent,                              // VkExtent3D extent;
        1u,                                  // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
        VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
        usageFlags,                          // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        0u,                                  // uint32_t queueFamilyIndexCount;
        nullptr,                             // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };
    return de::MovePtr<ImageWithMemory>(
        new ImageWithMemory(vkd, device, alloc, imageCreateInfo, MemoryRequirement::Any));
}

Move<VkImageView> makeDefaultImageView(const DeviceInterface &vkd, VkDevice device, VkImage image)
{
    const auto subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
    return makeImageView(vkd, device, image, VK_IMAGE_VIEW_TYPE_2D, getDescriptorImageFormat(), subresourceRange);
}

de::MovePtr<BufferWithMemory> makeDefaultBuffer(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
                                                uint32_t numElements = 1u)
{
    const VkBufferUsageFlags bufferUsage =
        (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
         VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
         VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    const auto bufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t) * static_cast<size_t>(numElements));

    const auto bufferCreateInfo = makeBufferCreateInfo(bufferSize, bufferUsage);

    return de::MovePtr<BufferWithMemory>(
        new BufferWithMemory(vkd, device, alloc, bufferCreateInfo, MemoryRequirement::HostVisible));
}

Move<VkBufferView> makeDefaultBufferView(const DeviceInterface &vkd, VkDevice device, VkBuffer buffer)
{
    const auto bufferOffset = static_cast<VkDeviceSize>(0);
    const auto bufferSize   = static_cast<VkDeviceSize>(sizeof(uint32_t));

    return makeBufferView(vkd, device, buffer, getDescriptorImageFormat(), bufferOffset, bufferSize);
}

struct AccelerationStructureData
{
    using TLASPtr = de::MovePtr<TopLevelAccelerationStructure>;
    using BLASPtr = de::MovePtr<BottomLevelAccelerationStructure>;

    TLASPtr tlas;
    BLASPtr blas;

    void swap(AccelerationStructureData &other)
    {
        auto myTlasPtr = tlas.release();
        auto myBlasPtr = blas.release();

        auto otherTlasPtr = other.tlas.release();
        auto otherBlasPtr = other.blas.release();

        tlas = TLASPtr(otherTlasPtr);
        blas = BLASPtr(otherBlasPtr);

        other.tlas = TLASPtr(myTlasPtr);
        other.blas = BLASPtr(myBlasPtr);
    }

    AccelerationStructureData() : tlas(), blas()
    {
    }

    AccelerationStructureData(AccelerationStructureData &&other) : AccelerationStructureData()
    {
        swap(other);
    }

    AccelerationStructureData &operator=(AccelerationStructureData &&other)
    {
        swap(other);
        return *this;
    }
};

AccelerationStructureData makeDefaultAccelerationStructure(const DeviceInterface &vkd, VkDevice device,
                                                           VkCommandBuffer cmdBuffer, Allocator &alloc, bool triangles,
                                                           uint16_t offsetX)
{
    AccelerationStructureData data;

    // Triangle or AABB around (offsetX, 0) with depth 5.0.
    const float middleX = static_cast<float>(offsetX);
    const float leftX   = middleX - 0.5f;
    const float rightX  = middleX + 0.5f;
    const float topY    = 0.5f;
    const float bottomY = -0.5f;
    const float depth   = 5.0f;

    std::vector<tcu::Vec3> vertices;

    if (triangles)
    {
        vertices.reserve(3u);
        vertices.emplace_back(middleX, topY, depth);
        vertices.emplace_back(rightX, bottomY, depth);
        vertices.emplace_back(leftX, bottomY, depth);
    }
    else
    {
        vertices.reserve(2u);
        vertices.emplace_back(leftX, bottomY, depth);
        vertices.emplace_back(rightX, topY, depth);
    }

    data.tlas = makeTopLevelAccelerationStructure();
    data.blas = makeBottomLevelAccelerationStructure();

    VkGeometryInstanceFlagsKHR instanceFlags = 0u;
    if (triangles)
        instanceFlags |= VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;

    data.blas->addGeometry(vertices, triangles, VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR);
    data.blas->createAndBuild(vkd, device, cmdBuffer, alloc);

    de::SharedPtr<BottomLevelAccelerationStructure> blasSharedPtr(data.blas.release());
    data.tlas->setInstanceCount(1u);
    data.tlas->addInstance(blasSharedPtr, identityMatrix3x4, 0u, 0xFFu, 0u, instanceFlags);
    data.tlas->createAndBuild(vkd, device, cmdBuffer, alloc);

    return data;
}

const auto kShaderAccess = (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);

struct Resource
{
    VkDescriptorType descriptorType;
    ResourceType resourceType;
    Move<VkSampler> sampler;
    de::MovePtr<ImageWithMemory> imageWithMemory;
    Move<VkImageView> imageView;
    de::MovePtr<BufferWithMemory> bufferWithMemory;
    Move<VkBufferView> bufferView;
    AccelerationStructureData asData;
    uint32_t initialValue;

    Resource(VkDescriptorType descriptorType_, const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
             uint32_t qIndex, VkQueue queue, bool useAABBs, uint32_t initialValue_, uint32_t numElements = 1u)
        : descriptorType(descriptorType_)
        , resourceType(toResourceType(descriptorType))
        , sampler()
        , imageWithMemory()
        , imageView()
        , bufferWithMemory()
        , bufferView()
        , asData()
        , initialValue(initialValue_)
    {
        if (numElements != 1u)
            DE_ASSERT(resourceType == ResourceType::BUFFER);

        switch (resourceType)
        {
        case ResourceType::SAMPLER:
            sampler = makeDefaultSampler(vkd, device);
            break;

        case ResourceType::IMAGE:
            imageWithMemory = makeDefaultImage(vkd, device, alloc);
            imageView       = makeDefaultImageView(vkd, device, imageWithMemory->get());
            break;

        case ResourceType::COMBINED_IMAGE_SAMPLER:
            sampler         = makeDefaultSampler(vkd, device);
            imageWithMemory = makeDefaultImage(vkd, device, alloc);
            imageView       = makeDefaultImageView(vkd, device, imageWithMemory->get());
            break;

        case ResourceType::BUFFER:
            bufferWithMemory = makeDefaultBuffer(vkd, device, alloc, numElements);
            break;

        case ResourceType::BUFFER_VIEW:
            bufferWithMemory = makeDefaultBuffer(vkd, device, alloc);
            bufferView       = makeDefaultBufferView(vkd, device, bufferWithMemory->get());
            break;

        case ResourceType::ACCELERATION_STRUCTURE:
        {
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();
            const bool triangles = !useAABBs;

            beginCommandBuffer(vkd, cmdBuffer);
            asData = makeDefaultAccelerationStructure(vkd, device, cmdBuffer, alloc, triangles,
                                                      getAccelerationStructureOffsetX(initialValue));
            endCommandBuffer(vkd, cmdBuffer);
            submitCommandsAndWait(vkd, device, queue, cmdBuffer);
        }
        break;

        default:
            DE_ASSERT(false);
            break;
        }

        if (imageWithMemory || bufferWithMemory)
        {
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();

            if (imageWithMemory)
            {
                // Prepare staging buffer.
                const auto bufferSize                = static_cast<VkDeviceSize>(sizeof(initialValue));
                const VkBufferUsageFlags bufferUsage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
                const auto stagingBufferInfo         = makeBufferCreateInfo(bufferSize, bufferUsage);

                BufferWithMemory stagingBuffer(vkd, device, alloc, stagingBufferInfo, MemoryRequirement::HostVisible);
                auto &bufferAlloc = stagingBuffer.getAllocation();
                void *bufferData  = bufferAlloc.getHostPtr();

                deMemcpy(bufferData, &initialValue, sizeof(initialValue));
                flushAlloc(vkd, device, bufferAlloc);

                beginCommandBuffer(vkd, cmdBuffer);

                // Transition and copy image.
                const auto copyRegion = makeBufferImageCopy(
                    makeExtent3D(1u, 1u, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));

                // Switch image to TRANSFER_DST_OPTIMAL before copying data to it.
                const auto subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);

                const auto preTransferBarrier = makeImageMemoryBarrier(
                    0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    imageWithMemory->get(), subresourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
                                       0u, nullptr, 0u, nullptr, 1u, &preTransferBarrier);

                // Copy data to image.
                vkd.cmdCopyBufferToImage(cmdBuffer, stagingBuffer.get(), imageWithMemory->get(),
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);

                // Switch image to the GENERAL layout before reading or writing to it from shaders.
                const auto postTransferBarrier = makeImageMemoryBarrier(
                    VK_ACCESS_TRANSFER_WRITE_BIT, kShaderAccess, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    VK_IMAGE_LAYOUT_GENERAL, imageWithMemory->get(), subresourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                       0u, 0u, nullptr, 0u, nullptr, 1u, &postTransferBarrier);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);
            }

            if (bufferWithMemory)
            {
                auto &bufferAlloc = bufferWithMemory->getAllocation();
                void *bufferData  = bufferAlloc.getHostPtr();

                const std::vector<uint32_t> bufferValues(numElements, initialValue);
                deMemcpy(bufferData, bufferValues.data(), de::dataSize(bufferValues));
                flushAlloc(vkd, device, bufferAlloc);

                beginCommandBuffer(vkd, cmdBuffer);

                // Make sure host writes happen before shader reads/writes. Note: this barrier is not needed in theory.
                const auto hostToShaderBarrier = makeMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, kShaderAccess);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0u,
                                       1u, &hostToShaderBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);
            }
        }
    }

    // Remove problematic copy constructor.
    Resource(const Resource &) = delete;

    // Make it movable.
    Resource(Resource &&other) noexcept
        : descriptorType(other.descriptorType)
        , resourceType(other.resourceType)
        , sampler(other.sampler)
        , imageWithMemory(other.imageWithMemory.release())
        , imageView(other.imageView)
        , bufferWithMemory(other.bufferWithMemory.release())
        , bufferView(other.bufferView)
        , asData(std::move(other.asData))
        , initialValue(other.initialValue)
    {
    }

    ~Resource()
    {
    }

    WriteInfo makeWriteInfo() const
    {
        using WriteInfoPtr = de::MovePtr<WriteInfo>;

        WriteInfoPtr writeInfo;

        switch (resourceType)
        {
        case ResourceType::SAMPLER:
        {
            const VkDescriptorImageInfo imageInfo = {sampler.get(), DE_NULL, VK_IMAGE_LAYOUT_UNDEFINED};
            writeInfo                             = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::IMAGE:
        {
            const VkDescriptorImageInfo imageInfo = {DE_NULL, imageView.get(), VK_IMAGE_LAYOUT_GENERAL};
            writeInfo                             = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::COMBINED_IMAGE_SAMPLER:
        {
            const VkDescriptorImageInfo imageInfo = {sampler.get(), imageView.get(), VK_IMAGE_LAYOUT_GENERAL};
            writeInfo                             = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::BUFFER:
        {
            const VkDescriptorBufferInfo bufferInfo = {bufferWithMemory->get(), 0ull,
                                                       static_cast<VkDeviceSize>(sizeof(uint32_t))};
            writeInfo                               = WriteInfoPtr(new WriteInfo(bufferInfo));
        }
        break;

        case ResourceType::BUFFER_VIEW:
            writeInfo = WriteInfoPtr(new WriteInfo(bufferView.get()));
            break;

        case ResourceType::ACCELERATION_STRUCTURE:
        {
            VkWriteDescriptorSetAccelerationStructureKHR asWrite = initVulkanStructure();
            asWrite.accelerationStructureCount                   = 1u;
            asWrite.pAccelerationStructures                      = asData.tlas.get()->getPtr();
            writeInfo                                            = WriteInfoPtr(new WriteInfo(asWrite));
        }
        break;

        default:
            DE_ASSERT(false);
            break;
        }

        return *writeInfo;
    }

    tcu::Maybe<uint32_t> getStoredValue(const DeviceInterface &vkd, VkDevice device, Allocator &alloc, uint32_t qIndex,
                                        VkQueue queue, uint32_t position = 0u) const
    {
        if (position != 0u)
            DE_ASSERT(static_cast<bool>(bufferWithMemory));

        if (imageWithMemory || bufferWithMemory)
        {
            // Command pool and buffer.
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();

            if (imageWithMemory)
            {
                // Prepare staging buffer.
                uint32_t result;
                const auto bufferSize                = static_cast<VkDeviceSize>(sizeof(result));
                const VkBufferUsageFlags bufferUsage = (VK_BUFFER_USAGE_TRANSFER_DST_BIT);
                const auto stagingBufferInfo         = makeBufferCreateInfo(bufferSize, bufferUsage);

                BufferWithMemory stagingBuffer(vkd, device, alloc, stagingBufferInfo, MemoryRequirement::HostVisible);
                auto &bufferAlloc = stagingBuffer.getAllocation();
                void *bufferData  = bufferAlloc.getHostPtr();

                // Copy image value to staging buffer.
                beginCommandBuffer(vkd, cmdBuffer);

                // Make sure shader accesses happen before transfers and prepare image for transfer.
                const auto colorResourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);

                const auto preTransferBarrier = makeImageMemoryBarrier(
                    kShaderAccess, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageWithMemory->get(), colorResourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                       0u, 0u, nullptr, 0u, nullptr, 1u, &preTransferBarrier);

                // Copy image contents to staging buffer.
                const auto copyRegion = makeBufferImageCopy(
                    makeExtent3D(1u, 1u, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
                vkd.cmdCopyImageToBuffer(cmdBuffer, imageWithMemory->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         stagingBuffer.get(), 1u, &copyRegion);

                // Make sure writes are visible from the host.
                const auto postTransferBarrier =
                    makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
                                       &postTransferBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);

                // Get value from staging buffer.
                invalidateAlloc(vkd, device, bufferAlloc);
                deMemcpy(&result, bufferData, sizeof(result));
                return tcu::just(result);
            }

            if (bufferWithMemory)
            {
                auto &bufferAlloc = bufferWithMemory->getAllocation();
                auto bufferData   = reinterpret_cast<const char *>(bufferAlloc.getHostPtr());
                uint32_t result;

                // Make sure shader writes are visible from the host.
                beginCommandBuffer(vkd, cmdBuffer);

                const auto shaderToHostBarrier = makeMemoryBarrier(kShaderAccess, VK_ACCESS_HOST_READ_BIT);
                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
                                       1u, &shaderToHostBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);

                invalidateAlloc(vkd, device, bufferAlloc);
                deMemcpy(&result, bufferData + sizeof(uint32_t) * static_cast<size_t>(position), sizeof(result));
                return tcu::just(result);
            }
        }

        return tcu::Nothing;
    }
};

struct BindingInterface
{
    virtual ~BindingInterface()
    {
    }

    // Minimum number of iterations to test all mutable types.
    virtual uint32_t maxTypes() const = 0;

    // Types that will be used by the binding at a given iteration.
    virtual std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const = 0;

    // Binding's main type.
    virtual VkDescriptorType mainType() const = 0;

    // Binding's list of mutable types, if present.
    virtual std::vector<VkDescriptorType> mutableTypes() const = 0;

    // Descriptor count in the binding.
    virtual size_t size() const = 0;

    // Is the binding an array binding?
    virtual bool isArray() const = 0;

    // Is the binding an unbounded array?
    virtual bool isUnbounded() const = 0;

    // Will the binding use different descriptor types in a given iteration?
    virtual bool needsAliasing(uint32_t iteration) const
    {
        const auto typesVec = typesAtIteration(iteration);
        std::set<VkDescriptorType> descTypes(begin(typesVec), end(typesVec));
        return (descTypes.size() > 1u);
    }

    // Will the binding need aliasing on any iteration up to a given number?
    virtual bool needsAliasingUpTo(uint32_t numIterations) const
    {
        std::vector<bool> needsAliasingFlags;
        needsAliasingFlags.reserve(numIterations);

        for (uint32_t iter = 0u; iter < numIterations; ++iter)
            needsAliasingFlags.push_back(needsAliasing(iter));

        return std::any_of(begin(needsAliasingFlags), end(needsAliasingFlags), [](bool f) { return f; });
    }

private:
    virtual bool hasDescriptorType(uint32_t iteration, VkDescriptorType descriptorType) const
    {
        const auto typesVec = typesAtIteration(iteration);
        return (std::find(begin(typesVec), end(typesVec), descriptorType) != end(typesVec));
    }

public:
    // Convert one particular binding to a mutable or non-mutable equivalent binding, returning the equivalent binding.
    virtual de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const    = 0;
    virtual de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const = 0;

    // Create resources needed to back up this binding.
    virtual std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
                                                  uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
                                                  uint32_t baseValue) const = 0;

    // Get GLSL binding declarations. Note: no array size means no array, if size is < 0 it means unbounded array.
    virtual std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum,
                                         uint32_t inputAttachmentIdx, tcu::Maybe<int32_t> arraySize) const = 0;

    // Get GLSL statements to check this binding.
    virtual std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum,
                                            uint32_t baseValue, tcu::Maybe<uint32_t> arrayIndex,
                                            bool usePushConstants) const = 0;
};

// Represents a single binding that will be used in a test.
class SingleBinding : public BindingInterface
{
private:
    VkDescriptorType type; // The descriptor type.
    std::vector<VkDescriptorType>
        mutableTypesVec; // The types that will be used for each iteration of a test if mutable.

public:
    SingleBinding(VkDescriptorType type_, std::vector<VkDescriptorType> mutableTypes_)
        : type(type_)
        , mutableTypesVec(std::move(mutableTypes_))
    {
        static const auto kForbiddenMutableTypes = getForbiddenMutableTypes();
        const auto kBeginForbidden               = begin(kForbiddenMutableTypes);
        const auto kEndForbidden                 = end(kForbiddenMutableTypes);

        // For release builds.
        DE_UNREF(kBeginForbidden);
        DE_UNREF(kEndForbidden);

        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
        {
            DE_ASSERT(mutableTypesVec.empty());
        }
        else
        {
            DE_ASSERT(!mutableTypesVec.empty());
            DE_ASSERT(std::none_of(begin(mutableTypesVec), end(mutableTypesVec),
                                   [&kBeginForbidden, &kEndForbidden](VkDescriptorType t) -> bool
                                   { return std::find(kBeginForbidden, kEndForbidden, t) != kEndForbidden; }));
        }
    }

    uint32_t maxTypes() const override
    {
        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
            return 1u;
        const auto vecSize = mutableTypesVec.size();
        DE_ASSERT(vecSize <= std::numeric_limits<uint32_t>::max());
        return static_cast<uint32_t>(vecSize);
    }

    VkDescriptorType typeAtIteration(uint32_t iteration) const
    {
        return typesAtIteration(iteration)[0];
    }

    std::vector<VkDescriptorType> usedTypes() const
    {
        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
            return std::vector<VkDescriptorType>(1u, type);
        return mutableTypesVec;
    }

    std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const override
    {
        const auto typesVec = usedTypes();
        return std::vector<VkDescriptorType>(1u, typesVec[static_cast<size_t>(iteration) % typesVec.size()]);
    }
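
    // E.g. a mutable binding with usable types {A, B, C} resolves to A on iterations 0, 3, 6..., to B on
    // iterations 1, 4, 7... and to C on iterations 2, 5, 8... (the iteration index wraps around the type list).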
1062
mainType() const1063 VkDescriptorType mainType() const override
1064 {
1065 return type;
1066 }
1067
mutableTypes() const1068 std::vector<VkDescriptorType> mutableTypes() const override
1069 {
1070 return mutableTypesVec;
1071 }
1072
size() const1073 size_t size() const override
1074 {
1075 return size_t{1u};
1076 }
1077
isArray() const1078 bool isArray() const override
1079 {
1080 return false;
1081 }
1082
isUnbounded() const1083 bool isUnbounded() const override
1084 {
1085 return false;
1086 }
1087
toMutable(uint32_t iteration) const1088 de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const override
1089 {
1090 DE_UNREF(iteration);
1091
1092 static const auto kMandatoryMutableTypeFlags = toDescriptorTypeFlags(getMandatoryMutableTypes());
1093 if (type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
1094 {
1095 const auto descFlags = toDescriptorTypeFlags(mutableTypesVec);
1096 return de::MovePtr<BindingInterface>(new SingleBinding(type, toDescriptorTypeVector(descFlags)));
1097 }
1098
1099 // Make sure it's not a forbidden mutable type.
1100 static const auto kForbiddenMutableTypes = getForbiddenMutableTypes();
1101 DE_ASSERT(std::find(begin(kForbiddenMutableTypes), end(kForbiddenMutableTypes), type) ==
1102 end(kForbiddenMutableTypes));
1103
1104 // Convert the binding to mutable using a wider set of descriptor types if possible, including the binding type.
1105 const auto descFlags = (kMandatoryMutableTypeFlags | toDescriptorTypeFlagBit(type));
1106
1107 return de::MovePtr<BindingInterface>(
1108 new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, toDescriptorTypeVector(descFlags)));
1109 }
1110
toNonMutable(uint32_t iteration) const1111 de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const override
1112 {
1113 return de::MovePtr<BindingInterface>(
1114 new SingleBinding(typeAtIteration(iteration), std::vector<VkDescriptorType>()));
1115 }
1116
createResources(const DeviceInterface & vkd,VkDevice device,Allocator & alloc,uint32_t qIndex,VkQueue queue,uint32_t iteration,bool useAABBs,uint32_t baseValue) const1117 std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
1118 uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
1119 uint32_t baseValue) const override
1120 {
1121 const auto descriptorType = typeAtIteration(iteration);
1122
1123 std::vector<Resource> resources;
1124 resources.emplace_back(descriptorType, vkd, device, alloc, qIndex, queue, useAABBs, baseValue);
1125 return resources;
1126 }
1127
glslDeclarations(uint32_t iteration,uint32_t setNum,uint32_t bindingNum,uint32_t inputAttachmentIdx,tcu::Maybe<int32_t> arraySize) const1128 std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t inputAttachmentIdx,
1129 tcu::Maybe<int32_t> arraySize) const override
1130 {
1131 const auto descriptorType = typeAtIteration(iteration);
1132 const std::string arraySuffix =
1133 ((static_cast<bool>(arraySize)) ?
1134 ((arraySize.get() < 0) ? "[]" : ("[" + de::toString(arraySize.get()) + "]")) :
1135 "");
1136 const std::string layoutAttribs = "set=" + de::toString(setNum) + ", binding=" + de::toString(bindingNum);
1137 const std::string bindingSuffix = "_" + de::toString(setNum) + "_" + de::toString(bindingNum);
1138 const std::string nameSuffix = bindingSuffix + arraySuffix;
1139 std::ostringstream declarations;
1140
1141 declarations << "layout (";
1142
1143 switch (descriptorType)
1144 {
1145 case VK_DESCRIPTOR_TYPE_SAMPLER:
1146 declarations << layoutAttribs << ") uniform sampler sampler" << nameSuffix;
1147 break;
1148
1149 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1150 declarations << layoutAttribs << ") uniform usampler2D combinedSampler" << nameSuffix;
1151 break;
1152
1153 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1154 declarations << layoutAttribs << ") uniform utexture2D sampledImage" << nameSuffix;
1155 break;
1156
1157 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1158 declarations << layoutAttribs << ") uniform uboBlock" << bindingSuffix << " { uint val; } ubo"
1159 << nameSuffix;
1160 break;
1161
1162 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1163 declarations << layoutAttribs << ") buffer sboBlock" << bindingSuffix << " { uint val; } ssbo"
1164 << nameSuffix;
1165 break;
1166
1167 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1168 declarations << layoutAttribs << ") uniform utextureBuffer uniformTexel" << nameSuffix;
1169 break;
1170
1171 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1172 declarations << layoutAttribs << ", r32ui) uniform uimageBuffer storageTexel" << nameSuffix;
1173 break;
1174
1175 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1176 declarations << layoutAttribs << ", r32ui) uniform uimage2D storageImage" << nameSuffix;
1177 break;
1178
1179 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1180 declarations << layoutAttribs << ", input_attachment_index=" << inputAttachmentIdx
1181 << ") uniform usubpassInput inputAttachment" << nameSuffix;
1182 break;
1183
1184 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
1185 declarations << layoutAttribs << ") uniform accelerationStructureEXT accelerationStructure" << nameSuffix;
1186 break;
1187
1188 default:
1189 DE_ASSERT(false);
1190 break;
1191 }
1192
1193 declarations << ";\n";
1194
1195 return declarations.str();
1196 }
1197
glslCheckStatements(uint32_t iteration,uint32_t setNum,uint32_t bindingNum,uint32_t baseValue_,tcu::Maybe<uint32_t> arrayIndex,bool usePushConstants) const1198 std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t baseValue_,
1199 tcu::Maybe<uint32_t> arrayIndex, bool usePushConstants) const override
1200 {
1201 const auto descriptorType = typeAtIteration(iteration);
1202 const std::string bindingSuffix = "_" + de::toString(setNum) + "_" + de::toString(bindingNum);
1203
1204 std::string indexSuffix;
1205 if (arrayIndex)
1206 {
1207 indexSuffix = de::toString(arrayIndex.get());
1208 if (usePushConstants)
1209 indexSuffix += " + pc.zero";
1210 indexSuffix = "[" + indexSuffix + "]";
1211 }
1212
1213 const std::string nameSuffix = bindingSuffix + indexSuffix;
1214 const std::string baseValue = toHex(baseValue_);
1215 const std::string externalImageValue = toHex(getExternalSampledImageValue());
1216 const std::string mask = toHex(getStoredValueMask());
1217
1218 std::ostringstream checks;
1219
1220 // Note: all of these depend on an external anyError uint variable.
1221 switch (descriptorType)
1222 {
1223 case VK_DESCRIPTOR_TYPE_SAMPLER:
1224 // Note this depends on an "externalSampledImage" binding.
1225 checks << " {\n";
1226 checks << " uint readValue = texture(usampler2D(externalSampledImage, sampler" << nameSuffix
1227 << "), vec2(0, 0)).r;\n";
1228 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1229 checks << " anyError |= ((readValue == " << externalImageValue << ") ? 0u : 1u);\n";
1230 //checks << " anyError = readValue;\n";
1231 checks << " }\n";
1232 break;
1233
1234 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1235 checks << " {\n";
1236 checks << " uint readValue = texture(combinedSampler" << nameSuffix << ", vec2(0, 0)).r;\n";
1237 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1238 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1239 //checks << " anyError = readValue;\n";
1240 checks << " }\n";
1241 break;
1242
1243 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1244 // Note this depends on an "externalSampler" binding.
1245 checks << " {\n";
1246 checks << " uint readValue = texture(usampler2D(sampledImage" << nameSuffix
1247 << ", externalSampler), vec2(0, 0)).r;\n";
1248 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1249 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1250 //checks << " anyError = readValue;\n";
1251 checks << " }\n";
1252 break;
1253
1254 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1255 checks << " {\n";
1256 checks << " uint readValue = ubo" << nameSuffix << ".val;\n";
1257 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1258 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1259 //checks << " anyError = readValue;\n";
1260 checks << " }\n";
1261 break;
1262
1263 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1264 checks << " {\n";
1265 checks << " uint readValue = ssbo" << nameSuffix << ".val;\n";
1266 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1267 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1268 //checks << " anyError = readValue;\n";
1269 // Check writes.
1270 checks << " ssbo" << nameSuffix << ".val = (readValue | " << mask << ");\n";
1271 checks << " }\n";
1272 break;
1273
1274 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1275 checks << " {\n";
1276 checks << " uint readValue = texelFetch(uniformTexel" << nameSuffix << ", 0).x;\n";
1277 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1278 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1279 //checks << " anyError = readValue;\n";
1280 checks << " }\n";
1281 break;
1282
1283 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1284 checks << " {\n";
1285 checks << " uint readValue = imageLoad(storageTexel" << nameSuffix << ", 0).x;\n";
1286 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1287 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1288 //checks << " anyError = readValue;\n";
1289 checks << " readValue |= " << mask << ";\n";
1290 // Check writes.
1291 checks << " imageStore(storageTexel" << nameSuffix << ", 0, uvec4(readValue, 0, 0, 0));\n";
1292 checks << " }\n";
1293 break;
1294
1295 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1296 checks << " {\n";
1297 checks << " uint readValue = imageLoad(storageImage" << nameSuffix << ", ivec2(0, 0)).x;\n";
1298 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1299 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1300 //checks << " anyError = readValue;\n";
1301 checks << " readValue |= " << mask << ";\n";
1302 // Check writes.
1303 checks << " imageStore(storageImage" << nameSuffix << ", ivec2(0, 0), uvec4(readValue, 0, 0, 0));\n";
1304 checks << " }\n";
1305 break;
1306
1307 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1308 checks << " {\n";
1309 checks << " uint readValue = subpassLoad(inputAttachment" << nameSuffix << ").x;\n";
1310 checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
1311 checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
1312 //checks << " anyError = readValue;\n";
1313 checks << " }\n";
1314 break;
1315
1316 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
1317 checks << " {\n";
1318 checks << " const uint cullMask = 0xFF;\n";
1319 checks << " const vec3 origin = vec3(" << getAccelerationStructureOffsetX(baseValue_)
1320 << ".0, 0.0, 0.0);\n";
1321 checks << " const vec3 direction = vec3(0.0, 0.0, 1.0);\n";
1322 checks << " const float tmin = 1.0;\n";
1323 checks << " const float tmax = 10.0;\n";
1324 checks << " uint candidateFound = 0u;\n";
1325 checks << " rayQueryEXT rq;\n";
1326 checks << " rayQueryInitializeEXT(rq, accelerationStructure" << nameSuffix
1327 << ", gl_RayFlagsNoneEXT, cullMask, origin, tmin, direction, tmax);\n";
1328 checks << " while (rayQueryProceedEXT(rq)) {\n";
1329 checks << " const uint candidateType = rayQueryGetIntersectionTypeEXT(rq, false);\n";
1330 checks << " if (candidateType == gl_RayQueryCandidateIntersectionTriangleEXT || candidateType == "
1331 "gl_RayQueryCandidateIntersectionAABBEXT) {\n";
1332 checks << " candidateFound = 1u;\n";
1333 checks << " }\n";
1334 checks << " }\n";
1335 checks << " anyError |= ((candidateFound == 1u) ? 0u : 1u);\n";
1336 checks << " }\n";
1337 break;
1338
1339 default:
1340 DE_ASSERT(false);
1341 break;
1342 }
1343
1344 return checks.str();
1345 }
1346 };
1347
1348 // Represents an array of bindings. Individual bindings are stored as SingleBindings because each one of them may take a different
1349 // type in each iteration (i.e. they can all have different descriptor type vectors).
1350 class ArrayBinding : public BindingInterface
1351 {
1352 private:
1353 bool unbounded;
1354 std::vector<SingleBinding> bindings;
1355
1356 public:
ArrayBinding(bool unbounded_,std::vector<SingleBinding> bindings_)1357 ArrayBinding(bool unbounded_, std::vector<SingleBinding> bindings_)
1358 : unbounded(unbounded_)
1359 , bindings(std::move(bindings_))
1360 {
1361 // We need to check all single bindings have the same effective type, even if mutable descriptors have different orders.
1362 DE_ASSERT(!bindings.empty());
1363
1364 std::set<VkDescriptorType> basicTypes;
1365 std::set<DescriptorTypeFlags> bindingTypes;
1366
1367 for (const auto &b : bindings)
1368 {
1369 basicTypes.insert(b.mainType());
1370 bindingTypes.insert(toDescriptorTypeFlags(b.usedTypes()));
1371 }
1372
1373 DE_ASSERT(basicTypes.size() == 1u);
1374 DE_ASSERT(bindingTypes.size() == 1u);
1375
1376 // For release builds.
1377 DE_UNREF(basicTypes);
1378 DE_UNREF(bindingTypes);
1379 }
1380
maxTypes() const1381 uint32_t maxTypes() const override
1382 {
1383 // Each binding may have the same effective type but a different number of iterations due to repeated types.
1384 std::vector<size_t> bindingSizes;
1385 bindingSizes.reserve(bindings.size());
1386
1387 std::transform(begin(bindings), end(bindings), std::back_inserter(bindingSizes),
1388 [](const SingleBinding &b) { return b.usedTypes().size(); });
1389
1390 const auto maxElement = std::max_element(begin(bindingSizes), end(bindingSizes));
1391 DE_ASSERT(maxElement != end(bindingSizes));
1392 DE_ASSERT(*maxElement <= std::numeric_limits<uint32_t>::max());
1393 return static_cast<uint32_t>(*maxElement);
1394 }
1395
typesAtIteration(uint32_t iteration) const1396 std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const override
1397 {
1398 std::vector<VkDescriptorType> result;
1399 result.reserve(bindings.size());
1400
1401 for (const auto &b : bindings)
1402 result.push_back(b.typeAtIteration(iteration));
1403
1404 return result;
1405 }
1406
mainType() const1407 VkDescriptorType mainType() const override
1408 {
1409 return bindings[0].mainType();
1410 }
1411
mutableTypes() const1412 std::vector<VkDescriptorType> mutableTypes() const override
1413 {
1414 return bindings[0].mutableTypes();
1415 }
1416
size() const1417 size_t size() const override
1418 {
1419 return bindings.size();
1420 }
1421
isArray() const1422 bool isArray() const override
1423 {
1424 return true;
1425 }
1426
isUnbounded() const1427 bool isUnbounded() const override
1428 {
1429 return unbounded;
1430 }
1431
toMutable(uint32_t iteration) const1432 de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const override
1433 {
1434 // Replicate the first binding once converted, as all are equivalent.
1435 const auto firstBindingPtr = bindings[0].toMutable(iteration);
1436 const auto firstBinding = *dynamic_cast<SingleBinding *>(firstBindingPtr.get());
1437 const std::vector<SingleBinding> newBindings(bindings.size(), firstBinding);
1438
1439 return de::MovePtr<BindingInterface>(new ArrayBinding(unbounded, newBindings));
1440 }
1441
toNonMutable(uint32_t iteration) const1442 de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const override
1443 {
1444 // Make sure this binding can be converted to nonmutable for a given iteration.
1445 DE_ASSERT(!needsAliasing(iteration));
1446
1447 // We could use each SingleBinding's toNonMutable(), but this is the same.
1448 const auto descType = bindings[0].typeAtIteration(iteration);
1449 const SingleBinding firstBinding(descType, std::vector<VkDescriptorType>());
1450 const std::vector<SingleBinding> newBindings(bindings.size(), firstBinding);
1451
1452 return de::MovePtr<BindingInterface>(new ArrayBinding(unbounded, newBindings));
1453 }
1454
1455 std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
1456 uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
1457 uint32_t baseValue) const override
1458 {
1459 std::vector<Resource> resources;
1460 const auto numBindings = static_cast<uint32_t>(bindings.size());
1461
1462 for (uint32_t i = 0u; i < numBindings; ++i)
1463 {
1464 auto resourceVec =
1465 bindings[i].createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs, baseValue + i);
1466 resources.emplace_back(std::move(resourceVec[0]));
1467 }
1468
1469 return resources;
1470 }
1471
1472 // We will ignore the array size parameter.
1473 std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t inputAttachmentIdx,
1474 tcu::Maybe<int32_t> arraySize) const override
1475 {
1476 const auto descriptorCount = bindings.size();
1477 const auto arraySizeVal =
1478 (isUnbounded() ? tcu::just(int32_t{-1}) : tcu::just(static_cast<int32_t>(descriptorCount)));
1479
1480 DE_UNREF(arraySize);
1481 DE_ASSERT(descriptorCount < static_cast<size_t>(std::numeric_limits<int32_t>::max()));
1482
1483 // If no aliasing is needed, a single declaration covers the whole array.
1484 if (!needsAliasing(iteration))
1485 return bindings[0].glslDeclarations(iteration, setNum, bindingNum, inputAttachmentIdx, arraySizeVal);
1486
1487 // Aliasing needed. Avoid reusing types.
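// For example, if this iteration resolves the elements to {SSBO, UBO, SSBO}, we emit one
// declaration per distinct type (SSBO and UBO), each aliasing the same set/binding pair;
// the per-element check statements are then expected to use whichever declaration matches
// each element's type.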
1488 const auto descriptorTypes = typesAtIteration(iteration);
1489 std::set<VkDescriptorType> usedTypes;
1490 std::ostringstream declarations;
1491
1492 for (size_t descriptorIdx = 0u; descriptorIdx < descriptorCount; ++descriptorIdx)
1493 {
1494 const auto &descriptorType = descriptorTypes[descriptorIdx];
1495 if (usedTypes.count(descriptorType) > 0)
1496 continue;
1497
1498 usedTypes.insert(descriptorType);
1499 declarations << bindings[descriptorIdx].glslDeclarations(iteration, setNum, bindingNum, inputAttachmentIdx,
1500 arraySizeVal);
1501 }
1502
1503 return declarations.str();
1504 }
1505
1506 std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t baseValue_,
1507 tcu::Maybe<uint32_t> arrayIndex, bool usePushConstants) const override
1508 {
1509 DE_ASSERT(!arrayIndex);
1510 DE_UNREF(arrayIndex); // For release builds.
1511
1512 std::ostringstream checks;
1513 const auto numDescriptors = static_cast<uint32_t>(bindings.size());
1514
1515 for (uint32_t descriptorIdx = 0u; descriptorIdx < numDescriptors; ++descriptorIdx)
1516 {
1517 const auto &binding = bindings[descriptorIdx];
1518 checks << binding.glslCheckStatements(iteration, setNum, bindingNum, baseValue_ + descriptorIdx,
1519 tcu::just(descriptorIdx), usePushConstants);
1520 }
1521
1522 return checks.str();
1523 }
1524 };
1525
1526 class DescriptorSet;
1527
1528 using DescriptorSetPtr = de::SharedPtr<DescriptorSet>;
1529
1530 class DescriptorSet
1531 {
1532 public:
1533 using BindingInterfacePtr = de::MovePtr<BindingInterface>;
1534 using BindingPtrVector = std::vector<BindingInterfacePtr>;
1535
1536 private:
1537 BindingPtrVector bindings;
1538
1539 public:
1540 explicit DescriptorSet(BindingPtrVector &bindings_) : bindings(std::move(bindings_))
1541 {
1542 DE_ASSERT(!bindings.empty());
1543 }
1544
1545 size_t numBindings() const
1546 {
1547 return bindings.size();
1548 }
1549
1550 const BindingInterface *getBinding(size_t bindingIdx) const
1551 {
1552 return bindings.at(bindingIdx).get();
1553 }
1554
1555 // Maximum number of descriptor types used by any binding in the set.
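// For example, a set whose two bindings use the type lists {UBO} and {UBO, SSBO, SAMPLED_IMAGE}
// yields maxTypes() == 3; the tests run that many iterations so every mutable type is exercised.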
1556 uint32_t maxTypes() const
1557 {
1558 std::vector<uint32_t> maxSizes;
1559 maxSizes.reserve(bindings.size());
1560
1561 std::transform(begin(bindings), end(bindings), std::back_inserter(maxSizes),
1562 [](const BindingInterfacePtr &b) { return b->maxTypes(); });
1563
1564 const auto maxElement = std::max_element(begin(maxSizes), end(maxSizes));
1565 DE_ASSERT(maxElement != end(maxSizes));
1566 return *maxElement;
1567 }
1568
1569 // Create another descriptor set that can be the source for copies when setting descriptor values.
1570 DescriptorSetPtr genSourceSet(SourceSetStrategy strategy, uint32_t iteration) const
1571 {
1572 BindingPtrVector newBindings;
1573 for (const auto &b : bindings)
1574 {
1575 if (strategy == SourceSetStrategy::MUTABLE)
1576 newBindings.push_back(b->toMutable(iteration));
1577 else
1578 newBindings.push_back(b->toNonMutable(iteration));
1579 }
1580
1581 return DescriptorSetPtr(new DescriptorSet(newBindings));
1582 }
1583
1584 // Makes a descriptor pool that can be used when allocating descriptors for this set.
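// Each binding contributes one VkDescriptorPoolSize. Depending on the strategy, mutable bindings
// also get a VkMutableDescriptorTypeListEXT: KEEP_TYPES passes their exact type list,
// EXPAND_TYPES additionally ORs in the mandatory mutable types, and NO_TYPES omits the
// VkMutableDescriptorTypeCreateInfoEXT structure altogether.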
1585 Move<VkDescriptorPool> makeDescriptorPool(const DeviceInterface &vkd, VkDevice device, PoolMutableStrategy strategy,
1586 VkDescriptorPoolCreateFlags flags) const
1587 {
1588 std::vector<VkDescriptorPoolSize> poolSizes;
1589 std::vector<std::vector<VkDescriptorType>> mutableTypesVec;
1590 std::vector<VkMutableDescriptorTypeListEXT> mutableTypeLists;
1591
1592 // Make vector element addresses stable.
1593 const auto bindingCount = numBindings();
1594 poolSizes.reserve(bindingCount);
1595 mutableTypesVec.reserve(bindingCount);
1596 mutableTypeLists.reserve(bindingCount);
1597
1598 for (const auto &b : bindings)
1599 {
1600 const auto mainType = b->mainType();
1601 const VkDescriptorPoolSize poolSize = {
1602 mainType,
1603 static_cast<uint32_t>(b->size()),
1604 };
1605 poolSizes.push_back(poolSize);
1606
1607 if (strategy == PoolMutableStrategy::KEEP_TYPES || strategy == PoolMutableStrategy::EXPAND_TYPES)
1608 {
1609 if (mainType == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
1610 {
1611 if (strategy == PoolMutableStrategy::KEEP_TYPES)
1612 {
1613 mutableTypesVec.emplace_back(b->mutableTypes());
1614 }
1615 else
1616 {
1617 // Expand the type list with the mandatory types.
1618 static const auto mandatoryTypesFlags = toDescriptorTypeFlags(getMandatoryMutableTypes());
1619 const auto bindingTypes =
1620 toDescriptorTypeVector(mandatoryTypesFlags | toDescriptorTypeFlags(b->mutableTypes()));
1621
1622 mutableTypesVec.emplace_back(bindingTypes);
1623 }
1624
1625 const auto &lastVec = mutableTypesVec.back();
1626 const VkMutableDescriptorTypeListEXT typeList = {static_cast<uint32_t>(lastVec.size()),
1627 de::dataOrNull(lastVec)};
1628 mutableTypeLists.push_back(typeList);
1629 }
1630 else
1631 {
1632 const VkMutableDescriptorTypeListEXT typeList = {0u, nullptr};
1633 mutableTypeLists.push_back(typeList);
1634 }
1635 }
1636 else if (strategy == PoolMutableStrategy::NO_TYPES)
1637 ; // Do nothing, we will not use any type list.
1638 else
1639 DE_ASSERT(false);
1640 }
1641
1642 VkDescriptorPoolCreateInfo poolCreateInfo = initVulkanStructure();
1643
1644 poolCreateInfo.maxSets = 1u;
1645 poolCreateInfo.flags = flags;
1646 poolCreateInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
1647 poolCreateInfo.pPoolSizes = de::dataOrNull(poolSizes);
1648
1649 VkMutableDescriptorTypeCreateInfoEXT mutableInfo = initVulkanStructure();
1650
1651 if (strategy == PoolMutableStrategy::KEEP_TYPES || strategy == PoolMutableStrategy::EXPAND_TYPES)
1652 {
1653 mutableInfo.mutableDescriptorTypeListCount = static_cast<uint32_t>(mutableTypeLists.size());
1654 mutableInfo.pMutableDescriptorTypeLists = de::dataOrNull(mutableTypeLists);
1655 poolCreateInfo.pNext = &mutableInfo;
1656 }
1657
1658 return createDescriptorPool(vkd, device, &poolCreateInfo);
1659 }
1660
1661 private:
1662 // Building the descriptor set layout create info structure is cumbersome, so we'll reuse the same procedure to check support
1663 // and create the layout. This structure contains the result. "supported" is created as an enum to avoid the Move<> to bool
1664 // conversion cast in the constructors.
1665 struct DescriptorSetLayoutResult
1666 {
1667 enum class LayoutSupported
1668 {
1669 NO = 0,
1670 YES
1671 };
1672
1673 LayoutSupported supported;
1674 Move<VkDescriptorSetLayout> layout;
1675
1676 explicit DescriptorSetLayoutResult(Move<VkDescriptorSetLayout> &&layout_)
1677 : supported(LayoutSupported::YES)
1678 , layout(layout_)
1679 {
1680 }
1681
1682 explicit DescriptorSetLayoutResult(LayoutSupported supported_) : supported(supported_), layout()
1683 {
1684 }
1685 };
1686
1687 DescriptorSetLayoutResult makeOrCheckDescriptorSetLayout(bool checkOnly, const DeviceInterface &vkd,
1688 VkDevice device, VkShaderStageFlags stageFlags,
1689 VkDescriptorSetLayoutCreateFlags createFlags) const
1690 {
1691 const auto numIterations = maxTypes();
1692 std::vector<VkDescriptorSetLayoutBinding> bindingsVec;
1693 std::vector<std::vector<VkDescriptorType>> mutableTypesVec;
1694 std::vector<VkMutableDescriptorTypeListEXT> mutableTypeLists;
1695
1696 // Make vector element addresses stable.
1697 const auto bindingCount = numBindings();
1698 bindingsVec.reserve(bindingCount);
1699 mutableTypesVec.reserve(bindingCount);
1700 mutableTypeLists.reserve(bindingCount);
1701
1702 for (size_t bindingIdx = 0u; bindingIdx < bindings.size(); ++bindingIdx)
1703 {
1704 const auto &binding = bindings[bindingIdx];
1705 const auto mainType = binding->mainType();
1706
1707 const VkDescriptorSetLayoutBinding layoutBinding = {
1708 static_cast<uint32_t>(bindingIdx), // uint32_t binding;
1709 mainType, // VkDescriptorType descriptorType;
1710 static_cast<uint32_t>(binding->size()), // uint32_t descriptorCount;
1711 stageFlags, // VkShaderStageFlags stageFlags;
1712 nullptr, // const VkSampler* pImmutableSamplers;
1713 };
1714 bindingsVec.push_back(layoutBinding);
1715
1716 // This list may be empty for non-mutable types, which is fine.
1717 mutableTypesVec.push_back(binding->mutableTypes());
1718 const auto &lastVec = mutableTypesVec.back();
1719
1720 const VkMutableDescriptorTypeListEXT typeList = {
1721 static_cast<uint32_t>(lastVec.size()), // uint32_t descriptorTypeCount;
1722 de::dataOrNull(lastVec), // const VkDescriptorType* pDescriptorTypes;
1723 };
1724 mutableTypeLists.push_back(typeList);
1725 }
1726
1727 // Make sure to include the variable descriptor count and/or update after bind binding flags.
1728 const bool updateAfterBind = ((createFlags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) != 0u);
1729 bool lastIsUnbounded = false;
1730 bool aliasingNeeded = false;
1731 std::vector<bool> bindingNeedsAliasing(bindings.size(), false);
1732
1733 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1734 {
1735 if (bindingIdx < bindings.size() - 1)
1736 DE_ASSERT(!bindings[bindingIdx]->isUnbounded());
1737 else
1738 lastIsUnbounded = bindings[bindingIdx]->isUnbounded();
1739
1740 if (bindings[bindingIdx]->needsAliasingUpTo(numIterations))
1741 {
1742 bindingNeedsAliasing[bindingIdx] = true;
1743 aliasingNeeded = true;
1744 }
1745 }
1746
1747 using FlagsCreateInfoPtr = de::MovePtr<VkDescriptorSetLayoutBindingFlagsCreateInfo>;
1748 using BindingFlagsVecPtr = de::MovePtr<std::vector<VkDescriptorBindingFlags>>;
1749
1750 FlagsCreateInfoPtr flagsCreateInfo;
1751 BindingFlagsVecPtr bindingFlagsVec;
1752
1753 if (updateAfterBind || lastIsUnbounded || aliasingNeeded)
1754 {
1755 flagsCreateInfo = FlagsCreateInfoPtr(new VkDescriptorSetLayoutBindingFlagsCreateInfo);
1756 *flagsCreateInfo = initVulkanStructure();
1757
1758 bindingFlagsVec = BindingFlagsVecPtr(new std::vector<VkDescriptorBindingFlags>(
1759 bindingsVec.size(), (updateAfterBind ? VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT : 0)));
1760 if (lastIsUnbounded)
1761 bindingFlagsVec->back() |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
1762
1763 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1764 {
1765 if (bindingNeedsAliasing[bindingIdx])
1766 bindingFlagsVec->at(bindingIdx) |= VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT;
1767 }
1768
1769 flagsCreateInfo->bindingCount = static_cast<uint32_t>(bindingFlagsVec->size());
1770 flagsCreateInfo->pBindingFlags = de::dataOrNull(*bindingFlagsVec);
1771 }
1772
1773 const VkMutableDescriptorTypeCreateInfoEXT createInfoMutable = {
1774 VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT,
1775 flagsCreateInfo.get(),
1776 static_cast<uint32_t>(mutableTypeLists.size()),
1777 de::dataOrNull(mutableTypeLists),
1778 };
1779
1780 const VkDescriptorSetLayoutCreateInfo layoutCreateInfo = {
1781 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
1782 &createInfoMutable, // const void* pNext;
1783 createFlags, // VkDescriptorSetLayoutCreateFlags flags;
1784 static_cast<uint32_t>(bindingsVec.size()), // uint32_t bindingCount;
1785 de::dataOrNull(bindingsVec), // const VkDescriptorSetLayoutBinding* pBindings;
1786 };
1787
1788 if (checkOnly)
1789 {
1790 VkDescriptorSetLayoutSupport support = initVulkanStructure();
1791 vkd.getDescriptorSetLayoutSupport(device, &layoutCreateInfo, &support);
1792 DescriptorSetLayoutResult result((support.supported == VK_TRUE) ?
1793 DescriptorSetLayoutResult::LayoutSupported::YES :
1794 DescriptorSetLayoutResult::LayoutSupported::NO);
1795 return result;
1796 }
1797 else
1798 {
1799 DescriptorSetLayoutResult result(createDescriptorSetLayout(vkd, device, &layoutCreateInfo));
1800 return result;
1801 }
1802 }
1803
1804 public:
1805 Move<VkDescriptorSetLayout> makeDescriptorSetLayout(const DeviceInterface &vkd, VkDevice device,
1806 VkShaderStageFlags stageFlags,
1807 VkDescriptorSetLayoutCreateFlags createFlags) const
1808 {
1809 return makeOrCheckDescriptorSetLayout(false /*checkOnly*/, vkd, device, stageFlags, createFlags).layout;
1810 }
1811
1812 bool checkDescriptorSetLayout(const DeviceInterface &vkd, VkDevice device, VkShaderStageFlags stageFlags,
1813 VkDescriptorSetLayoutCreateFlags createFlags) const
1814 {
1815 return (makeOrCheckDescriptorSetLayout(true /*checkOnly*/, vkd, device, stageFlags, createFlags).supported ==
1816 DescriptorSetLayoutResult::LayoutSupported::YES);
1817 }
1818
1819 size_t numDescriptors() const
1820 {
1821 size_t total = 0;
1822 for (const auto &b : bindings)
1823 total += b->size();
1824 return total;
1825 }
1826
1827 std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
1828 uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs) const
1829 {
1830 // Create resources for each binding.
1831 std::vector<Resource> result;
1832 result.reserve(numDescriptors());
1833
1834 const auto bindingsCount = static_cast<uint32_t>(bindings.size());
1835
1836 for (uint32_t bindingIdx = 0u; bindingIdx < bindingsCount; ++bindingIdx)
1837 {
1838 const auto &binding = bindings[bindingIdx];
1839 auto bindingResources = binding->createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs,
1840 getDescriptorNumericValue(iteration, bindingIdx));
1841
1842 for (auto &resource : bindingResources)
1843 result.emplace_back(std::move(resource));
1844 }
1845
1846 return result;
1847 }
1848
1849 // Updates a descriptor set with the given resources. Note: the set must have been created with a layout that's compatible with this object.
1850 void updateDescriptorSet(const DeviceInterface &vkd, VkDevice device, VkDescriptorSet set, uint32_t iteration,
1851 const std::vector<Resource> &resources) const
1852 {
1853 // Make sure the number of resources is correct.
1854 const auto numResources = resources.size();
1855 DE_ASSERT(numDescriptors() == numResources);
1856
1857 std::vector<VkWriteDescriptorSet> descriptorWrites;
1858 descriptorWrites.reserve(numResources);
1859
1860 std::vector<VkDescriptorImageInfo> imageInfoVec;
1861 std::vector<VkDescriptorBufferInfo> bufferInfoVec;
1862 std::vector<VkBufferView> bufferViewVec;
1863 std::vector<VkWriteDescriptorSetAccelerationStructureKHR> asWriteVec;
1864 size_t resourceIdx = 0;
1865
1866 // We'll be storing pointers to elements of these vectors as we're appending elements, so we need their addresses to be stable.
1867 imageInfoVec.reserve(numResources);
1868 bufferInfoVec.reserve(numResources);
1869 bufferViewVec.reserve(numResources);
1870 asWriteVec.reserve(numResources);
1871
1872 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1873 {
1874 const auto &binding = bindings[bindingIdx];
1875 const auto descriptorTypes = binding->typesAtIteration(iteration);
1876
1877 for (size_t descriptorIdx = 0; descriptorIdx < binding->size(); ++descriptorIdx)
1878 {
1879 // Make sure the resource type matches the expected value.
1880 const auto &resource = resources[resourceIdx];
1881 const auto &descriptorType = descriptorTypes[descriptorIdx];
1882
1883 DE_ASSERT(resource.descriptorType == descriptorType);
1884
1885 // Obtain the descriptor write info for the resource.
1886 const auto writeInfo = resource.makeWriteInfo();
1887
1888 switch (writeInfo.writeType)
1889 {
1890 case WriteType::IMAGE_INFO:
1891 imageInfoVec.push_back(writeInfo.imageInfo);
1892 break;
1893 case WriteType::BUFFER_INFO:
1894 bufferInfoVec.push_back(writeInfo.bufferInfo);
1895 break;
1896 case WriteType::BUFFER_VIEW:
1897 bufferViewVec.push_back(writeInfo.bufferView);
1898 break;
1899 case WriteType::ACCELERATION_STRUCTURE_INFO:
1900 asWriteVec.push_back(writeInfo.asInfo);
1901 break;
1902 default:
1903 DE_ASSERT(false);
1904 break;
1905 }
1906
1907 // Add a new VkWriteDescriptorSet struct or extend the last one with more info. This helps us exercise different implementation code paths.
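// For example, three consecutive writes to array elements 0..2 of the same binding with the
// same descriptor type collapse into a single VkWriteDescriptorSet with dstArrayElement = 0
// and descriptorCount = 3.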
1908 bool extended = false;
1909
1910 if (!descriptorWrites.empty() && descriptorIdx > 0)
1911 {
1912 auto &last = descriptorWrites.back();
1913 if (last.dstSet == set /* this should always be true */ && last.dstBinding == bindingIdx &&
1914 (last.dstArrayElement + last.descriptorCount) == descriptorIdx &&
1915 last.descriptorType == descriptorType &&
1916 writeInfo.writeType != WriteType::ACCELERATION_STRUCTURE_INFO)
1917 {
1918 // The new write should be in the same vector (imageInfoVec, bufferInfoVec or bufferViewVec) so increasing the count works.
1919 ++last.descriptorCount;
1920 extended = true;
1921 }
1922 }
1923
1924 if (!extended)
1925 {
1926 const VkWriteDescriptorSet write = {
1927 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1928 ((writeInfo.writeType == WriteType::ACCELERATION_STRUCTURE_INFO) ? &asWriteVec.back() :
1929 nullptr),
1930 set,
1931 static_cast<uint32_t>(bindingIdx),
1932 static_cast<uint32_t>(descriptorIdx),
1933 1u,
1934 descriptorType,
1935 (writeInfo.writeType == WriteType::IMAGE_INFO ? &imageInfoVec.back() : nullptr),
1936 (writeInfo.writeType == WriteType::BUFFER_INFO ? &bufferInfoVec.back() : nullptr),
1937 (writeInfo.writeType == WriteType::BUFFER_VIEW ? &bufferViewVec.back() : nullptr),
1938 };
1939 descriptorWrites.push_back(write);
1940 }
1941
1942 ++resourceIdx;
1943 }
1944 }
1945
1946 // Finally, update descriptor set with all the writes.
1947 vkd.updateDescriptorSets(device, static_cast<uint32_t>(descriptorWrites.size()),
1948 de::dataOrNull(descriptorWrites), 0u, nullptr);
1949 }
1950
1951 // Copies descriptors between two sets. Both sets must be compatible with, and related to, this object.
1952 void copyDescriptorSet(const DeviceInterface &vkd, VkDevice device, VkDescriptorSet srcSet,
1953 VkDescriptorSet dstSet) const
1954 {
1955 std::vector<VkCopyDescriptorSet> copies;
1956
1957 for (size_t bindingIdx = 0; bindingIdx < numBindings(); ++bindingIdx)
1958 {
1959 const auto &binding = getBinding(bindingIdx);
1960 const auto bindingNumber = static_cast<uint32_t>(bindingIdx);
1961 const auto descriptorCount = static_cast<uint32_t>(binding->size());
1962
1963 const VkCopyDescriptorSet copy = {
1964 VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
1965 nullptr,
1966 // set, binding, array element.
1967 srcSet,
1968 bindingNumber,
1969 0u,
1970 dstSet,
1971 bindingNumber,
1972 0u,
1973 descriptorCount,
1974 };
1975
1976 copies.push_back(copy);
1977 }
1978
1979 vkd.updateDescriptorSets(device, 0u, nullptr, static_cast<uint32_t>(copies.size()), de::dataOrNull(copies));
1980 }
1981
1982 // Does any binding in the set need aliasing in a given iteration?
1983 bool needsAliasing(uint32_t iteration) const
1984 {
1985 std::vector<bool> aliasingNeededFlags;
1986 aliasingNeededFlags.reserve(bindings.size());
1987
1988 std::transform(begin(bindings), end(bindings), std::back_inserter(aliasingNeededFlags),
1989 [iteration](const BindingInterfacePtr &b) { return b->needsAliasing(iteration); });
1990 return std::any_of(begin(aliasingNeededFlags), end(aliasingNeededFlags), [](bool f) { return f; });
1991 }
1992
1993 // Does any binding in the set need aliasing in any iteration?
1994 bool needsAnyAliasing() const
1995 {
1996 const auto numIterations = maxTypes();
1997 std::vector<bool> aliasingNeededFlags(numIterations, false);
1998
1999 for (uint32_t iteration = 0; iteration < numIterations; ++iteration)
2000 aliasingNeededFlags[iteration] = needsAliasing(iteration);
2001
2002 return std::any_of(begin(aliasingNeededFlags), end(aliasingNeededFlags), [](bool f) { return f; });
2003 }
2004
2005 // Is the last binding an unbounded array?
2006 bool lastBindingIsUnbounded() const
2007 {
2008 if (bindings.empty())
2009 return false;
2010 return bindings.back()->isUnbounded();
2011 }
2012
2013 // Get the variable descriptor count for the last binding if any.
2014 tcu::Maybe<uint32_t> getVariableDescriptorCount() const
2015 {
2016 if (lastBindingIsUnbounded())
2017 return tcu::just(static_cast<uint32_t>(bindings.back()->size()));
2018 return tcu::Nothing;
2019 }
2020
2021 // Check if the set contains a descriptor of the given type at the given iteration.
2022 bool containsTypeAtIteration(VkDescriptorType descriptorType, uint32_t iteration) const
2023 {
2024 return std::any_of(begin(bindings), end(bindings),
2025 [descriptorType, iteration](const BindingInterfacePtr &b)
2026 {
2027 const auto types = b->typesAtIteration(iteration);
2028 return de::contains(begin(types), end(types), descriptorType);
2029 });
2030 }
2031
2032 // Is any binding an array?
2033 bool hasArrays() const
2034 {
2035 return std::any_of(begin(bindings), end(bindings), [](const BindingInterfacePtr &b) { return b->isArray(); });
2036 }
2037 };
2038
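// How descriptor contents are set: directly with write updates, or by writing a separate source
// set and then copying from it into the destination set.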
2039 enum class UpdateType
2040 {
2041 WRITE = 0,
2042 COPY,
2043 };
2044
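// Where the copy source set lives when copying: a regular pool, a host-only pool (as introduced
// by the mutable descriptor type extensions), or nowhere because no source set is needed.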
2045 enum class SourceSetType
2046 {
2047 NORMAL = 0,
2048 HOST_ONLY,
2049 NO_SOURCE,
2050 };
2051
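// Whether descriptor sets are updated before being bound or after bind, using the
// update-after-bind pool and binding flags.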
2052 enum class UpdateMoment
2053 {
2054 NORMAL = 0,
2055 UPDATE_AFTER_BIND,
2056 };
2057
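// Shader stage from which the descriptors will be accessed and verified.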
2058 enum class TestingStage
2059 {
2060 COMPUTE = 0,
2061 VERTEX,
2062 TESS_EVAL,
2063 TESS_CONTROL,
2064 GEOMETRY,
2065 FRAGMENT,
2066 RAY_GEN,
2067 INTERSECTION,
2068 ANY_HIT,
2069 CLOSEST_HIT,
2070 MISS,
2071 CALLABLE,
2072 };
2073
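// How array bindings are indexed in shaders: with constant indices, with a dynamically uniform
// index taken from a push constant, or not at all (the set contains no arrays).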
2074 enum class ArrayAccessType
2075 {
2076 CONSTANT = 0,
2077 PUSH_CONSTANT,
2078 NO_ARRAY,
2079 };
2080
2081 // Are we testing a ray tracing pipeline stage?
2082 bool isRayTracingStage(TestingStage stage)
2083 {
2084 switch (stage)
2085 {
2086 case TestingStage::RAY_GEN:
2087 case TestingStage::INTERSECTION:
2088 case TestingStage::ANY_HIT:
2089 case TestingStage::CLOSEST_HIT:
2090 case TestingStage::MISS:
2091 case TestingStage::CALLABLE:
2092 return true;
2093 default:
2094 break;
2095 }
2096
2097 return false;
2098 }
2099
2100 struct TestParams
2101 {
2102 DescriptorSetPtr descriptorSet;
2103 UpdateType updateType;
2104 SourceSetStrategy sourceSetStrategy;
2105 SourceSetType sourceSetType;
2106 PoolMutableStrategy poolMutableStrategy;
2107 UpdateMoment updateMoment;
2108 ArrayAccessType arrayAccessType;
2109 TestingStage testingStage;
2110
2111 VkShaderStageFlags getStageFlags() const
2112 {
2113 VkShaderStageFlags flags = 0u;
2114
2115 switch (testingStage)
2116 {
2117 case TestingStage::COMPUTE:
2118 flags |= VK_SHADER_STAGE_COMPUTE_BIT;
2119 break;
2120 case TestingStage::VERTEX:
2121 flags |= VK_SHADER_STAGE_VERTEX_BIT;
2122 break;
2123 case TestingStage::TESS_EVAL:
2124 flags |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
2125 break;
2126 case TestingStage::TESS_CONTROL:
2127 flags |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
2128 break;
2129 case TestingStage::GEOMETRY:
2130 flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
2131 break;
2132 case TestingStage::FRAGMENT:
2133 flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
2134 break;
2135 case TestingStage::RAY_GEN:
2136 flags |= VK_SHADER_STAGE_RAYGEN_BIT_KHR;
2137 break;
2138 case TestingStage::INTERSECTION:
2139 flags |= VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
2140 break;
2141 case TestingStage::ANY_HIT:
2142 flags |= VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
2143 break;
2144 case TestingStage::CLOSEST_HIT:
2145 flags |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
2146 break;
2147 case TestingStage::MISS:
2148 flags |= VK_SHADER_STAGE_MISS_BIT_KHR;
2149 break;
2150 case TestingStage::CALLABLE:
2151 flags |= VK_SHADER_STAGE_CALLABLE_BIT_KHR;
2152 break;
2153 default:
2154 DE_ASSERT(false);
2155 break;
2156 }
2157
2158 return flags;
2159 }
2160
2161 VkPipelineStageFlags getPipelineWriteStage() const
2162 {
2163 VkPipelineStageFlags flags = 0u;
2164
2165 switch (testingStage)
2166 {
2167 case TestingStage::COMPUTE:
2168 flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
2169 break;
2170 case TestingStage::VERTEX:
2171 flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
2172 break;
2173 case TestingStage::TESS_EVAL:
2174 flags |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
2175 break;
2176 case TestingStage::TESS_CONTROL:
2177 flags |= VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
2178 break;
2179 case TestingStage::GEOMETRY:
2180 flags |= VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
2181 break;
2182 case TestingStage::FRAGMENT:
2183 flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2184 break;
2185 case TestingStage::RAY_GEN: // fallthrough
2186 case TestingStage::INTERSECTION: // fallthrough
2187 case TestingStage::ANY_HIT: // fallthrough
2188 case TestingStage::CLOSEST_HIT: // fallthrough
2189 case TestingStage::MISS: // fallthrough
2190 case TestingStage::CALLABLE:
2191 flags |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
2192 break;
2193 default:
2194 DE_ASSERT(false);
2195 break;
2196 }
2197
2198 return flags;
2199 }
2200
2201 private:
2202 VkDescriptorSetLayoutCreateFlags getLayoutCreateFlags(bool isSourceSet) const
2203 {
2204 // UPDATE_AFTER_BIND cannot be used with HOST_ONLY sets.
2205 //DE_ASSERT(!(updateMoment == UpdateMoment::UPDATE_AFTER_BIND && sourceSetType == SourceSetType::HOST_ONLY));
2206
2207 VkDescriptorSetLayoutCreateFlags createFlags = 0u;
2208
2209 if ((!isSourceSet || sourceSetType != SourceSetType::HOST_ONLY) &&
2210 updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2211 createFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
2212
2213 if (isSourceSet && sourceSetType == SourceSetType::HOST_ONLY)
2214 createFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT;
2215
2216 return createFlags;
2217 }
2218
2219 public:
2220 VkDescriptorSetLayoutCreateFlags getSrcLayoutCreateFlags() const
2221 {
2222 return getLayoutCreateFlags(true);
2223 }
2224
2225 VkDescriptorSetLayoutCreateFlags getDstLayoutCreateFlags() const
2226 {
2227 return getLayoutCreateFlags(false);
2228 }
2229
2230 private:
2231 VkDescriptorPoolCreateFlags getPoolCreateFlags(bool isSourceSet) const
2232 {
2233 // UPDATE_AFTER_BIND cannot be used with HOST_ONLY sets.
2234 //DE_ASSERT(!(updateMoment == UpdateMoment::UPDATE_AFTER_BIND && sourceSetType == SourceSetType::HOST_ONLY));
2235
2236 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
2237
2238 if ((!isSourceSet || sourceSetType != SourceSetType::HOST_ONLY) &&
2239 updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2240 poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
2241
2242 if (isSourceSet && sourceSetType == SourceSetType::HOST_ONLY)
2243 poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT;
2244
2245 return poolCreateFlags;
2246 }
2247
2248 public:
2249 VkDescriptorPoolCreateFlags getSrcPoolCreateFlags() const
2250 {
2251 return getPoolCreateFlags(true);
2252 }
2253
2254 VkDescriptorPoolCreateFlags getDstPoolCreateFlags() const
2255 {
2256 return getPoolCreateFlags(false);
2257 }
2258
2259 VkPipelineBindPoint getBindPoint() const
2260 {
2261 if (testingStage == TestingStage::COMPUTE)
2262 return VK_PIPELINE_BIND_POINT_COMPUTE;
2263 if (isRayTracingStage(testingStage))
2264 return VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR;
2265 return VK_PIPELINE_BIND_POINT_GRAPHICS;
2266 }
2267 };
2268
2269 class MutableTypesTest : public TestCase
2270 {
2271 public:
2272 MutableTypesTest(tcu::TestContext &testCtx, const std::string &name, const TestParams &params)
2273 : TestCase(testCtx, name)
2274 , m_params(params)
2275 {
2276 }
2277
2278 ~MutableTypesTest() override = default;
2279
2280 void initPrograms(vk::SourceCollections &programCollection) const override;
2281 TestInstance *createInstance(Context &context) const override;
2282 void checkSupport(Context &context) const override;
2283
2284 private:
2285 TestParams m_params;
2286 };
2287
2288 class MutableTypesInstance : public TestInstance
2289 {
2290 public:
2291 MutableTypesInstance(Context &context, const TestParams &params) : TestInstance(context), m_params(params)
2292 {
2293 }
2294
2295 ~MutableTypesInstance() override = default;
2296
2297 tcu::TestStatus iterate() override;
2298
2299 private:
2300 TestParams m_params;
2301 };
2302
2303 // Check if a descriptor set contains a given descriptor type in any iteration up to maxTypes().
2304 bool containsAnyDescriptorType(const DescriptorSet &descriptorSet, VkDescriptorType descriptorType)
2305 {
2306 const auto numIterations = descriptorSet.maxTypes();
2307
2308 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2309 {
2310 if (descriptorSet.containsTypeAtIteration(descriptorType, iter))
2311 return true;
2312 }
2313
2314 return false;
2315 }
2316
2317 // Check if testing this descriptor set needs an external image (for sampler descriptors).
2318 bool needsExternalImage(const DescriptorSet &descriptorSet)
2319 {
2320 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_SAMPLER);
2321 }
2322
2323 // Check if testing this descriptor set needs an external sampler (for sampled images).
2324 bool needsExternalSampler(const DescriptorSet &descriptorSet)
2325 {
2326 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
2327 }
2328
2329 // Check if this descriptor set contains input attachments.
2330 bool usesInputAttachments(const DescriptorSet &descriptorSet)
2331 {
2332 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
2333 }
2334
2335 // Check if this descriptor set contains acceleration structures.
2336 bool usesAccelerationStructures(const DescriptorSet &descriptorSet)
2337 {
2338 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
2339 }
2340
2341 std::string shaderName(uint32_t iteration)
2342 {
2343 return ("iteration-" + de::toString(iteration));
2344 }
2345
2346 void MutableTypesTest::initPrograms(vk::SourceCollections &programCollection) const
2347 {
2348 const bool usePushConstants = (m_params.arrayAccessType == ArrayAccessType::PUSH_CONSTANT);
2349 const bool useExternalImage = needsExternalImage(*m_params.descriptorSet);
2350 const bool useExternalSampler = needsExternalSampler(*m_params.descriptorSet);
2351 const bool rayQueries = usesAccelerationStructures(*m_params.descriptorSet);
2352 const bool rayTracing = isRayTracingStage(m_params.testingStage);
2353 const auto numIterations = m_params.descriptorSet->maxTypes();
2354 const auto numBindings = m_params.descriptorSet->numBindings();
2355 const vk::ShaderBuildOptions rtBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);
2356
2357 // Extra set and bindings for external resources.
2358 std::ostringstream extraSet;
2359 uint32_t extraBindings = 0u;
2360
2361 extraSet << "layout (set=1, binding=" << extraBindings++ << ") buffer OutputBufferBlock { uint value["
2362 << numIterations << "]; } outputBuffer;\n";
2363 if (useExternalImage)
2364 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform utexture2D externalSampledImage;\n";
2365 if (useExternalSampler)
2366 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform sampler externalSampler;\n";
2367 // The extra binding below will be declared in the "passthrough" ray generation shader.
2368 #if 0
2369 if (rayTracing)
2370 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform accelerationStructureEXT externalAS;\n";
2371 #endif
2372
2373 // Common vertex preamble.
2374 std::ostringstream vertexPreamble;
2375 vertexPreamble << "vec2 vertexPositions[3] = vec2[](\n"
2376 << " vec2(0.0, -0.5),\n"
2377 << " vec2(0.5, 0.5),\n"
2378 << " vec2(-0.5, 0.5)\n"
2379 << ");\n";
2380
2381 // Vertex shader body common statements.
2382 std::ostringstream vertexBodyCommon;
2383 vertexBodyCommon << " gl_Position = vec4(vertexPositions[gl_VertexIndex], 0.0, 1.0);\n";
2384
2385 // Common tessellation control preamble.
2386 std::ostringstream tescPreamble;
2387 tescPreamble << "layout (vertices=3) out;\n"
2388 << "in gl_PerVertex\n"
2389 << "{\n"
2390 << " vec4 gl_Position;\n"
2391 << "} gl_in[gl_MaxPatchVertices];\n"
2392 << "out gl_PerVertex\n"
2393 << "{\n"
2394 << " vec4 gl_Position;\n"
2395 << "} gl_out[];\n";
2396
2397 // Common tessellation control body.
2398 std::ostringstream tescBodyCommon;
2399 tescBodyCommon << " gl_TessLevelInner[0] = 1.0;\n"
2400 << " gl_TessLevelInner[1] = 1.0;\n"
2401 << " gl_TessLevelOuter[0] = 1.0;\n"
2402 << " gl_TessLevelOuter[1] = 1.0;\n"
2403 << " gl_TessLevelOuter[2] = 1.0;\n"
2404 << " gl_TessLevelOuter[3] = 1.0;\n"
2405 << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n";
2406
2407 // Common tessellation evaluation preamble.
2408 std::ostringstream tesePreamble;
2409 tesePreamble << "layout (triangles, fractional_odd_spacing, cw) in;\n"
2410 << "in gl_PerVertex\n"
2411 << "{\n"
2412 << " vec4 gl_Position;\n"
2413 << "} gl_in[gl_MaxPatchVertices];\n"
2414 << "out gl_PerVertex\n"
2415 << "{\n"
2416 << " vec4 gl_Position;\n"
2417 << "};\n";
2418
2419 // Common tessellation evaluation body.
2420 std::ostringstream teseBodyCommon;
2421 teseBodyCommon << " gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position) +\n"
2422 << " (gl_TessCoord.y * gl_in[1].gl_Position) +\n"
2423 << " (gl_TessCoord.z * gl_in[2].gl_Position);\n";
2424
2425 // Shader preamble.
2426 std::ostringstream preamble;
2427
2428 preamble << "#version 460\n"
2429 << "#extension GL_EXT_nonuniform_qualifier : enable\n"
2430 << "#extension GL_EXT_debug_printf : enable\n"
2431 << (rayTracing ? "#extension GL_EXT_ray_tracing : enable\n" : "")
2432 << (rayQueries ? "#extension GL_EXT_ray_query : enable\n" : "") << "\n";
2433
2434 if (m_params.testingStage == TestingStage::VERTEX)
2435 {
2436 preamble << vertexPreamble.str();
2437 }
2438 else if (m_params.testingStage == TestingStage::COMPUTE)
2439 {
2440 preamble << "layout (local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
2441 << "\n";
2442 }
2443 else if (m_params.testingStage == TestingStage::GEOMETRY)
2444 {
2445 preamble << "layout (triangles) in;\n"
2446 << "layout (triangle_strip, max_vertices=3) out;\n"
2447 << "in gl_PerVertex\n"
2448 << "{\n"
2449 << " vec4 gl_Position;\n"
2450 << "} gl_in[3];\n"
2451 << "out gl_PerVertex\n"
2452 << "{\n"
2453 << " vec4 gl_Position;\n"
2454 << "};\n";
2455 }
2456 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2457 {
2458 preamble << tescPreamble.str();
2459 }
2460 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2461 {
2462 preamble << tesePreamble.str();
2463 }
2464 else if (m_params.testingStage == TestingStage::CALLABLE)
2465 {
2466 preamble << "layout (location=0) callableDataInEXT float unusedCallableData;\n";
2467 }
2468 else if (m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::ANY_HIT ||
2469 m_params.testingStage == TestingStage::MISS)
2470 {
2471 preamble << "layout (location=0) rayPayloadInEXT float unusedRayPayload;\n";
2472 }
2473 else if (m_params.testingStage == TestingStage::INTERSECTION)
2474 {
2475 preamble << "hitAttributeEXT vec3 hitAttribute;\n";
2476 }
2477
2478 preamble << extraSet.str();
2479 if (usePushConstants)
2480 preamble << "layout (push_constant, std430) uniform PushConstantBlock { uint zero; } pc;\n";
2481 preamble << "\n";
2482
2483 // We need to create a shader per iteration.
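// Sketch of the generated main() body for iteration N, assembled from the pieces below. The
// output buffer slot for the iteration is expected to end up as 2 when the first invocation to
// arrive runs all checks successfully (the check statements are expected to bump anyError on a
// mismatch):
//
//     void main() {
//         const uint flag = atomicCompSwap(outputBuffer.value[N], 0u, 1u);
//         if (flag == 0u) {
//             uint anyError = 0u;
//             // ... per-binding check statements from glslCheckStatements() ...
//             if (anyError == 0u) {
//                 atomicAdd(outputBuffer.value[N], 1u);
//             }
//         }
//         // ... stage-specific statements (e.g. gl_Position for vertex) ...
//     }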
2484 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2485 {
2486 // Shader preamble.
2487 std::ostringstream shader;
2488 shader << preamble.str();
2489
2490 uint32_t inputAttachmentCount = 0u;
2491
2492 // Descriptor declarations for this iteration.
2493 for (size_t bindingIdx = 0; bindingIdx < numBindings; ++bindingIdx)
2494 {
2495 DE_ASSERT(bindingIdx <= std::numeric_limits<uint32_t>::max());
2496
2497 const auto binding = m_params.descriptorSet->getBinding(bindingIdx);
2498 const auto bindingTypes = binding->typesAtIteration(iter);
2499 const auto hasInputAttachment =
2500 de::contains(begin(bindingTypes), end(bindingTypes), VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
2501 const auto isArray = binding->isArray();
2502 const auto isUnbounded = binding->isUnbounded();
2503 const auto bindingSize = binding->size();
2504
2505 // If the binding is an input attachment, make sure it's not an array.
2506 DE_ASSERT(!hasInputAttachment || !isArray);
2507
2508 // Make sure the descriptor count fits in an int32_t if needed.
2509 DE_ASSERT(!isArray || isUnbounded ||
2510 bindingSize <= static_cast<size_t>(std::numeric_limits<int32_t>::max()));
2511
2512 const auto arraySize =
2513 (isArray ? (isUnbounded ? tcu::just(int32_t{-1}) : tcu::just(static_cast<int32_t>(bindingSize))) :
2514 tcu::Nothing);
2515
2516 shader << binding->glslDeclarations(iter, 0u, static_cast<uint32_t>(bindingIdx), inputAttachmentCount,
2517 arraySize);
2518
2519 if (hasInputAttachment)
2520 ++inputAttachmentCount;
2521 }
2522
2523 // Main body.
2524 shader << "\n"
2525 << "void main() {\n"
2526 // This checks if we are the first invocation to arrive here, so the checks are executed only once.
2527 << " const uint flag = atomicCompSwap(outputBuffer.value[" << iter << "], 0u, 1u);\n"
2528 << " if (flag == 0u) {\n"
2529 << " uint anyError = 0u;\n";
2530
2531 for (size_t bindingIdx = 0; bindingIdx < numBindings; ++bindingIdx)
2532 {
2533 const auto binding = m_params.descriptorSet->getBinding(bindingIdx);
2534 const auto idx32 = static_cast<uint32_t>(bindingIdx);
2535 shader << binding->glslCheckStatements(iter, 0u, idx32, getDescriptorNumericValue(iter, idx32),
2536 tcu::Nothing, usePushConstants);
2537 }
2538
2539 shader << " if (anyError == 0u) {\n"
2540 << " atomicAdd(outputBuffer.value[" << iter << "], 1u);\n"
2541 << " }\n"
2542 << " }\n" // Closes if (flag == 0u).
2543 ;
2544
2545 if (m_params.testingStage == TestingStage::VERTEX)
2546 {
2547 shader << vertexBodyCommon.str();
2548 }
2549 else if (m_params.testingStage == TestingStage::GEOMETRY)
2550 {
2551 shader << " gl_Position = gl_in[0].gl_Position; EmitVertex();\n"
2552 << " gl_Position = gl_in[1].gl_Position; EmitVertex();\n"
2553 << " gl_Position = gl_in[2].gl_Position; EmitVertex();\n";
2554 }
2555 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2556 {
2557 shader << tescBodyCommon.str();
2558 }
2559 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2560 {
2561 shader << teseBodyCommon.str();
2562 }
2563
2564 shader << "}\n" // End of main().
2565 ;
2566
2567 {
2568 const auto shaderNameStr = shaderName(iter);
2569 const auto shaderStr = shader.str();
2570 auto &glslSource = programCollection.glslSources.add(shaderNameStr);
2571
2572 if (m_params.testingStage == TestingStage::COMPUTE)
2573 glslSource << glu::ComputeSource(shaderStr);
2574 else if (m_params.testingStage == TestingStage::VERTEX)
2575 glslSource << glu::VertexSource(shaderStr);
2576 else if (m_params.testingStage == TestingStage::FRAGMENT)
2577 glslSource << glu::FragmentSource(shaderStr);
2578 else if (m_params.testingStage == TestingStage::GEOMETRY)
2579 glslSource << glu::GeometrySource(shaderStr);
2580 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2581 glslSource << glu::TessellationControlSource(shaderStr);
2582 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2583 glslSource << glu::TessellationEvaluationSource(shaderStr);
2584 else if (m_params.testingStage == TestingStage::RAY_GEN)
2585 glslSource << glu::RaygenSource(updateRayTracingGLSL(shaderStr));
2586 else if (m_params.testingStage == TestingStage::INTERSECTION)
2587 glslSource << glu::IntersectionSource(updateRayTracingGLSL(shaderStr));
2588 else if (m_params.testingStage == TestingStage::ANY_HIT)
2589 glslSource << glu::AnyHitSource(updateRayTracingGLSL(shaderStr));
2590 else if (m_params.testingStage == TestingStage::CLOSEST_HIT)
2591 glslSource << glu::ClosestHitSource(updateRayTracingGLSL(shaderStr));
2592 else if (m_params.testingStage == TestingStage::MISS)
2593 glslSource << glu::MissSource(updateRayTracingGLSL(shaderStr));
2594 else if (m_params.testingStage == TestingStage::CALLABLE)
2595 glslSource << glu::CallableSource(updateRayTracingGLSL(shaderStr));
2596 else
2597 DE_ASSERT(false);
2598
2599 if (rayTracing || rayQueries)
2600 glslSource << rtBuildOptions;
2601 }
2602 }
2603
2604 if (m_params.testingStage == TestingStage::FRAGMENT || m_params.testingStage == TestingStage::GEOMETRY ||
2605 m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL)
2606 {
2607 // Add passthrough vertex shader that works for points.
2608 std::ostringstream vertPassthrough;
2609 vertPassthrough << "#version 460\n"
2610 << "out gl_PerVertex\n"
2611 << "{\n"
2612 << " vec4 gl_Position;\n"
2613 << "};\n"
2614 << vertexPreamble.str() << "void main() {\n"
2615 << vertexBodyCommon.str() << "}\n";
2616 programCollection.glslSources.add("vert") << glu::VertexSource(vertPassthrough.str());
2617 }
2618
2619 if (m_params.testingStage == TestingStage::TESS_CONTROL)
2620 {
2621 // Add passthrough tessellation evaluation shader.
2622 std::ostringstream tesePassthrough;
2623 tesePassthrough << "#version 460\n"
2624 << tesePreamble.str() << "void main (void)\n"
2625 << "{\n"
2626 << teseBodyCommon.str() << "}\n";
2627
2628 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(tesePassthrough.str());
2629 }
2630
2631 if (m_params.testingStage == TestingStage::TESS_EVAL)
2632 {
2633 // Add passthrough tessellation control shader.
2634 std::ostringstream tescPassthrough;
2635 tescPassthrough << "#version 460\n"
2636 << tescPreamble.str() << "void main (void)\n"
2637 << "{\n"
2638 << tescBodyCommon.str() << "}\n";
2639
2640 programCollection.glslSources.add("tesc") << glu::TessellationControlSource(tescPassthrough.str());
2641 }
2642
2643 if (rayTracing && m_params.testingStage != TestingStage::RAY_GEN)
2644 {
2645 // Add a "passthrough" ray generation shader.
2646 std::ostringstream rgen;
2647 rgen << "#version 460 core\n"
2648 << "#extension GL_EXT_ray_tracing : require\n"
2649 << "layout (set=1, binding=" << extraBindings << ") uniform accelerationStructureEXT externalAS;\n"
2650 << ((m_params.testingStage == TestingStage::CALLABLE) ?
2651 "layout (location=0) callableDataEXT float unusedCallableData;\n" :
2652 "layout (location=0) rayPayloadEXT float unusedRayPayload;\n")
2653 << "\n"
2654 << "void main()\n"
2655 << "{\n";
2656
2657 if (m_params.testingStage == TestingStage::INTERSECTION || m_params.testingStage == TestingStage::ANY_HIT ||
2658 m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::MISS)
2659 {
2660 // We need to trace rays in this case to get hits or misses.
2661 const auto zDir = ((m_params.testingStage == TestingStage::MISS) ? "-1.0" : "1.0");
2662
2663 rgen << " const uint cullMask = 0xFF;\n"
2664 << " const float tMin = 1.0;\n"
2665 << " const float tMax = 10.0;\n"
2666 << " const vec3 origin = vec3(0.0, 0.0, 0.0);\n"
2667 << " const vec3 direction = vec3(0.0, 0.0, " << zDir << ");\n"
2668 << " traceRayEXT(externalAS, gl_RayFlagsNoneEXT, cullMask, 0, 0, 0, origin, tMin, direction, tMax, "
2669 "0);\n";
2670 }
2671 else if (m_params.testingStage == TestingStage::CALLABLE)
2672 {
2673 rgen << " executeCallableEXT(0, 0);\n";
2674 }
2675
2676 // End of main().
2677 rgen << "}\n";
2678
2679 programCollection.glslSources.add("rgen")
2680 << glu::RaygenSource(updateRayTracingGLSL(rgen.str())) << rtBuildOptions;
2681
2682 // Intersection shaders will ignore the intersection, so we need a passthrough miss shader.
2683 if (m_params.testingStage == TestingStage::INTERSECTION)
2684 {
2685 std::ostringstream miss;
2686 miss << "#version 460 core\n"
2687 << "#extension GL_EXT_ray_tracing : require\n"
2688 << "layout (location=0) rayPayloadEXT float unusedRayPayload;\n"
2689 << "\n"
2690 << "void main()\n"
2691 << "{\n"
2692 << "}\n";
2693
2694 programCollection.glslSources.add("miss")
2695 << glu::MissSource(updateRayTracingGLSL(miss.str())) << rtBuildOptions;
2696 }
2697 }
2698 }
2699
2700 TestInstance *MutableTypesTest::createInstance(Context &context) const
2701 {
2702 return new MutableTypesInstance(context, m_params);
2703 }
2704
2705 void requirePartiallyBound(Context &context)
2706 {
2707 context.requireDeviceFunctionality("VK_EXT_descriptor_indexing");
2708 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2709 if (!indexingFeatures.descriptorBindingPartiallyBound)
2710 TCU_THROW(NotSupportedError, "Partially bound bindings not supported");
2711 }
2712
2713 void requireVariableDescriptorCount(Context &context)
2714 {
2715 context.requireDeviceFunctionality("VK_EXT_descriptor_indexing");
2716 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2717 if (!indexingFeatures.descriptorBindingVariableDescriptorCount)
2718 TCU_THROW(NotSupportedError, "Variable descriptor count not supported");
2719 }
2720
2721 // Calculates the set of used descriptor types for a given set and iteration count, for bindings matching a predicate.
2722 std::set<VkDescriptorType> getUsedDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations,
2723 bool (*predicate)(const BindingInterface *binding))
2724 {
2725 std::set<VkDescriptorType> usedDescriptorTypes;
2726
2727 for (size_t bindingIdx = 0; bindingIdx < descriptorSet.numBindings(); ++bindingIdx)
2728 {
2729 const auto bindingPtr = descriptorSet.getBinding(bindingIdx);
2730 if (predicate(bindingPtr))
2731 {
2732 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2733 {
2734 const auto descTypes = bindingPtr->typesAtIteration(iter);
2735 usedDescriptorTypes.insert(begin(descTypes), end(descTypes));
2736 }
2737 }
2738 }
2739
2740 return usedDescriptorTypes;
2741 }
2742
2743 std::set<VkDescriptorType> getAllUsedDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations)
2744 {
2745 return getUsedDescriptorTypes(descriptorSet, numIterations, [](const BindingInterface *) { return true; });
2746 }
2747
2748 std::set<VkDescriptorType> getUsedArrayDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations)
2749 {
2750 return getUsedDescriptorTypes(descriptorSet, numIterations, [](const BindingInterface *b) { return b->isArray(); });
2751 }
2752
2753 // Are we testing a vertex pipeline stage?
2754 bool isVertexStage(TestingStage stage)
2755 {
2756 switch (stage)
2757 {
2758 case TestingStage::VERTEX:
2759 case TestingStage::TESS_CONTROL:
2760 case TestingStage::TESS_EVAL:
2761 case TestingStage::GEOMETRY:
2762 return true;
2763 default:
2764 break;
2765 }
2766
2767 return false;
2768 }
2769
2770 void MutableTypesTest::checkSupport(Context &context) const
2771 {
2772 if (!context.isDeviceFunctionalitySupported("VK_VALVE_mutable_descriptor_type") &&
2773 !context.isDeviceFunctionalitySupported("VK_EXT_mutable_descriptor_type"))
2774
2775 TCU_THROW(NotSupportedError,
2776 "VK_VALVE_mutable_descriptor_type or VK_EXT_mutable_descriptor_type is not supported");
2777
2778 VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT mutableDescriptorType = initVulkanStructure();
2779 VkPhysicalDeviceFeatures2KHR features2 = initVulkanStructure(&mutableDescriptorType);
2780
2781 context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
2782
2783 if (!mutableDescriptorType.mutableDescriptorType)
2784 TCU_THROW(NotSupportedError, "mutableDescriptorType feature is not supported");
2785
2786 // Check ray tracing if needed.
2787 const bool rayTracing = isRayTracingStage(m_params.testingStage);
2788
2789 if (rayTracing)
2790 {
2791 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2792 context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
2793 }
2794
2795 // Check if ray queries are needed. Ray queries are used to verify acceleration structure descriptors.
2796 const bool rayQueriesNeeded = usesAccelerationStructures(*m_params.descriptorSet);
2797 if (rayQueriesNeeded)
2798 {
2799 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2800 context.requireDeviceFunctionality("VK_KHR_ray_query");
2801 }
2802
2803 // We'll use iterations to check each mutable type, as needed.
2804 const auto numIterations = m_params.descriptorSet->maxTypes();
2805
2806 if (m_params.descriptorSet->lastBindingIsUnbounded())
2807 requireVariableDescriptorCount(context);
2808
2809 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2810 {
2811 if (m_params.descriptorSet->needsAliasing(iter))
2812 {
2813 requirePartiallyBound(context);
2814 break;
2815 }
2816 }
2817
2818 if (m_params.updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2819 {
2820 // Check update after bind for each used descriptor type.
2821 const auto &usedDescriptorTypes = getAllUsedDescriptorTypes(*m_params.descriptorSet, numIterations);
2822 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2823
2824 for (const auto &descType : usedDescriptorTypes)
2825 {
2826 switch (descType)
2827 {
2828 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2829 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2830 if (!indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind)
2831 TCU_THROW(NotSupportedError, "Update-after-bind not supported for uniform buffers");
2832 break;
2833
2834 case VK_DESCRIPTOR_TYPE_SAMPLER:
2835 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2836 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2837 if (!indexingFeatures.descriptorBindingSampledImageUpdateAfterBind)
2838 TCU_THROW(NotSupportedError, "Update-after-bind not supported for samplers and sampled images");
2839 break;
2840
2841 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2842 if (!indexingFeatures.descriptorBindingStorageImageUpdateAfterBind)
2843 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage images");
2844 break;
2845
2846 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2847 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2848 if (!indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind)
2849 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage buffers");
2850 break;
2851
2852 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2853 if (!indexingFeatures.descriptorBindingUniformTexelBufferUpdateAfterBind)
2854 TCU_THROW(NotSupportedError, "Update-after-bind not supported for uniform texel buffers");
2855 break;
2856
2857 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2858 if (!indexingFeatures.descriptorBindingStorageTexelBufferUpdateAfterBind)
2859 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage texel buffers");
2860 break;
2861
2862 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2863 TCU_THROW(InternalError, "Tests do not support update-after-bind with input attachments");
2864
2865 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2866 {
2867 // Just in case we ever mix some of these in.
2868 context.requireDeviceFunctionality("VK_EXT_inline_uniform_block");
2869 const auto &iubFeatures = context.getInlineUniformBlockFeatures();
2870 if (!iubFeatures.descriptorBindingInlineUniformBlockUpdateAfterBind)
2871 TCU_THROW(NotSupportedError, "Update-after-bind not supported for inline uniform blocks");
2872 }
2873 break;
2874
2875 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2876 {
2877 // Just in case we ever mix some of these in.
2878 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2879 const auto &asFeatures = context.getAccelerationStructureFeatures();
2880 if (!asFeatures.descriptorBindingAccelerationStructureUpdateAfterBind)
2881 TCU_THROW(NotSupportedError, "Update-after-bind not supported for acceleration structures");
2882 }
2883 break;
2884
2885 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
2886 TCU_THROW(InternalError, "Found VK_DESCRIPTOR_TYPE_MUTABLE_EXT in list of used descriptor types");
2887
2888 default:
2889 TCU_THROW(InternalError, "Unexpected descriptor type found in list of used descriptor types: " +
2890 de::toString(descType));
2891 }
2892 }
2893 }
2894
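// Indexing a descriptor array with a push constant requires the index to be dynamically uniform, which is
// gated on the shader*ArrayDynamicIndexing core features, or on their VK_EXT_descriptor_indexing
// equivalents for texel buffers and input attachments.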
2895 if (m_params.arrayAccessType == ArrayAccessType::PUSH_CONSTANT)
2896 {
2897 // These require dynamically uniform indices.
2898 const auto &usedDescriptorTypes = getUsedArrayDescriptorTypes(*m_params.descriptorSet, numIterations);
2899 const auto &features = context.getDeviceFeatures();
2900 const auto descriptorIndexingSupported = context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing");
2901 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2902
2903 for (const auto &descType : usedDescriptorTypes)
2904 {
2905 switch (descType)
2906 {
2907 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2908 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2909 if (!features.shaderUniformBufferArrayDynamicIndexing)
2910 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for uniform buffers");
2911 break;
2912
2913 case VK_DESCRIPTOR_TYPE_SAMPLER:
2914 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2915 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2916 if (!features.shaderSampledImageArrayDynamicIndexing)
2917 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for samplers and sampled images");
2918 break;
2919
2920 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2921 if (!features.shaderStorageImageArrayDynamicIndexing)
2922 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage images");
2923 break;
2924
2925 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2926 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2927 if (!features.shaderStorageBufferArrayDynamicIndexing)
2928 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage buffers");
2929 break;
2930
2931 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2932 if (!descriptorIndexingSupported || !indexingFeatures.shaderUniformTexelBufferArrayDynamicIndexing)
2933 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for uniform texel buffers");
2934 break;
2935
2936 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2937 if (!descriptorIndexingSupported || !indexingFeatures.shaderStorageTexelBufferArrayDynamicIndexing)
2938 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage texel buffers");
2939 break;
2940
2941 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2942 if (!descriptorIndexingSupported || !indexingFeatures.shaderInputAttachmentArrayDynamicIndexing)
2943 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for input attachments");
2944 break;
2945
2946 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2947 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2948 break;
2949
2950 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
2951 TCU_THROW(InternalError, "Found VK_DESCRIPTOR_TYPE_MUTABLE_EXT in list of used array descriptor types");
2952
2953 default:
2954 TCU_THROW(InternalError, "Unexpected descriptor type found in list of used descriptor types: " +
2955 de::toString(descType));
2956 }
2957 }
2958 }
2959
2960 // Check layout support.
2961 {
2962 const auto &vkd = context.getDeviceInterface();
2963 const auto device = getDevice(context);
2964 const auto stageFlags = m_params.getStageFlags();
2965
2966 {
2967 const auto layoutCreateFlags = m_params.getDstLayoutCreateFlags();
2968 const auto supported =
2969 m_params.descriptorSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
2970
2971 if (!supported)
2972 TCU_THROW(NotSupportedError, "Required descriptor set layout not supported");
2973 }
2974
2975 if (m_params.updateType == UpdateType::COPY)
2976 {
2977 const auto layoutCreateFlags = m_params.getSrcLayoutCreateFlags();
2978 const auto supported =
2979 m_params.descriptorSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
2980
2981 if (!supported)
2982 TCU_THROW(NotSupportedError, "Required descriptor set layout for source set not supported");
2983
2984 // Check specific layouts for the different source sets are supported.
2985 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2986 {
2987 const auto srcSet = m_params.descriptorSet->genSourceSet(m_params.sourceSetStrategy, iter);
2988 const auto srcLayoutSupported =
2989 srcSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
2990
2991 if (!srcLayoutSupported)
2992 TCU_THROW(NotSupportedError, "Descriptor set layout for source set at iteration " +
2993 de::toString(iter) + " not supported");
2994 }
2995 }
2996 }
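// Note: checkDescriptorSetLayout presumably wraps vkGetDescriptorSetLayoutSupport; a layout may be
// rejected even when all individual features are present (e.g. due to implementation descriptor limits),
// hence the explicit per-iteration checks above.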
2997
2998 // Check supported stores and stages.
2999 const bool vertexStage = isVertexStage(m_params.testingStage);
3000 const bool fragmentStage = (m_params.testingStage == TestingStage::FRAGMENT);
3001 const bool geometryStage = (m_params.testingStage == TestingStage::GEOMETRY);
3002 const bool tessellation =
3003 (m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL);
3004
3005 const auto &features = context.getDeviceFeatures();
3006
3007 if (vertexStage && !features.vertexPipelineStoresAndAtomics)
3008 TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
3009
3010 if (fragmentStage && !features.fragmentStoresAndAtomics)
3011 TCU_THROW(NotSupportedError, "Fragment shader stores and atomics not supported");
3012
3013 if (geometryStage && !features.geometryShader)
3014 TCU_THROW(NotSupportedError, "Geometry shader not supported");
3015
3016 if (tessellation && !features.tessellationShader)
3017 TCU_THROW(NotSupportedError, "Tessellation shaders not supported");
3018 }
3019
3020 // What to do at each iteration step; used to decide whether UPDATE_AFTER_BIND is exercised.
3021 enum class Step
3022 {
3023 UPDATE = 0,
3024 BIND,
3025 };
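// The normal sequence is { UPDATE, BIND }; with update-after-bind it flips to { BIND, UPDATE }, so the
// descriptors are written while the set is already bound and before the dispatch/draw/trace is recorded.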
3026
3027 // Create render pass.
3028 Move<VkRenderPass> buildRenderPass(const DeviceInterface &vkd, VkDevice device, const std::vector<Resource> &resources)
3029 {
3030 const auto imageFormat = getDescriptorImageFormat();
3031
3032 std::vector<VkAttachmentDescription> attachmentDescriptions;
3033 std::vector<VkAttachmentReference> attachmentReferences;
3034 std::vector<uint32_t> attachmentIndices;
3035
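// Only input attachment resources become render pass attachments; every other descriptor type is reached
// through the descriptor sets alone, so a set without input attachments yields a zero-attachment render pass.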
3036 for (const auto &resource : resources)
3037 {
3038 if (resource.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
3039 {
3040 const auto nextIndex = static_cast<uint32_t>(attachmentDescriptions.size());
3041
3042 const VkAttachmentDescription description = {
3043 0u, // VkAttachmentDescriptionFlags flags;
3044 imageFormat, // VkFormat format;
3045 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
3046 VK_ATTACHMENT_LOAD_OP_LOAD, // VkAttachmentLoadOp loadOp;
3047 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
3048 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
3049 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
3050 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout initialLayout;
3051 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout finalLayout;
3052 };
3053
3054 const VkAttachmentReference reference = {nextIndex, VK_IMAGE_LAYOUT_GENERAL};
3055
3056 attachmentIndices.push_back(nextIndex);
3057 attachmentDescriptions.push_back(description);
3058 attachmentReferences.push_back(reference);
3059 }
3060 }
3061
3062 const auto attachmentCount = static_cast<uint32_t>(attachmentDescriptions.size());
3063 DE_ASSERT(attachmentCount == static_cast<uint32_t>(attachmentIndices.size()));
3064 DE_ASSERT(attachmentCount == static_cast<uint32_t>(attachmentReferences.size()));
3065
3066 const VkSubpassDescription subpassDescription = {
3067 0u, // VkSubpassDescriptionFlags flags;
3068 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
3069 attachmentCount, // uint32_t inputAttachmentCount;
3070 de::dataOrNull(attachmentReferences), // const VkAttachmentReference* pInputAttachments;
3071 0u, // uint32_t colorAttachmentCount;
3072 nullptr, // const VkAttachmentReference* pColorAttachments;
3073 nullptr, // const VkAttachmentReference* pResolveAttachments;
3074 nullptr, // const VkAttachmentReference* pDepthStencilAttachment;
3075 0u, // uint32_t preserveAttachmentCount;
3076 nullptr, // const uint32_t* pPreserveAttachments;
3077 };
3078
3079 const VkRenderPassCreateInfo renderPassCreateInfo = {
3080 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
3081 nullptr, // const void* pNext;
3082 0u, // VkRenderPassCreateFlags flags;
3083 static_cast<uint32_t>(attachmentDescriptions.size()), // uint32_t attachmentCount;
3084 de::dataOrNull(attachmentDescriptions), // const VkAttachmentDescription* pAttachments;
3085 1u, // uint32_t subpassCount;
3086 &subpassDescription, // const VkSubpassDescription* pSubpasses;
3087 0u, // uint32_t dependencyCount;
3088 nullptr, // const VkSubpassDependency* pDependencies;
3089 };
3090
3091 return createRenderPass(vkd, device, &renderPassCreateInfo);
3092 }
3093
3094 // Create a graphics pipeline.
3095 Move<VkPipeline> buildGraphicsPipeline(const DeviceInterface &vkd, VkDevice device, VkPipelineLayout pipelineLayout,
3096 VkShaderModule vertModule, VkShaderModule tescModule, VkShaderModule teseModule,
3097 VkShaderModule geomModule, VkShaderModule fragModule, VkRenderPass renderPass)
3098 {
3099 const auto extent = getDefaultExtent();
3100 const std::vector<VkViewport> viewports(1u, makeViewport(extent));
3101 const std::vector<VkRect2D> scissors(1u, makeRect2D(extent));
3102 const auto hasTess = (tescModule != DE_NULL || teseModule != DE_NULL);
3103 const auto topology = (hasTess ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
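// Tessellation stages require a patch primitive topology; otherwise a plain triangle list is used to match
// the 3-vertex draw recorded in iterate().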
3104
3105 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = initVulkanStructure();
3106
3107 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
3108 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
3109 nullptr, // const void* pNext;
3110 0u, // VkPipelineInputAssemblyStateCreateFlags flags;
3111 topology, // VkPrimitiveTopology topology;
3112 VK_FALSE, // VkBool32 primitiveRestartEnable;
3113 };
3114
3115 const VkPipelineTessellationStateCreateInfo tessellationStateCreateInfo = {
3116 VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType sType;
3117 nullptr, // const void* pNext;
3118 0u, // VkPipelineTessellationStateCreateFlags flags;
3119 (hasTess ? 3u : 0u), // uint32_t patchControlPoints;
3120 };
3121
3122 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
3123 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
3124 nullptr, // const void* pNext;
3125 0u, // VkPipelineViewportStateCreateFlags flags;
3126 static_cast<uint32_t>(viewports.size()), // uint32_t viewportCount;
3127 de::dataOrNull(viewports), // const VkViewport* pViewports;
3128 static_cast<uint32_t>(scissors.size()), // uint32_t scissorCount;
3129 de::dataOrNull(scissors), // const VkRect2D* pScissors;
3130 };
3131
3132 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
3133 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
3134 nullptr, // const void* pNext;
3135 0u, // VkPipelineRasterizationStateCreateFlags flags;
3136 VK_FALSE, // VkBool32 depthClampEnable;
3137 (fragModule == DE_NULL ? VK_TRUE : VK_FALSE), // VkBool32 rasterizerDiscardEnable;
3138 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
3139 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
3140 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
3141 VK_FALSE, // VkBool32 depthBiasEnable;
3142 0.0f, // float depthBiasConstantFactor;
3143 0.0f, // float depthBiasClamp;
3144 0.0f, // float depthBiasSlopeFactor;
3145 1.0f, // float lineWidth;
3146 };
3147
3148 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
3149 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
3150 nullptr, // const void* pNext;
3151 0u, // VkPipelineMultisampleStateCreateFlags flags;
3152 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
3153 VK_FALSE, // VkBool32 sampleShadingEnable;
3154 1.0f, // float minSampleShading;
3155 nullptr, // const VkSampleMask* pSampleMask;
3156 VK_FALSE, // VkBool32 alphaToCoverageEnable;
3157 VK_FALSE, // VkBool32 alphaToOneEnable;
3158 };
3159
3160 const VkPipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = initVulkanStructure();
3161
3162 const VkPipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = initVulkanStructure();
3163
3164 return makeGraphicsPipeline(vkd, device, pipelineLayout, vertModule, tescModule, teseModule, geomModule, fragModule,
3165 renderPass, 0u, &vertexInputStateCreateInfo, &inputAssemblyStateCreateInfo,
3166 (hasTess ? &tessellationStateCreateInfo : nullptr), &viewportStateCreateInfo,
3167 &rasterizationStateCreateInfo, &multisampleStateCreateInfo,
3168 &depthStencilStateCreateInfo, &colorBlendStateCreateInfo, nullptr);
3169 }
3170
3171 Move<VkFramebuffer> buildFramebuffer(const DeviceInterface &vkd, VkDevice device, VkRenderPass renderPass,
3172 const std::vector<Resource> &resources)
3173 {
3174 const auto extent = getDefaultExtent();
3175
3176 std::vector<VkImageView> inputAttachments;
3177 for (const auto &resource : resources)
3178 {
3179 if (resource.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
3180 inputAttachments.push_back(resource.imageView.get());
3181 }
3182
3183 const VkFramebufferCreateInfo framebufferCreateInfo = {
3184 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
3185 nullptr, // const void* pNext;
3186 0u, // VkFramebufferCreateFlags flags;
3187 renderPass, // VkRenderPass renderPass;
3188 static_cast<uint32_t>(inputAttachments.size()), // uint32_t attachmentCount;
3189 de::dataOrNull(inputAttachments), // const VkImageView* pAttachments;
3190 extent.width, // uint32_t width;
3191 extent.height, // uint32_t height;
3192 extent.depth, // uint32_t layers;
3193 };
3194
3195 return createFramebuffer(vkd, device, &framebufferCreateInfo);
3196 }
3197
3198 tcu::TestStatus MutableTypesInstance::iterate()
3199 {
3200 const auto &vki = m_context.getInstanceInterface();
3201 const auto &vkd = m_context.getDeviceInterface();
3202 const auto device = getDevice(m_context);
3203 const auto physDev = m_context.getPhysicalDevice();
3204 const auto qIndex = m_context.getUniversalQueueFamilyIndex();
3205 const auto queue = getDeviceQueue(vkd, device, qIndex, 0u);
3206
3207 SimpleAllocator alloc(
3208 vkd, device,
3209 getPhysicalDeviceMemoryProperties(vki, physDev));
3210
3211 const auto &paramSet = m_params.descriptorSet;
3212 const auto numIterations = paramSet->maxTypes();
3213 const bool useExternalImage = needsExternalImage(*m_params.descriptorSet);
3214 const bool useExternalSampler = needsExternalSampler(*m_params.descriptorSet);
3215 const auto stageFlags = m_params.getStageFlags();
3216 const bool srcSetNeeded = (m_params.updateType == UpdateType::COPY);
3217 const bool updateAfterBind = (m_params.updateMoment == UpdateMoment::UPDATE_AFTER_BIND);
3218 const auto bindPoint = m_params.getBindPoint();
3219 const bool rayTracing = isRayTracingStage(m_params.testingStage);
3220 const bool useAABBs = (m_params.testingStage == TestingStage::INTERSECTION);
3221
3222 // Resources for each iteration.
3223 std::vector<std::vector<Resource>> allResources;
3224 allResources.reserve(numIterations);
3225
3226 // Command pool.
3227 const auto cmdPool = makeCommandPool(vkd, device, qIndex);
3228
3229 // Descriptor pool and set for the active (dst) descriptor set.
3230 const auto dstPoolFlags = m_params.getDstPoolCreateFlags();
3231 const auto dstLayoutFlags = m_params.getDstLayoutCreateFlags();
3232
3233 const auto dstPool = paramSet->makeDescriptorPool(vkd, device, m_params.poolMutableStrategy, dstPoolFlags);
3234 const auto dstLayout = paramSet->makeDescriptorSetLayout(vkd, device, stageFlags, dstLayoutFlags);
3235 const auto varCount = paramSet->getVariableDescriptorCount();
3236
3237 using VariableCountInfoPtr = de::MovePtr<VkDescriptorSetVariableDescriptorCountAllocateInfo>;
3238
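// If the last binding is variable-sized (the unbounded case), the actual descriptor count must be supplied
// at allocation time by chaining VkDescriptorSetVariableDescriptorCountAllocateInfo into the allocate info.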
3239 VariableCountInfoPtr dstVariableCountInfo;
3240 if (varCount)
3241 {
3242 dstVariableCountInfo = VariableCountInfoPtr(new VkDescriptorSetVariableDescriptorCountAllocateInfo);
3243 *dstVariableCountInfo = initVulkanStructure();
3244
3245 dstVariableCountInfo->descriptorSetCount = 1u;
3246 dstVariableCountInfo->pDescriptorCounts = &(varCount.get());
3247 }
3248 const auto dstSet = makeDescriptorSet(vkd, device, dstPool.get(), dstLayout.get(), dstVariableCountInfo.get());
3249
3250 // Source pool and set (optional).
3251 const auto srcPoolFlags = m_params.getSrcPoolCreateFlags();
3252 const auto srcLayoutFlags = m_params.getSrcLayoutCreateFlags();
3253 DescriptorSetPtr iterationSrcSet;
3254 Move<VkDescriptorPool> srcPool;
3255 Move<VkDescriptorSetLayout> srcLayout;
3256 Move<VkDescriptorSet> srcSet;
3257
3258 // Extra set for external resources and output buffer.
3259 std::vector<Resource> extraResources;
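// Binding 0 of the extra set is the output buffer: one uint32_t slot per iteration, written by the shader
// and read back after every submission to check the run actually executed.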
3260 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vkd, device, alloc, qIndex, queue, useAABBs, 0u,
3261 numIterations);
3262 if (useExternalImage)
3263 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, vkd, device, alloc, qIndex, queue, useAABBs,
3264 getExternalSampledImageValue());
3265 if (useExternalSampler)
3266 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_SAMPLER, vkd, device, alloc, qIndex, queue, useAABBs, 0u);
3267 if (rayTracing)
3268 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, vkd, device, alloc, qIndex, queue,
3269 useAABBs, 0u);
3270
3271 Move<VkDescriptorPool> extraPool;
3272 {
3273 DescriptorPoolBuilder poolBuilder;
3274 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
3275 if (useExternalImage)
3276 poolBuilder.addType(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
3277 if (useExternalSampler)
3278 poolBuilder.addType(VK_DESCRIPTOR_TYPE_SAMPLER);
3279 if (rayTracing)
3280 poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
3281 extraPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
3282 }
3283
3284 Move<VkDescriptorSetLayout> extraLayout;
3285 {
3286 DescriptorSetLayoutBuilder layoutBuilder;
3287 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, stageFlags, nullptr);
3288 if (useExternalImage)
3289 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1u, stageFlags, nullptr);
3290 if (useExternalSampler)
3291 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_SAMPLER, 1u, stageFlags, nullptr);
3292 if (rayTracing)
3293 {
3294 // The extra acceleration structure is used from the ray generation shader only.
3295 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, 1u, VK_SHADER_STAGE_RAYGEN_BIT_KHR,
3296 nullptr);
3297 }
3298 extraLayout = layoutBuilder.build(vkd, device);
3299 }
3300
3301 const auto extraSet = makeDescriptorSet(vkd, device, extraPool.get(), extraLayout.get());
3302
3303 // Update extra set.
3304 using DescriptorBufferInfoPtr = de::MovePtr<VkDescriptorBufferInfo>;
3305 using DescriptorImageInfoPtr = de::MovePtr<VkDescriptorImageInfo>;
3306 using DescriptorASInfoPtr = de::MovePtr<VkWriteDescriptorSetAccelerationStructureKHR>;
3307
3308 uint32_t bindingCount = 0u;
3309 DescriptorBufferInfoPtr bufferInfoPtr;
3310 DescriptorImageInfoPtr imageInfoPtr;
3311 DescriptorImageInfoPtr samplerInfoPtr;
3312 DescriptorASInfoPtr asWriteInfoPtr;
3313
3314 const auto outputBufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t) * static_cast<size_t>(numIterations));
3315 bufferInfoPtr = DescriptorBufferInfoPtr(new VkDescriptorBufferInfo(
3316 makeDescriptorBufferInfo(extraResources[bindingCount++].bufferWithMemory->get(), 0ull, outputBufferSize)));
3317 if (useExternalImage)
3318 imageInfoPtr = DescriptorImageInfoPtr(new VkDescriptorImageInfo(
3319 makeDescriptorImageInfo(DE_NULL, extraResources[bindingCount++].imageView.get(), VK_IMAGE_LAYOUT_GENERAL)));
3320 if (useExternalSampler)
3321 samplerInfoPtr = DescriptorImageInfoPtr(new VkDescriptorImageInfo(
3322 makeDescriptorImageInfo(extraResources[bindingCount++].sampler.get(), DE_NULL, VK_IMAGE_LAYOUT_GENERAL)));
3323 if (rayTracing)
3324 {
3325 asWriteInfoPtr = DescriptorASInfoPtr(new VkWriteDescriptorSetAccelerationStructureKHR);
3326 *asWriteInfoPtr = initVulkanStructure();
3327 asWriteInfoPtr->accelerationStructureCount = 1u;
3328 asWriteInfoPtr->pAccelerationStructures = extraResources[bindingCount++].asData.tlas.get()->getPtr();
3329 }
3330
3331 {
3332 bindingCount = 0u;
3333 DescriptorSetUpdateBuilder updateBuilder;
3334 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3335 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, bufferInfoPtr.get());
3336 if (useExternalImage)
3337 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3338 VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, imageInfoPtr.get());
3339 if (useExternalSampler)
3340 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3341 VK_DESCRIPTOR_TYPE_SAMPLER, samplerInfoPtr.get());
3342 if (rayTracing)
3343 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3344 VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, asWriteInfoPtr.get());
3345 updateBuilder.update(vkd, device);
3346 }
3347
3348 // Push constants.
3349 const uint32_t zero = 0u;
3350 const VkPushConstantRange pcRange = {stageFlags, 0u /*offset*/, static_cast<uint32_t>(sizeof(zero)) /*size*/};
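// A single uint32_t (always zero) is pushed every iteration; in the index_push_constant variants the
// generated shaders presumably fold it into their array indices, keeping those indices dynamically uniform
// without changing which elements are accessed.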
3351
3352 // Needed for some test variants.
3353 Move<VkShaderModule> vertPassthrough;
3354 Move<VkShaderModule> tesePassthrough;
3355 Move<VkShaderModule> tescPassthrough;
3356 Move<VkShaderModule> rgenPassthrough;
3357 Move<VkShaderModule> missPassthrough;
3358
3359 if (m_params.testingStage == TestingStage::FRAGMENT || m_params.testingStage == TestingStage::GEOMETRY ||
3360 m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL)
3361 {
3362 vertPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("vert"), 0u);
3363 }
3364
3365 if (m_params.testingStage == TestingStage::TESS_CONTROL)
3366 {
3367 tesePassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("tese"), 0u);
3368 }
3369
3370 if (m_params.testingStage == TestingStage::TESS_EVAL)
3371 {
3372 tescPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("tesc"), 0u);
3373 }
3374
3375 if (m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::ANY_HIT ||
3376 m_params.testingStage == TestingStage::INTERSECTION || m_params.testingStage == TestingStage::MISS ||
3377 m_params.testingStage == TestingStage::CALLABLE)
3378 {
3379 rgenPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0u);
3380 }
3381
3382 if (m_params.testingStage == TestingStage::INTERSECTION)
3383 {
3384 missPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("miss"), 0u);
3385 }
3386
3387 for (uint32_t iteration = 0u; iteration < numIterations; ++iteration)
3388 {
3389 // Generate source set for the current iteration.
3390 if (srcSetNeeded)
3391 {
3392 // Free previous descriptor set before rebuilding the pool.
3393 srcSet = Move<VkDescriptorSet>();
3394 iterationSrcSet = paramSet->genSourceSet(m_params.sourceSetStrategy, iteration);
3395 srcPool = iterationSrcSet->makeDescriptorPool(vkd, device, m_params.poolMutableStrategy, srcPoolFlags);
3396 srcLayout = iterationSrcSet->makeDescriptorSetLayout(vkd, device, stageFlags, srcLayoutFlags);
3397
3398 const auto srcVarCount = iterationSrcSet->getVariableDescriptorCount();
3399 VariableCountInfoPtr srcVariableCountInfo;
3400
3401 if (srcVarCount)
3402 {
3403 srcVariableCountInfo = VariableCountInfoPtr(new VkDescriptorSetVariableDescriptorCountAllocateInfo);
3404 *srcVariableCountInfo = initVulkanStructure();
3405
3406 srcVariableCountInfo->descriptorSetCount = 1u;
3407 srcVariableCountInfo->pDescriptorCounts = &(srcVarCount.get());
3408 }
3409
3410 srcSet = makeDescriptorSet(vkd, device, srcPool.get(), srcLayout.get(), srcVariableCountInfo.get());
3411 }
3412
3413 // Set layouts and sets used in the pipeline.
3414 const std::vector<VkDescriptorSetLayout> setLayouts = {dstLayout.get(), extraLayout.get()};
3415 const std::vector<VkDescriptorSet> usedSets = {dstSet.get(), extraSet.get()};
3416
3417 // Create resources.
3418 allResources.emplace_back(paramSet->createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs));
3419 const auto &resources = allResources.back();
3420
3421 // Make pipeline for the current iteration.
3422 const auto pipelineLayout = makePipelineLayout(vkd, device, static_cast<uint32_t>(setLayouts.size()),
3423 de::dataOrNull(setLayouts), 1u, &pcRange);
3424 const auto moduleName = shaderName(iteration);
3425 const auto shaderModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get(moduleName), 0u);
3426
3427 Move<VkPipeline> pipeline;
3428 Move<VkRenderPass> renderPass;
3429 Move<VkFramebuffer> framebuffer;
3430
3431 uint32_t shaderGroupHandleSize = 0u;
3432 uint32_t shaderGroupBaseAlignment = 1u;
3433
3434 de::MovePtr<BufferWithMemory> raygenSBT;
3435 de::MovePtr<BufferWithMemory> missSBT;
3436 de::MovePtr<BufferWithMemory> hitSBT;
3437 de::MovePtr<BufferWithMemory> callableSBT;
3438
3439 VkStridedDeviceAddressRegionKHR raygenSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
3440 VkStridedDeviceAddressRegionKHR missSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
3441 VkStridedDeviceAddressRegionKHR hitSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
3442 VkStridedDeviceAddressRegionKHR callableSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
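// Each shader binding table below holds exactly one group handle, so every populated region uses
// stride == size == shaderGroupHandleSize.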
3443
3444 if (bindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
3445 pipeline = makeComputePipeline(vkd, device, pipelineLayout.get(), shaderModule.get());
3446 else if (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS)
3447 {
3448 VkShaderModule vertModule = DE_NULL;
3449 VkShaderModule teseModule = DE_NULL;
3450 VkShaderModule tescModule = DE_NULL;
3451 VkShaderModule geomModule = DE_NULL;
3452 VkShaderModule fragModule = DE_NULL;
3453
3454 if (m_params.testingStage == TestingStage::VERTEX)
3455 vertModule = shaderModule.get();
3456 else if (m_params.testingStage == TestingStage::FRAGMENT)
3457 {
3458 vertModule = vertPassthrough.get();
3459 fragModule = shaderModule.get();
3460 }
3461 else if (m_params.testingStage == TestingStage::GEOMETRY)
3462 {
3463 vertModule = vertPassthrough.get();
3464 geomModule = shaderModule.get();
3465 }
3466 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
3467 {
3468 vertModule = vertPassthrough.get();
3469 teseModule = tesePassthrough.get();
3470 tescModule = shaderModule.get();
3471 }
3472 else if (m_params.testingStage == TestingStage::TESS_EVAL)
3473 {
3474 vertModule = vertPassthrough.get();
3475 tescModule = tescPassthrough.get();
3476 teseModule = shaderModule.get();
3477 }
3478 else
3479 DE_ASSERT(false);
3480
3481 renderPass = buildRenderPass(vkd, device, resources);
3482 pipeline = buildGraphicsPipeline(vkd, device, pipelineLayout.get(), vertModule, tescModule, teseModule,
3483 geomModule, fragModule, renderPass.get());
3484 framebuffer = buildFramebuffer(vkd, device, renderPass.get(), resources);
3485 }
3486 else if (bindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR)
3487 {
3488 const auto rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
3489 const auto rayTracingPropertiesKHR = makeRayTracingProperties(vki, physDev);
3490 shaderGroupHandleSize = rayTracingPropertiesKHR->getShaderGroupHandleSize();
3491 shaderGroupBaseAlignment = rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
3492
3493 VkShaderModule rgenModule = DE_NULL;
3494 VkShaderModule isecModule = DE_NULL;
3495 VkShaderModule ahitModule = DE_NULL;
3496 VkShaderModule chitModule = DE_NULL;
3497 VkShaderModule missModule = DE_NULL;
3498 VkShaderModule callModule = DE_NULL;
3499
3500 const uint32_t rgenGroup = 0u;
3501 uint32_t hitGroup = 0u;
3502 uint32_t missGroup = 0u;
3503 uint32_t callGroup = 0u;
3504
3505 if (m_params.testingStage == TestingStage::RAY_GEN)
3506 {
3507 rgenModule = shaderModule.get();
3508 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3509 }
3510 else if (m_params.testingStage == TestingStage::INTERSECTION)
3511 {
3512 hitGroup = 1u;
3513 missGroup = 2u;
3514 rgenModule = rgenPassthrough.get();
3515 missModule = missPassthrough.get();
3516 isecModule = shaderModule.get();
3517 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3518 rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR, isecModule, hitGroup);
3519 rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, missModule, missGroup);
3520 }
3521 else if (m_params.testingStage == TestingStage::ANY_HIT)
3522 {
3523 hitGroup = 1u;
3524 rgenModule = rgenPassthrough.get();
3525 ahitModule = shaderModule.get();
3526 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3527 rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, ahitModule, hitGroup);
3528 }
3529 else if (m_params.testingStage == TestingStage::CLOSEST_HIT)
3530 {
3531 hitGroup = 1u;
3532 rgenModule = rgenPassthrough.get();
3533 chitModule = shaderModule.get();
3534 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3535 rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, chitModule, hitGroup);
3536 }
3537 else if (m_params.testingStage == TestingStage::MISS)
3538 {
3539 missGroup = 1u;
3540 rgenModule = rgenPassthrough.get();
3541 missModule = shaderModule.get();
3542 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3543 rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, missModule, missGroup);
3544 }
3545 else if (m_params.testingStage == TestingStage::CALLABLE)
3546 {
3547 callGroup = 1u;
3548 rgenModule = rgenPassthrough.get();
3549 callModule = shaderModule.get();
3550 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3551 rayTracingPipeline->addShader(VK_SHADER_STAGE_CALLABLE_BIT_KHR, callModule, callGroup);
3552 }
3553 else
3554 DE_ASSERT(false);
3555
3556 pipeline = rayTracingPipeline->createPipeline(vkd, device, pipelineLayout.get());
3557
3558 raygenSBT = rayTracingPipeline->createShaderBindingTable(
3559 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, rgenGroup, 1u);
3560 raygenSBTRegion =
3561 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenSBT->get(), 0ull),
3562 shaderGroupHandleSize, shaderGroupHandleSize);
3563
3564 if (missGroup > 0u)
3565 {
3566 missSBT = rayTracingPipeline->createShaderBindingTable(
3567 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, missGroup, 1u);
3568 missSBTRegion =
3569 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, missSBT->get(), 0ull),
3570 shaderGroupHandleSize, shaderGroupHandleSize);
3571 }
3572
3573 if (hitGroup > 0u)
3574 {
3575 hitSBT = rayTracingPipeline->createShaderBindingTable(
3576 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, hitGroup, 1u);
3577 hitSBTRegion =
3578 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitSBT->get(), 0ull),
3579 shaderGroupHandleSize, shaderGroupHandleSize);
3580 }
3581
3582 if (callGroup > 0u)
3583 {
3584 callableSBT = rayTracingPipeline->createShaderBindingTable(
3585 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, callGroup, 1u);
3586 callableSBTRegion =
3587 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, callableSBT->get(), 0ull),
3588 shaderGroupHandleSize, shaderGroupHandleSize);
3589 }
3590 }
3591 else
3592 DE_ASSERT(false);
3593
3594 // Command buffer for the current iteration.
3595 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
3596 const auto cmdBuffer = cmdBufferPtr.get();
3597
3598 beginCommandBuffer(vkd, cmdBuffer);
3599
3600 const Step steps[] = {(updateAfterBind ? Step::BIND : Step::UPDATE),
3601 (updateAfterBind ? Step::UPDATE : Step::BIND)};
3602
3603 for (const auto &step : steps)
3604 {
3605 if (step == Step::BIND)
3606 {
3607 vkd.cmdBindPipeline(cmdBuffer, bindPoint, pipeline.get());
3608 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, pipelineLayout.get(), 0u,
3609 static_cast<uint32_t>(usedSets.size()), de::dataOrNull(usedSets), 0u,
3610 nullptr);
3611 }
3612 else // Step::UPDATE
3613 {
3614 if (srcSetNeeded)
3615 {
3616 // Note: these operations must be called on paramSet rather than iterationSrcSet. The latter is a
3617 // compatible set with compatible bindings but, when a binding has been changed from non-mutable to
3618 // mutable or to an extended mutable type, the lists of descriptor types for its mutable bindings are
3619 // not in iteration order like those of the original set, so they must not be used when updating or
3620 // copying sets.
3621 paramSet->updateDescriptorSet(vkd, device, srcSet.get(), iteration, resources);
3622 paramSet->copyDescriptorSet(vkd, device, srcSet.get(), dstSet.get());
3623 }
3624 else
3625 {
3626 paramSet->updateDescriptorSet(vkd, device, dstSet.get(), iteration, resources);
3627 }
3628 }
3629 }
3630
3631 // Run shader.
3632 vkd.cmdPushConstants(cmdBuffer, pipelineLayout.get(), stageFlags, 0u, static_cast<uint32_t>(sizeof(zero)),
3633 &zero);
3634
3635 if (bindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
3636 vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
3637 else if (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS)
3638 {
3639 const auto extent = getDefaultExtent();
3640 const auto renderArea = makeRect2D(extent);
3641
3642 beginRenderPass(vkd, cmdBuffer, renderPass.get(), framebuffer.get(), renderArea);
3643 vkd.cmdDraw(cmdBuffer, 3u, 1u, 0u, 0u);
3644 endRenderPass(vkd, cmdBuffer);
3645 }
3646 else if (bindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR)
3647 {
3648 vkd.cmdTraceRaysKHR(cmdBuffer, &raygenSBTRegion, &missSBTRegion, &hitSBTRegion, &callableSBTRegion, 1u, 1u,
3649 1u);
3650 }
3651 else
3652 DE_ASSERT(false);
3653
3654 endCommandBuffer(vkd, cmdBuffer);
3655 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
3656
3657 // Verify output buffer.
3658 {
3659 const auto outputBufferVal = extraResources[0].getStoredValue(vkd, device, alloc, qIndex, queue, iteration);
3660 DE_ASSERT(static_cast<bool>(outputBufferVal));
3661
3662 const auto expectedValue = getExpectedOutputBufferValue();
3663 if (outputBufferVal.get() != expectedValue)
3664 {
3665 std::ostringstream msg;
3666 msg << "Iteration " << iteration << ": unexpected value found in output buffer (expected "
3667 << expectedValue << " and found " << outputBufferVal.get() << ")";
3668 TCU_FAIL(msg.str());
3669 }
3670 }
3671
3672 // Verify descriptor writes.
3673 {
3674 size_t resourcesOffset = 0;
3675 const auto writeMask = getStoredValueMask();
3676 const auto numBindings = paramSet->numBindings();
3677
3678 for (uint32_t bindingIdx = 0u; bindingIdx < numBindings; ++bindingIdx)
3679 {
3680 const auto binding = paramSet->getBinding(bindingIdx);
3681 const auto bindingTypes = binding->typesAtIteration(iteration);
3682
3683 for (size_t descriptorIdx = 0; descriptorIdx < bindingTypes.size(); ++descriptorIdx)
3684 {
3685 const auto &descriptorType = bindingTypes[descriptorIdx];
3686 if (!isShaderWritable(descriptorType))
3687 continue;
3688
3689 const auto &resource = resources[resourcesOffset + descriptorIdx];
3690 const auto initialValue = resource.initialValue;
3691 const auto storedValuePtr = resource.getStoredValue(vkd, device, alloc, qIndex, queue);
3692
3693 DE_ASSERT(static_cast<bool>(storedValuePtr));
3694 const auto storedValue = storedValuePtr.get();
3695 const auto expectedValue = (initialValue | writeMask);
3696 if (expectedValue != storedValue)
3697 {
3698 std::ostringstream msg;
3699 msg << "Iteration " << iteration << ": descriptor at binding " << bindingIdx << " index "
3700 << descriptorIdx << " with type " << de::toString(descriptorType)
3701 << " contains unexpected value " << std::hex << storedValue << " (expected "
3702 << expectedValue << ")";
3703 TCU_FAIL(msg.str());
3704 }
3705 }
3706
3707 resourcesOffset += bindingTypes.size();
3708 }
3709 }
3710 }
3711
3712 return tcu::TestStatus::pass("Pass");
3713 }
3714
3715 using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
3716
3717 void createMutableTestVariants(tcu::TestContext &testCtx, tcu::TestCaseGroup *parentGroup,
3718 const DescriptorSetPtr &descriptorSet, const std::vector<TestingStage> &stagesToTest)
3719 {
3720 const struct
3721 {
3722 UpdateType updateType;
3723 const char *name;
3724 } updateTypes[] = {
3725 {UpdateType::WRITE, "update_write"},
3726 {UpdateType::COPY, "update_copy"},
3727 };
3728
3729 const struct
3730 {
3731 SourceSetStrategy sourceSetStrategy;
3732 const char *name;
3733 } sourceStrategies[] = {
3734 {SourceSetStrategy::MUTABLE, "mutable_source"},
3735 {SourceSetStrategy::NONMUTABLE, "nonmutable_source"},
3736 {SourceSetStrategy::NO_SOURCE, "no_source"},
3737 };
3738
3739 const struct
3740 {
3741 SourceSetType sourceSetType;
3742 const char *name;
3743 } sourceTypes[] = {
3744 {SourceSetType::NORMAL, "normal_source"},
3745 {SourceSetType::HOST_ONLY, "host_only_source"},
3746 {SourceSetType::NO_SOURCE, "no_source"},
3747 };
3748
3749 const struct
3750 {
3751 PoolMutableStrategy poolMutableStrategy;
3752 const char *name;
3753 } poolStrategies[] = {
3754 {PoolMutableStrategy::KEEP_TYPES, "pool_same_types"},
3755 {PoolMutableStrategy::NO_TYPES, "pool_no_types"},
3756 {PoolMutableStrategy::EXPAND_TYPES, "pool_expand_types"},
3757 };
3758
3759 const struct
3760 {
3761 UpdateMoment updateMoment;
3762 const char *name;
3763 } updateMoments[] = {
3764 {UpdateMoment::NORMAL, "pre_update"},
3765 {UpdateMoment::UPDATE_AFTER_BIND, "update_after_bind"},
3766 };
3767
3768 const struct
3769 {
3770 ArrayAccessType arrayAccessType;
3771 const char *name;
3772 } arrayAccessTypes[] = {
3773 {ArrayAccessType::CONSTANT, "index_constant"},
3774 {ArrayAccessType::PUSH_CONSTANT, "index_push_constant"},
3775 {ArrayAccessType::NO_ARRAY, "no_array"},
3776 };
3777
3778 const struct StageAndName
3779 {
3780 TestingStage testingStage;
3781 const char *name;
3782 } testStageList[] = {
3783 {TestingStage::COMPUTE, "comp"}, {TestingStage::VERTEX, "vert"}, {TestingStage::TESS_CONTROL, "tesc"},
3784 {TestingStage::TESS_EVAL, "tese"}, {TestingStage::GEOMETRY, "geom"}, {TestingStage::FRAGMENT, "frag"},
3785 {TestingStage::RAY_GEN, "rgen"}, {TestingStage::INTERSECTION, "isec"}, {TestingStage::ANY_HIT, "ahit"},
3786 {TestingStage::CLOSEST_HIT, "chit"}, {TestingStage::MISS, "miss"}, {TestingStage::CALLABLE, "call"},
3787 };
3788
3789 const bool hasArrays = descriptorSet->hasArrays();
3790 const bool hasInputAttachments = usesInputAttachments(*descriptorSet);
3791
3792 for (const auto &ut : updateTypes)
3793 {
3794 GroupPtr updateGroup(new tcu::TestCaseGroup(testCtx, ut.name));
3795
3796 for (const auto &srcStrategy : sourceStrategies)
3797 {
3798 // Skip combinations that make no sense: writes take no source set, copies require one, and a
3798 // non-mutable source cannot express aliased mutable bindings.
3799 if (ut.updateType == UpdateType::WRITE && srcStrategy.sourceSetStrategy != SourceSetStrategy::NO_SOURCE)
3800 continue;
3801
3802 if (ut.updateType == UpdateType::COPY && srcStrategy.sourceSetStrategy == SourceSetStrategy::NO_SOURCE)
3803 continue;
3804
3805 if (srcStrategy.sourceSetStrategy == SourceSetStrategy::NONMUTABLE && descriptorSet->needsAnyAliasing())
3806 continue;
3807
3808 GroupPtr srcStrategyGroup(new tcu::TestCaseGroup(testCtx, srcStrategy.name));
3809
3810 for (const auto &srcType : sourceTypes)
3811 {
3812 // Skip combinations that make no sense: a source set type is only meaningful for copies.
3813 if (ut.updateType == UpdateType::WRITE && srcType.sourceSetType != SourceSetType::NO_SOURCE)
3814 continue;
3815
3816 if (ut.updateType == UpdateType::COPY && srcType.sourceSetType == SourceSetType::NO_SOURCE)
3817 continue;
3818
3819 GroupPtr srcTypeGroup(new tcu::TestCaseGroup(testCtx, srcType.name));
3820
3821 for (const auto &poolStrategy : poolStrategies)
3822 {
3823 GroupPtr poolStrategyGroup(new tcu::TestCaseGroup(testCtx, poolStrategy.name));
3824
3825 for (const auto &moment : updateMoments)
3826 {
3827 //if (moment.updateMoment == UpdateMoment::UPDATE_AFTER_BIND && srcType.sourceSetType == SourceSetType::HOST_ONLY)
3828 // continue;
3829
3830 if (moment.updateMoment == UpdateMoment::UPDATE_AFTER_BIND && hasInputAttachments)
3831 continue;
3832
3833 GroupPtr momentGroup(new tcu::TestCaseGroup(testCtx, moment.name));
3834
3835 for (const auto &accessType : arrayAccessTypes)
3836 {
3837 // Skip combinations that make no sense: array access variants only apply to sets that contain arrays.
3838 if (hasArrays && accessType.arrayAccessType == ArrayAccessType::NO_ARRAY)
3839 continue;
3840
3841 if (!hasArrays && accessType.arrayAccessType != ArrayAccessType::NO_ARRAY)
3842 continue;
3843
3844 GroupPtr accessTypeGroup(new tcu::TestCaseGroup(testCtx, accessType.name));
3845
3846 for (const auto &testStage : stagesToTest)
3847 {
3848 const auto beginItr = std::begin(testStageList);
3849 const auto endItr = std::end(testStageList);
3850 const auto iter = std::find_if(beginItr, endItr,
3851 [testStage](const StageAndName &ts)
3852 { return ts.testingStage == testStage; });
3853
3854 DE_ASSERT(iter != endItr);
3855 const auto &stage = *iter;
3856
3857 if (hasInputAttachments && stage.testingStage != TestingStage::FRAGMENT)
3858 continue;
3859
3860 TestParams params = {
3861 descriptorSet,
3862 ut.updateType,
3863 srcStrategy.sourceSetStrategy,
3864 srcType.sourceSetType,
3865 poolStrategy.poolMutableStrategy,
3866 moment.updateMoment,
3867 accessType.arrayAccessType,
3868 stage.testingStage,
3869 };
3870
3871 accessTypeGroup->addChild(new MutableTypesTest(testCtx, stage.name, params));
3872 }
3873
3874 momentGroup->addChild(accessTypeGroup.release());
3875 }
3876
3877 poolStrategyGroup->addChild(momentGroup.release());
3878 }
3879
3880 srcTypeGroup->addChild(poolStrategyGroup.release());
3881 }
3882
3883 srcStrategyGroup->addChild(srcTypeGroup.release());
3884 }
3885
3886 updateGroup->addChild(srcStrategyGroup.release());
3887 }
3888
3889 parentGroup->addChild(updateGroup.release());
3890 }
3891 }
3892
3893 } // namespace
3894
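// Maps a descriptor type to a lowercase group name by stripping the common prefix,
// e.g. descriptorTypeStr(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) == "uniform_buffer".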
3895 std::string descriptorTypeStr(VkDescriptorType descriptorType)
3896 {
3897 static const auto prefixLen = std::string("VK_DESCRIPTOR_TYPE_").size();
3898 return de::toLower(de::toString(descriptorType).substr(prefixLen));
3899 }
3900
3901 static void createChildren(tcu::TestCaseGroup *testGroup);
3902
3903 static void cleanupGroup(tcu::TestCaseGroup *testGroup)
3904 {
3905 DE_UNREF(testGroup);
3906 // Destroy singleton objects.
3907 g_singletonDevice.clear();
3908 }
3909
3910 tcu::TestCaseGroup *createDescriptorMutableTests(tcu::TestContext &testCtx)
3911 {
3912 return createTestGroup(testCtx, "mutable_descriptor", createChildren, cleanupGroup);
3913 }
3914
3915 void createChildren(tcu::TestCaseGroup *mainGroup)
3916 {
3917 tcu::TestContext &testCtx = mainGroup->getTestContext();
3918
3919 const VkDescriptorType basicDescriptorTypes[] = {
3920 VK_DESCRIPTOR_TYPE_SAMPLER,
3921 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
3922 VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
3923 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
3924 VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
3925 VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
3926 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
3927 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
3928 VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
3929 VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
3930 };
3931
3932 static const auto mandatoryTypes = getMandatoryMutableTypes();
3933
3934 using StageVec = std::vector<TestingStage>;
3935
3936 const StageVec allStages = {
3937 TestingStage::COMPUTE, TestingStage::VERTEX, TestingStage::TESS_CONTROL, TestingStage::TESS_EVAL,
3938 TestingStage::GEOMETRY, TestingStage::FRAGMENT, TestingStage::RAY_GEN, TestingStage::INTERSECTION,
3939 TestingStage::ANY_HIT, TestingStage::CLOSEST_HIT, TestingStage::MISS, TestingStage::CALLABLE,
3940 };
3941
3942 const StageVec reducedStages = {
3943 TestingStage::COMPUTE,
3944 TestingStage::VERTEX,
3945 TestingStage::FRAGMENT,
3946 TestingStage::RAY_GEN,
3947 };
3948
3949 const StageVec computeOnly = {
3950 TestingStage::COMPUTE,
3951 };
3952
3953 // Basic tests with a single mutable descriptor.
3954 {
3955 GroupPtr singleCases(new tcu::TestCaseGroup(testCtx, "single"));
3956
3957 for (const auto &descriptorType : basicDescriptorTypes)
3958 {
3959 const auto groupName = descriptorTypeStr(descriptorType);
3960 const std::vector<VkDescriptorType> actualTypes(1u, descriptorType);
3961
3962 DescriptorSetPtr setPtr;
3963 {
3964 DescriptorSet::BindingPtrVector setBindings;
3965 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, actualTypes));
3966 setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
3967 }
3968
3969 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
3970 createMutableTestVariants(testCtx, subGroup.get(), setPtr, allStages);
3971
3972 singleCases->addChild(subGroup.release());
3973 }
3974
3975 // Case with a single descriptor that iterates several types.
3976 {
3977 DescriptorSetPtr setPtr;
3978 {
3979 DescriptorSet::BindingPtrVector setBindings;
3980 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mandatoryTypes));
3981 setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
3982 }
3983
3984 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, "all_mandatory"));
3985 createMutableTestVariants(testCtx, subGroup.get(), setPtr, reducedStages);
3986
3987 singleCases->addChild(subGroup.release());
3988 }
3989
3990 // Cases that try to verify switching from any descriptor type to any other is possible.
3991 {
3992 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, "switches"));
3993
3994 for (const auto &initialDescriptorType : basicDescriptorTypes)
3995 {
3996 for (const auto &finalDescriptorType : basicDescriptorTypes)
3997 {
3998 if (initialDescriptorType == finalDescriptorType)
3999 continue;
4000
4001 const std::vector<VkDescriptorType> mutableTypes{initialDescriptorType, finalDescriptorType};
4002 DescriptorSet::BindingPtrVector setBindings;
4003 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mutableTypes));
4004
4005 DescriptorSetPtr setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
4006
4007 const auto groupName =
4008 descriptorTypeStr(initialDescriptorType) + "_" + descriptorTypeStr(finalDescriptorType);
4009 GroupPtr combinationGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4010 createMutableTestVariants(testCtx, combinationGroup.get(), setPtr, reducedStages);
4011 subGroup->addChild(combinationGroup.release());
4012 }
4013 }
4014
4015 singleCases->addChild(subGroup.release());
4016 }
4017
4018 mainGroup->addChild(singleCases.release());
4019 }
4020
4021 // Cases with a single non-mutable descriptor. This provides some basic checks to verify copying to non-mutable bindings works.
4022 {
4023 GroupPtr singleNonMutableGroup(new tcu::TestCaseGroup(testCtx, "single_nonmutable"));
4024
4025 for (const auto &descriptorType : basicDescriptorTypes)
4026 {
4027 DescriptorSet::BindingPtrVector bindings;
4028 bindings.emplace_back(new SingleBinding(descriptorType, std::vector<VkDescriptorType>()));
4029 DescriptorSetPtr descriptorSet(new DescriptorSet(bindings));
4030
4031 const auto groupName = descriptorTypeStr(descriptorType);
4032 GroupPtr descGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4033
4034 createMutableTestVariants(testCtx, descGroup.get(), descriptorSet, reducedStages);
4035 singleNonMutableGroup->addChild(descGroup.release());
4036 }
4037
4038 mainGroup->addChild(singleNonMutableGroup.release());
4039 }
4040
4041 const struct
4042 {
4043 bool unbounded;
4044 const char *name;
4045 } unboundedCases[] = {
4046 {false, "constant_size"},
4047 {true, "unbounded"},
4048 };
4049
4050 const struct
4051 {
4052 bool aliasing;
4053 const char *name;
4054 } aliasingCases[] = {
4055 {false, "noaliasing"},
4056 {true, "aliasing"},
4057 };
4058
4059 const struct
4060 {
4061 bool oneArrayOnly;
4062 bool mixNonMutable;
4063 const char *groupName;
4064 } arrayCountGroups[] = {
4065 // Tests using an array of mutable descriptors
4066 {true, false, "one_array"},
4067 // Tests using multiple arrays of mutable descriptors
4068 {false, false, "multiple_arrays"},
4069 // Tests using multiple arrays of mutable descriptors mixed with arrays of nonmutable ones
4070 {false, true, "multiple_arrays_mixed"},
4071 };
4072
4073 for (const auto &variant : arrayCountGroups)
4074 {
4075 GroupPtr arrayGroup(new tcu::TestCaseGroup(testCtx, variant.groupName));
4076
4077 for (const auto &unboundedCase : unboundedCases)
4078 {
4079 GroupPtr unboundedGroup(new tcu::TestCaseGroup(testCtx, unboundedCase.name));
4080
4081 for (const auto &aliasingCase : aliasingCases)
4082 {
4083 GroupPtr aliasingGroup(new tcu::TestCaseGroup(testCtx, aliasingCase.name));
4084
4085 DescriptorSet::BindingPtrVector setBindings;
4086
4087 // Prepare descriptors for this test variant.
4088 for (size_t mandatoryTypesRotation = 0; mandatoryTypesRotation < mandatoryTypes.size();
4089 ++mandatoryTypesRotation)
4090 {
4091 const bool isLastBinding =
4092 (variant.oneArrayOnly || mandatoryTypesRotation == mandatoryTypes.size() - 1u);
4093 const bool isUnbounded = (unboundedCase.unbounded && isLastBinding);
4094
4095 // Create a rotation of the mandatory types for each mutable array binding.
4096 auto mandatoryTypesVector = mandatoryTypes;
4097 {
4098 const auto beginPtr = &mandatoryTypesVector[0];
4099 const auto endPtr = beginPtr + mandatoryTypesVector.size();
4100 std::rotate(beginPtr, &mandatoryTypesVector[mandatoryTypesRotation], endPtr);
4101 }
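// Example: with mandatory types {A, B, C}, rotation 1 yields {B, C, A}; each successive array binding
// thus starts its per-iteration type sequence one position further along.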
4102
4103 std::vector<SingleBinding> arrayBindings;
4104
4105 if (aliasingCase.aliasing)
4106 {
4107 // With aliasing, the descriptor types rotate in each descriptor.
4108 for (size_t typeIdx = 0; typeIdx < mandatoryTypesVector.size(); ++typeIdx)
4109 {
4110 auto rotatedTypes = mandatoryTypesVector;
4111 const auto beginPtr = &rotatedTypes[0];
4112 const auto endPtr = beginPtr + rotatedTypes.size();
4113
4114 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4115
4116 arrayBindings.emplace_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes);
4117 }
4118 }
4119 else
4120 {
4121 // Without aliasing, all descriptors use the same type at the same time.
4122 const SingleBinding noAliasingBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mandatoryTypesVector);
4123 arrayBindings.resize(mandatoryTypesVector.size(), noAliasingBinding);
4124 }
4125
4126 setBindings.emplace_back(new ArrayBinding(isUnbounded, arrayBindings));
4127
4128 if (variant.mixNonMutable && !isUnbounded)
4129 {
4130 // Create a non-mutable array binding interleaved with the other ones.
4131 const SingleBinding nonMutableBinding(mandatoryTypes[mandatoryTypesRotation],
4132 std::vector<VkDescriptorType>());
4133 std::vector<SingleBinding> nonMutableBindings(mandatoryTypes.size(), nonMutableBinding);
4134 setBindings.emplace_back(new ArrayBinding(false, nonMutableBindings));
4135 }
4136
4137 if (variant.oneArrayOnly)
4138 break;
4139 }
4140
4141 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4142 createMutableTestVariants(testCtx, aliasingGroup.get(), descriptorSet, computeOnly);
4143
4144 unboundedGroup->addChild(aliasingGroup.release());
4145 }
4146
4147 arrayGroup->addChild(unboundedGroup.release());
4148 }
4149
4150 mainGroup->addChild(arrayGroup.release());
4151 }
4152
4153 // Cases with a single mutable binding followed by an array of mutable bindings.
4154 // The array will use a single type beyond the mandatory ones.
4155 {
4156 GroupPtr singleAndArrayGroup(new tcu::TestCaseGroup(testCtx, "single_and_array"));
4157
4158 for (const auto &descriptorType : basicDescriptorTypes)
4159 {
4160 // Input attachments will not use arrays.
4161 if (descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
4162 continue;
4163
4164 if (de::contains(begin(mandatoryTypes), end(mandatoryTypes), descriptorType))
4165 continue;
4166
4167 const auto groupName = descriptorTypeStr(descriptorType);
4168 GroupPtr descTypeGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4169
4170 for (const auto &aliasingCase : aliasingCases)
4171 {
4172 GroupPtr aliasingGroup(new tcu::TestCaseGroup(testCtx, aliasingCase.name));
4173
4174 DescriptorSet::BindingPtrVector setBindings;
4175 std::vector<SingleBinding> arrayBindings;
4176
4177 // Add single type beyond the mandatory ones.
4178 auto arrayBindingDescTypes = mandatoryTypes;
4179 arrayBindingDescTypes.push_back(descriptorType);
4180
4181 // Single mutable descriptor as the first binding.
4182 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, arrayBindingDescTypes));
4183
4184 // Descriptor array as the second binding.
4185 if (aliasingCase.aliasing)
4186 {
4187 // With aliasing, the descriptor types rotate in each descriptor.
4188 for (size_t typeIdx = 0; typeIdx < arrayBindingDescTypes.size(); ++typeIdx)
4189 {
4190 auto rotatedTypes = arrayBindingDescTypes;
4191 const auto beginPtr = &rotatedTypes[0];
4192 const auto endPtr = beginPtr + rotatedTypes.size();
4193
4194 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4195
4196 arrayBindings.emplace_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes);
4197 }
4198 }
4199 else
4200 {
4201 // Without aliasing, all descriptors use the same type at the same time.
4202 const SingleBinding noAliasingBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, arrayBindingDescTypes);
4203 arrayBindings.resize(arrayBindingDescTypes.size(), noAliasingBinding);
4204 }
4205
4206 // Second binding: array binding.
4207 setBindings.emplace_back(new ArrayBinding(false /*unbounded*/, arrayBindings));
4208
4209 // Create set and test variants.
4210 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4211 createMutableTestVariants(testCtx, aliasingGroup.get(), descriptorSet, computeOnly);
4212
4213 descTypeGroup->addChild(aliasingGroup.release());
4214 }
4215
4216 singleAndArrayGroup->addChild(descTypeGroup.release());
4217 }
4218
4219 mainGroup->addChild(singleAndArrayGroup.release());
4220 }
4221
4222 // Cases with several mutable non-array bindings.
4223 {
4224 GroupPtr multipleGroup(new tcu::TestCaseGroup(testCtx, "multiple"));
4225 GroupPtr mutableOnlyGroup(new tcu::TestCaseGroup(testCtx, "mutable_only"));
4226 GroupPtr mixedGroup(new tcu::TestCaseGroup(testCtx, "mixed"));
4227
4228 // Each descriptor will have a different type in every iteration, like in the one_array aliasing case.
4229 for (int groupIdx = 0; groupIdx < 2; ++groupIdx)
4230 {
4231 const bool mixed = (groupIdx == 1);
4232 DescriptorSet::BindingPtrVector setBindings;
4233
4234 for (size_t typeIdx = 0; typeIdx < mandatoryTypes.size(); ++typeIdx)
4235 {
4236 auto rotatedTypes = mandatoryTypes;
4237 const auto beginPtr = &rotatedTypes[0];
4238 const auto endPtr = beginPtr + rotatedTypes.size();
4239
4240 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4241 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes));
4242
4243 // Additional non-mutable binding interleaved with the mutable ones.
4244 if (mixed)
4245 setBindings.emplace_back(new SingleBinding(rotatedTypes[0], std::vector<VkDescriptorType>()));
4246 }
4247 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4248
4249 const auto dstGroup = (mixed ? mixedGroup.get() : mutableOnlyGroup.get());
4250 createMutableTestVariants(testCtx, dstGroup, descriptorSet, computeOnly);
4251 }
4252
4253 multipleGroup->addChild(mutableOnlyGroup.release());
4254 multipleGroup->addChild(mixedGroup.release());
4255 mainGroup->addChild(multipleGroup.release());
4256 }
4257 }
4258
4259 } // namespace BindingModel
4260 } // namespace vkt
4261