/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/

#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"
#include "tcuImageCompare.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <limits>
namespace vkt
{
namespace robustness
{
namespace
{
using namespace vk;
using namespace std;
using de::SharedPtr;
using BufferWithMemoryPtr = de::MovePtr<BufferWithMemory>;

enum RobustnessFeatureBits
{
    RF_IMG_ROBUSTNESS      = (1),
    RF_ROBUSTNESS2         = (1 << 1),
    RF_PIPELINE_ROBUSTNESS = (1 << 2),
};

using RobustnessFeatures = uint32_t;

// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
class SingletonDevice
{
    SingletonDevice(Context &context)
        : m_context(context)
#ifdef CTS_USES_VULKANSC
        , m_customInstance(createCustomInstanceFromContext(context))
#endif // CTS_USES_VULKANSC
        , m_logicalDevice()
    {
        // Note we are already checking the needed features are available in checkSupport().
        VkPhysicalDeviceExtendedDynamicStateFeaturesEXT edsFeatures = initVulkanStructure();
        VkPhysicalDeviceScalarBlockLayoutFeatures scalarBlockLayoutFeatures = initVulkanStructure();
        VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT shaderImageAtomicInt64Features = initVulkanStructure();
        VkPhysicalDeviceBufferDeviceAddressFeatures bufferDeviceAddressFeatures = initVulkanStructure();
        VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
        VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
#ifndef CTS_USES_VULKANSC
        VkPhysicalDeviceRayTracingPipelineFeaturesKHR rayTracingPipelineFeatures = initVulkanStructure();
        VkPhysicalDeviceAccelerationStructureFeaturesKHR accelerationStructureFeatures = initVulkanStructure();
        VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
        VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT gplFeatures = initVulkanStructure();
#endif // CTS_USES_VULKANSC
        VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

        const auto addFeatures = makeStructChainAdder(&features2);

        // Enable these ones if supported, as they're needed in some tests.
        if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
            addFeatures(&edsFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
            addFeatures(&scalarBlockLayoutFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"))
            addFeatures(&shaderImageAtomicInt64Features);

#ifndef CTS_USES_VULKANSC
        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
        {
            addFeatures(&accelerationStructureFeatures);
            addFeatures(&rayTracingPipelineFeatures);
        }

        if (context.isDeviceFunctionalitySupported("VK_EXT_graphics_pipeline_library"))
            addFeatures(&gplFeatures);
#endif // CTS_USES_VULKANSC

        if (context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
            addFeatures(&bufferDeviceAddressFeatures);

        if (FEATURES & RF_IMG_ROBUSTNESS)
        {
            DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));

            if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
                addFeatures(&imageRobustnessFeatures);
        }

        if (FEATURES & RF_ROBUSTNESS2)
        {
            DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));

            if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
                addFeatures(&robustness2Features);
        }

#ifndef CTS_USES_VULKANSC
        if (FEATURES & RF_PIPELINE_ROBUSTNESS)
            addFeatures(&pipelineRobustnessFeatures);
#endif

        const auto &vki           = m_context.getInstanceInterface();
        const auto instance       = m_context.getInstance();
        const auto physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());

        vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

#ifndef CTS_USES_VULKANSC
        if (FEATURES & RF_PIPELINE_ROBUSTNESS)
            features2.features.robustBufferAccess = VK_FALSE;
#endif
        m_logicalDevice = createRobustBufferAccessDevice(context,
#ifdef CTS_USES_VULKANSC
                                                         m_customInstance,
#endif // CTS_USES_VULKANSC
                                                         &features2);

#ifndef CTS_USES_VULKANSC
        m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance,
                                                                    *m_logicalDevice, context.getUsedApiVersion(),
                                                                    context.getTestContext().getCommandLine()));
#else
        m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
            new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice,
                               context.getTestContext().getCommandLine(), context.getResourceInterface(),
                               m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(),
                               context.getUsedApiVersion()),
            vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
    }

public:
    ~SingletonDevice()
    {
    }

    static VkDevice getDevice(Context &context)
    {
        if (!m_singletonDevice)
            m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
        DE_ASSERT(m_singletonDevice);
        return m_singletonDevice->m_logicalDevice.get();
    }

    static const DeviceInterface &getDeviceInterface(Context &context)
    {
        if (!m_singletonDevice)
            m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
        DE_ASSERT(m_singletonDevice);
        return *(m_singletonDevice->m_deviceDriver.get());
    }

    static void destroy()
    {
        m_singletonDevice.clear();
    }

private:
    const Context &m_context;
#ifndef CTS_USES_VULKANSC
    Move<vk::VkDevice> m_logicalDevice;
    de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
    // Construction needs to happen in this exact order to ensure proper resource destruction
    CustomInstance m_customInstance;
    Move<vk::VkDevice> m_logicalDevice;
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC

    static SharedPtr<SingletonDevice<FEATURES>> m_singletonDevice;
};

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;

using ImageRobustnessSingleton = SingletonDevice<RF_IMG_ROBUSTNESS>;
using Robustness2Singleton     = SingletonDevice<RF_ROBUSTNESS2>;

using PipelineRobustnessImageRobustnessSingleton = SingletonDevice<RF_IMG_ROBUSTNESS | RF_PIPELINE_ROBUSTNESS>;
using PipelineRobustnessRobustness2Singleton     = SingletonDevice<RF_ROBUSTNESS2 | RF_PIPELINE_ROBUSTNESS>;

// Render target / compute grid dimensions
static const uint32_t DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999
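// 999 does not collide with any real VkDescriptorType value, which is why CaseDef::descriptorType
// below is declared as int: the sentinel can flow through the same switch statements as genuine
// descriptor types without extra casting.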

typedef enum
{
    STAGE_COMPUTE = 0,
    STAGE_VERTEX,
    STAGE_FRAGMENT,
    STAGE_RAYGEN
} Stage;

enum class PipelineRobustnessCase
{
    DISABLED = 0,
    ENABLED_MONOLITHIC,
    ENABLED_FAST_GPL,
    ENABLED_OPTIMIZED_GPL,
};

PipelineConstructionType getConstructionTypeFromRobustnessCase(PipelineRobustnessCase prCase)
{
    if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
        return PIPELINE_CONSTRUCTION_TYPE_FAST_LINKED_LIBRARY;
    if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
        return PIPELINE_CONSTRUCTION_TYPE_LINK_TIME_OPTIMIZED_LIBRARY;
    return PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC;
}

struct CaseDef
{
    VkFormat format;
    Stage stage;
    VkFlags allShaderStages;
    VkFlags allPipelineStages;
    int /*VkDescriptorType*/ descriptorType;
    VkImageViewType viewType;
    VkSampleCountFlagBits samples;
    int bufferLen;
    bool unroll;
    bool vol;
    bool nullDescriptor;
    bool useTemplate;
    bool formatQualifier;
    bool pushDescriptor;
    bool testRobustness2;
    PipelineRobustnessCase pipelineRobustnessCase;
    uint32_t imageDim[3]; // width, height, depth or layers
    bool readOnly;

    bool needsScalarBlockLayout() const
    {
        bool scalarNeeded = false;

        switch (descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            scalarNeeded = true;
            break;
        default:
            scalarNeeded = false;
            break;
        }

        return scalarNeeded;
    }

    bool needsPipelineRobustness(void) const
    {
        return (pipelineRobustnessCase != PipelineRobustnessCase::DISABLED);
    }
};
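
// A rough illustration (hypothetical, not a case generated by the test tree) of how one
// robustness2 storage-buffer case might be parameterized:
//
//     CaseDef c{};
//     c.format                 = VK_FORMAT_R32_UINT;
//     c.stage                  = STAGE_COMPUTE;
//     c.descriptorType         = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
//     c.bufferLen              = 12;   // deliberately not a multiple of the vec4 stride
//     c.testRobustness2        = true; // expect robustBufferAccess2 semantics
//     c.pipelineRobustnessCase = PipelineRobustnessCase::DISABLED;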

static bool formatIsR64(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64_UINT:
        return true;
    default:
        return false;
    }
}

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice(Context &ctx, const bool testRobustness2, const bool testPipelineRobustness)
{
    if (testPipelineRobustness)
    {
        if (testRobustness2)
            return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
        return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);
    }

    if (testRobustness2)
        return Robustness2Singleton::getDevice(ctx);
    return ImageRobustnessSingleton::getDevice(ctx);
}

// Returns the appropriate singleton device driver for the given case.
const DeviceInterface &getDeviceInterface(Context &ctx, const bool testRobustness2, const bool testPipelineRobustness)
{
    if (testPipelineRobustness)
    {
        if (testRobustness2)
            return PipelineRobustnessRobustness2Singleton::getDeviceInterface(ctx);
        return PipelineRobustnessImageRobustnessSingleton::getDeviceInterface(ctx);
    }

    if (testRobustness2)
        return Robustness2Singleton::getDeviceInterface(ctx);
    return ImageRobustnessSingleton::getDeviceInterface(ctx);
}

class Layout
{
public:
    vector<VkDescriptorSetLayoutBinding> layoutBindings;
    vector<uint8_t> refData;
};

class RobustnessExtsTestInstance : public TestInstance
{
public:
    RobustnessExtsTestInstance(Context &context, const CaseDef &data);
    ~RobustnessExtsTestInstance(void);
    tcu::TestStatus iterate(void);

private:
    CaseDef m_data;
};

RobustnessExtsTestInstance::RobustnessExtsTestInstance(Context &context, const CaseDef &data)
    : vkt::TestInstance(context)
    , m_data(data)
{
}

RobustnessExtsTestInstance::~RobustnessExtsTestInstance(void)
{
}

class RobustnessExtsTestCase : public TestCase
{
public:
    RobustnessExtsTestCase(tcu::TestContext &context, const std::string &name, const CaseDef data);
    ~RobustnessExtsTestCase(void);
    virtual void initPrograms(SourceCollections &programCollection) const;
    virtual TestInstance *createInstance(Context &context) const;
    virtual void checkSupport(Context &context) const;

private:
    CaseDef m_data;
};

RobustnessExtsTestCase::RobustnessExtsTestCase(tcu::TestContext &context, const std::string &name, const CaseDef data)
    : vkt::TestCase(context, name)
    , m_data(data)
{
}

RobustnessExtsTestCase::~RobustnessExtsTestCase(void)
{
}

static bool formatIsFloat(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R32_SFLOAT:
    case VK_FORMAT_R32G32_SFLOAT:
    case VK_FORMAT_R32G32B32A32_SFLOAT:
        return true;
    default:
        return false;
    }
}

static bool formatIsSignedInt(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
        return true;
    default:
        return false;
    }
}

static bool supportsStores(int descriptorType)
{
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        return true;
    default:
        return false;
    }
}

#ifndef CTS_USES_VULKANSC
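// Builds the VkPipelineRobustnessCreateInfoEXT for a case: every resource class starts out
// DISABLED, and only the class matching the descriptor under test is switched to the
// robust-access behavior (or the "2" variant when robustness2 semantics are being tested).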
static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
{
    VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();
    robustnessCreateInfo.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.vertexInputs   = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.images         = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;

    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        robustnessCreateInfo.storageBuffers =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        robustnessCreateInfo.images = (robustness2 ? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT :
                                                     VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        robustnessCreateInfo.uniformBuffers =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    case VERTEX_ATTRIBUTE_FETCH:
        robustnessCreateInfo.vertexInputs =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    default:
        DE_ASSERT(0);
    }

    return robustnessCreateInfo;
}
#endif
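
// Per VK_EXT_pipeline_robustness, the structure returned above is chained into the pNext of the
// pipeline (or individual pipeline stage) create info, so robustness behavior is selected per
// pipeline rather than device-wide.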

void RobustnessExtsTestCase::checkSupport(Context &context) const
{
    const auto &vki           = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();

    checkPipelineConstructionRequirements(vki, physicalDevice,
                                          getConstructionTypeFromRobustnessCase(m_data.pipelineRobustnessCase));

    // We need to query some features using the physical device instead of using the reported context features because
    // robustness2 and image robustness are always disabled in the default device but they may be available.
    VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
    VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
    VkPhysicalDeviceScalarBlockLayoutFeatures scalarLayoutFeatures = initVulkanStructure();
    VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

    context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
    const auto addFeatures = makeStructChainAdder(&features2);

    if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
        addFeatures(&scalarLayoutFeatures);

    if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
        addFeatures(&imageRobustnessFeatures);

    if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
        addFeatures(&robustness2Features);

#ifndef CTS_USES_VULKANSC
    VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
    if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
        addFeatures(&pipelineRobustnessFeatures);
#endif

    vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

    if (formatIsR64(m_data.format))
    {
        context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

        VkFormatProperties formatProperties;
        vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);

#ifndef CTS_USES_VULKANSC
        const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
#endif // CTS_USES_VULKANSC

        switch (m_data.descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
#ifndef CTS_USES_VULKANSC
            if ((!m_data.formatQualifier) &&
                ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) !=
                 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            if ((!m_data.formatQualifier) &&
                ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) !=
                 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
#endif // CTS_USES_VULKANSC
            break;
        case VERTEX_ATTRIBUTE_FETCH:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
            break;
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) !=
                VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
            break;
        default:
            // No additional format feature requirements for the remaining descriptor types.
            DE_ASSERT(true);
        }

        if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
        {
            if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) !=
                VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
        }
    }

    // Check needed properties and features
    if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
        TCU_THROW(NotSupportedError, "Scalar block layout not supported");

    if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

    if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Fragment shader stores not supported");

    if (m_data.stage == STAGE_RAYGEN)
        context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");

    switch (m_data.descriptorType)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VERTEX_ATTRIBUTE_FETCH:
        if (m_data.testRobustness2)
        {
            if (!robustness2Features.robustBufferAccess2)
                TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
        }
        else
        {
            // This case is not tested here.
            DE_ASSERT(false);
        }
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (m_data.testRobustness2)
        {
            if (!robustness2Features.robustImageAccess2)
                TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
        }
        else
        {
            if (!imageRobustnessFeatures.robustImageAccess)
                TCU_THROW(NotSupportedError, "robustImageAccess not supported");
        }
        break;
    }

    if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
        TCU_THROW(NotSupportedError, "nullDescriptor not supported");

    // The fill shader for 64-bit multisample image tests uses a storage image.
    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) && m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
        TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

#ifndef CTS_USES_VULKANSC
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !m_data.formatQualifier)
    {
        const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support reading without format");
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support writing without format");
    }
    else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !m_data.formatQualifier)
    {
        const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
        if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support reading without format");
        if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support writing without format");
    }
#else
    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
        !m_data.formatQualifier &&
        (!features2.features.shaderStorageImageReadWithoutFormat ||
         !features2.features.shaderStorageImageWriteWithoutFormat))
        TCU_THROW(NotSupportedError,
                  "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
#endif // CTS_USES_VULKANSC

    if (m_data.pushDescriptor)
        context.requireDeviceFunctionality("VK_KHR_push_descriptor");

    if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
        TCU_THROW(NotSupportedError, "Cube array image view type not supported");

    if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
        !context.getDeviceFeatures().robustBufferAccess)
        TCU_THROW(NotSupportedError,
                  "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");

#ifndef CTS_USES_VULKANSC
    if (m_data.needsPipelineRobustness() && !pipelineRobustnessFeatures.pipelineRobustness)
        TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
#endif
}

void generateLayout(Layout &layout, const CaseDef &caseDef)
{
    vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
    int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
    bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

    for (uint32_t b = 0; b < layout.layoutBindings.size(); ++b)
    {
        VkDescriptorSetLayoutBinding &binding = bindings[b];
        binding.binding            = b;
        binding.pImmutableSamplers = NULL;
        binding.stageFlags         = caseDef.allShaderStages;
        binding.descriptorCount    = 1;

        // Output image
        if (b == 0)
            binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
            binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
    }

    if (caseDef.nullDescriptor)
        return;

    if (caseDef.bufferLen == 0)
    {
        // Clear color values for image tests
        static uint32_t urefData[4]   = {0x12345678, 0x23456789, 0x34567890, 0x45678901};
        static uint64_t urefData64[4] = {0x1234567887654321, 0x234567899, 0x345678909, 0x456789019};
        static float frefData[4]      = {123.f, 234.f, 345.f, 456.f};

        if (formatIsR64(caseDef.format))
        {
            layout.refData.resize(32);
            uint64_t *ptr = (uint64_t *)layout.refData.data();

            for (unsigned int i = 0; i < 4; ++i)
            {
                ptr[i] = urefData64[i];
            }
        }
        else
        {
            layout.refData.resize(16);
            deMemcpy(layout.refData.data(),
                     formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData,
                     sizeof(frefData));
        }
    }
    else
    {
        layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7 : ~3));
        for (unsigned int i = 0;
             i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(uint64_t) : sizeof(uint32_t)); ++i)
        {
            if (formatIsFloat(caseDef.format))
            {
                float *f = (float *)layout.refData.data() + i;
                *f = 2.0f * (float)i + 3.0f;
            }
            else if (formatIsR64(caseDef.format))
            {
                uint64_t *u = (uint64_t *)layout.refData.data() + i;
                *u = 2 * i + 3;
            }
            else
            {
                int *u = (int *)layout.refData.data() + i;
                *u = 2 * i + 3;
            }
        }
    }
}
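
// In short: binding 0 is always the output storage image, and binding 1 (when present) is the
// descriptor under test. With the pattern above, a 16-byte R32_UINT buffer ends up holding
// val[i] = 2*i + 3, i.e. {3, 5, 7, 9}, which the generated shaders mirror in their refData array.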

static string genFetch(const CaseDef &caseDef, int numComponents, const string &vecType, const string &coord,
                       const string &lod)
{
    std::stringstream s;
    // Fetch from the descriptor.
    switch (caseDef.descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        s << vecType << "(ubo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(ssbo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        s << "texelFetch(texbo0_1, " << coord << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageLoad(image0_1, " << coord << ")";
        break;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
            s << "texelFetch(texture0_1, " << coord << ")";
        else
            s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageLoad(image0_1, " << coord << ")";
        break;
    case VERTEX_ATTRIBUTE_FETCH:
        s << "attr";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}
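
// For example, genFetch() for a two-component storage buffer with vecType "uvec4" and coord "idx"
// yields the GLSL expression:
//
//     uvec4(ssbo0_1.val[idx], 0, 0)
//
// i.e. missing components are padded with zeros so results can be compared as full vec4s.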

static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string &vecType, const string &bufType)
{
    std::stringstream s;
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(" << bufType << "(" << storeValue << ")";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << vecType << "(" << storeValue << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << vecType << "(" << storeValue << ")";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}

static string genStore(int descriptorType, const string &vecType, const string &bufType, const string &coord)
{
    std::stringstream s;
    // Store to the descriptor.
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}
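
// For example, genStore(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "uvec4", "uint", "ivec2(c, 0)")
// produces:
//
//     imageStore(image0_1, ivec2(c, 0), uvec4(123))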

static string genAtomic(int descriptorType, const string &bufType, const string &coord)
{
    std::stringstream s;
    // Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}
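
// For example, genAtomic(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "uint", "idx") produces:
//
//     atomicAdd(ssbo0_1.val[idx], uint(10))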

static std::string getShaderImageFormatQualifier(const tcu::TextureFormat &format)
{
    const char *orderPart;
    const char *typePart;

    switch (format.order)
    {
    case tcu::TextureFormat::R:
        orderPart = "r";
        break;
    case tcu::TextureFormat::RG:
        orderPart = "rg";
        break;
    case tcu::TextureFormat::RGB:
        orderPart = "rgb";
        break;
    case tcu::TextureFormat::RGBA:
        orderPart = "rgba";
        break;

    default:
        DE_FATAL("Impossible");
        orderPart = DE_NULL;
    }

    switch (format.type)
    {
    case tcu::TextureFormat::FLOAT:
        typePart = "32f";
        break;
    case tcu::TextureFormat::HALF_FLOAT:
        typePart = "16f";
        break;

    case tcu::TextureFormat::UNSIGNED_INT64:
        typePart = "64ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT32:
        typePart = "32ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT16:
        typePart = "16ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT8:
        typePart = "8ui";
        break;

    case tcu::TextureFormat::SIGNED_INT64:
        typePart = "64i";
        break;
    case tcu::TextureFormat::SIGNED_INT32:
        typePart = "32i";
        break;
    case tcu::TextureFormat::SIGNED_INT16:
        typePart = "16i";
        break;
    case tcu::TextureFormat::SIGNED_INT8:
        typePart = "8i";
        break;

    case tcu::TextureFormat::UNORM_INT16:
        typePart = "16";
        break;
    case tcu::TextureFormat::UNORM_INT8:
        typePart = "8";
        break;

    case tcu::TextureFormat::SNORM_INT16:
        typePart = "16_snorm";
        break;
    case tcu::TextureFormat::SNORM_INT8:
        typePart = "8_snorm";
        break;

    default:
        DE_FATAL("Impossible");
        typePart = DE_NULL;
    }

    return std::string() + orderPart + typePart;
}
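
// For example, mapVkFormat(VK_FORMAT_R32G32B32A32_SFLOAT) maps to "rgba32f" and
// mapVkFormat(VK_FORMAT_R64_UINT) to "r64ui", matching the GLSL image format layout qualifiers.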

string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
{
    if (numCoords == 1)
        return c;

    if (samples != VK_SAMPLE_COUNT_1_BIT)
        numCoords--;

    string coord = "ivec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
    {
        if (i == dim)
            coord += c;
        else
            coord += "0";
        if (i < numCoords - 1)
            coord += ", ";
    }
    coord += ")";

    // Append sample coordinate
    if (samples != VK_SAMPLE_COUNT_1_BIT)
    {
        coord += ", ";
        if (dim == numCoords)
            coord += c;
        else
            coord += "0";
    }
    return coord;
}
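
// For example, genCoord("c", 3, VK_SAMPLE_COUNT_1_BIT, 1) returns "ivec3(0, c, 0)": only the
// dimension under test varies while the others stay at zero. For multisample cases the last
// "coordinate" is emitted after the vector so it becomes the separate sample-index argument.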

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
{
    // dim can be 3 for cube_array. Reuse the number of layers in that case.
    dim = std::min(dim, 2);

    if (numCoords == 1)
        return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

    string coord = "vec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
    {
        if (i == dim)
            coord += c;
        else
            coord += "0.25";
        if (i < numNormalizedCoords)
            coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
        if (i < numCoords - 1)
            coord += ", ";
    }
    coord += ")";
    return coord;
}
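
// For example, with imageDim[0] == 8, genCoordNorm(caseDef, "(c+0.25)", 2, 2, 0) returns
// "vec2((c+0.25) / float(8), 0.25 / float(8))"; coordinates past numNormalizedCoords (array
// layers) are left unnormalized, as the sampling functions expect.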

void RobustnessExtsTestCase::initPrograms(SourceCollections &programCollection) const
{
    VkFormat format = m_data.format;

    Layout layout;
    generateLayout(layout, m_data);

    if (layout.layoutBindings.size() > 1 &&
        layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
    {
        if (format == VK_FORMAT_R64_SINT)
            format = VK_FORMAT_R32G32_SINT;

        if (format == VK_FORMAT_R64_UINT)
            format = VK_FORMAT_R32G32_UINT;
    }

    std::stringstream decls, checks;

    const string r64     = formatIsR64(format) ? "64" : "";
    const string i64Type = formatIsR64(format) ? "64_t" : "";
    const string vecType =
        formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
    const string qLevelType = vecType == "vec4"                                ? "float" :
                              ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) :
                                                                                 ("uint" + i64Type);

    decls << "uvec4 abs(uvec4 x) { return x; }\n";
    if (formatIsR64(format))
        decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
    decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";

    const int componentSize = (formatIsR64(format) ? 8 : 4);
    int refDataNumElements  = deIntRoundToPow2(((int)layout.refData.size() / componentSize), 4);
    // Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
    // robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
        m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
    {
        refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
    }
    if (m_data.nullDescriptor)
        refDataNumElements = 4;

    if (formatIsFloat(format))
    {
        decls << "float refData[" << refDataNumElements << "] = {";
        int i;
        for (i = 0; i < (int)layout.refData.size() / 4; ++i)
        {
            if (i != 0)
                decls << ", ";
            decls << ((const float *)layout.refData.data())[i];
        }
        while (i < refDataNumElements)
        {
            if (i != 0)
                decls << ", ";
            decls << "0";
            i++;
        }
    }
    else if (formatIsR64(format))
    {
        decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
        int i;
        for (i = 0; i < (int)layout.refData.size() / 8; ++i)
        {
            if (i != 0)
                decls << ", ";
            decls << ((const uint64_t *)layout.refData.data())[i] << "l";
        }
        while (i < refDataNumElements)
        {
            if (i != 0)
                decls << ", ";
            decls << "0l";
            i++;
        }
    }
    else
    {
        decls << "int refData[" << refDataNumElements << "] = {";
        int i;
        for (i = 0; i < (int)layout.refData.size() / 4; ++i)
        {
            if (i != 0)
                decls << ", ";
            decls << ((const int *)layout.refData.data())[i];
        }
        while (i < refDataNumElements)
        {
            if (i != 0)
                decls << ", ";
            decls << "0";
            i++;
        }
    }

    decls << "};\n";
    decls << vecType << " zzzz = " << vecType << "(0);\n";
    decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
    decls << vecType << " expectedIB;\n";

    string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
    string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
    string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));

    string imageDim = "";
    int numCoords, numNormalizedCoords;
    bool layered = false;
    switch (m_data.viewType)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case VK_IMAGE_VIEW_TYPE_1D:
        imageDim            = "1D";
        numCoords           = 1;
        numNormalizedCoords = 1;
        break;
    case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
        imageDim            = "1DArray";
        numCoords           = 2;
        numNormalizedCoords = 1;
        layered             = true;
        break;
    case VK_IMAGE_VIEW_TYPE_2D:
        imageDim            = "2D";
        numCoords           = 2;
        numNormalizedCoords = 2;
        break;
    case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
        imageDim            = "2DArray";
        numCoords           = 3;
        numNormalizedCoords = 2;
        layered             = true;
        break;
    case VK_IMAGE_VIEW_TYPE_3D:
        imageDim            = "3D";
        numCoords           = 3;
        numNormalizedCoords = 3;
        break;
    case VK_IMAGE_VIEW_TYPE_CUBE:
        imageDim            = "Cube";
        numCoords           = 3;
        numNormalizedCoords = 3;
        break;
    case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
        imageDim            = "CubeArray";
        numCoords           = 4;
        numNormalizedCoords = 3;
        layered             = true;
        break;
    }
    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
    {
        switch (m_data.viewType)
        {
        default:
            DE_ASSERT(0); // Fallthrough
        case VK_IMAGE_VIEW_TYPE_2D:
            imageDim = "2DMS";
            break;
        case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
            imageDim = "2DMSArray";
            break;
        }
        numCoords++;
    }
    bool dataDependsOnLayer =
        (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) &&
        !m_data.nullDescriptor;

    // Special case imageLoad(imageCubeArray, ...) which uses ivec3
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
    {
        numCoords = 3;
    }

    int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
    string bufType;
    if (numComponents == 1)
        bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
    else
        bufType = imgprefix + "vec" + std::to_string(numComponents);

    // For UBO's, which have a declared size in the shader, don't access outside that size.
    bool declaredSize = false;
    switch (m_data.descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        declaredSize = true;
        break;
    default:
        break;
    }

    checks << " int inboundcoords, clampedLayer;\n";
    checks << " " << vecType << " expectedIB2;\n";
    if (m_data.unroll)
    {
        if (declaredSize)
            checks << " [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
        else
            checks << " [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
    }
    else
    {
        if (declaredSize)
            checks << " [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
        else
            checks << " [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
    }

    if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
        checks << " int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
    else
        checks << " int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";

    decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";

    const char *vol = m_data.vol ? "volatile " : "";
    const char *ro  = m_data.readOnly ? "readonly " : "";

    // Construct the declaration for the binding
    switch (m_data.descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType
              << " val[]; } ssbo0_1;\n";
        decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType
              << " val[]; } ssbo0_1_pad;\n";
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        switch (format)
        {
        case VK_FORMAT_R64_SINT:
            decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
            break;
        case VK_FORMAT_R64_UINT:
            decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
            break;
        default:
            decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
        }
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix
              << "imageBuffer image0_1;\n";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image"
              << imageDim << " image0_1;\n";
        break;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        switch (format)
        {
        case VK_FORMAT_R64_SINT:
            decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
            break;
        case VK_FORMAT_R64_UINT:
            decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
            break;
        default:
            decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
            break;
        }
        break;
    case VERTEX_ATTRIBUTE_FETCH:
        if (formatIsR64(format))
        {
            decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t"))
                  << " attr;\n";
        }
        else
        {
            decls << "layout(location = 0) in " << vecType << " attr;\n";
        }
        break;
    default:
        DE_ASSERT(0);
    }

    string expectedOOB;
    string defaultw;

    switch (m_data.descriptorType)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        expectedOOB = "zzzz";
        defaultw    = "0";
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
    case VERTEX_ATTRIBUTE_FETCH:
        if (numComponents == 1)
        {
            expectedOOB = "zzzo";
        }
        else if (numComponents == 2)
        {
            expectedOOB = "zzzo";
        }
        else
        {
            expectedOOB = "zzzz";
        }
        defaultw = "1";
        break;
    }

    string idx;
    switch (m_data.descriptorType)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VERTEX_ATTRIBUTE_FETCH:
        idx = "idx";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        idx = "0";
        break;
    }

    if (m_data.nullDescriptor)
    {
        checks << " expectedIB = zzzz;\n";
        checks << " inboundcoords = 0;\n";
        checks << " int paddedinboundcoords = 0;\n";
        // Vertex attribute fetch still gets format conversion applied
        if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
            expectedOOB = "zzzz";
    }
    else
    {
        checks << " expectedIB.x = refData[" << idx << "];\n";
        if (numComponents > 1)
        {
            checks << " expectedIB.y = refData[" << idx << "+1];\n";
        }
        else
        {
            checks << " expectedIB.y = 0;\n";
        }
        if (numComponents > 2)
        {
            checks << " expectedIB.z = refData[" << idx << "+2];\n";
            checks << " expectedIB.w = refData[" << idx << "+3];\n";
        }
        else
        {
            checks << " expectedIB.z = 0;\n";
            checks << " expectedIB.w = " << defaultw << ";\n";
        }

        switch (m_data.descriptorType)
        {
        default:
            DE_ASSERT(0); // Fallthrough
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            // UBOs can either strictly bounds check against inboundcoords, or can
            // return the contents from memory for the range padded up to paddedinboundcoords.
            checks << " int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
            // fallthrough
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        case VERTEX_ATTRIBUTE_FETCH:
            checks << " inboundcoords = "
                   << layout.refData.size() / (formatIsR64(format) ? sizeof(uint64_t) : sizeof(uint32_t)) /
                          numComponents
                   << ";\n";
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            // set per-component below
            break;
        }
    }

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
        !m_data.readOnly)
    {
        for (int i = 0; i < numCoords; ++i)
        {
            // Treat i==3 coord (cube array layer) like i == 2
            uint32_t coordDim = m_data.imageDim[i == 3 ? 2 : i];
            if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
                checks << " inboundcoords = " << coordDim << ";\n";

            string coord = genCoord("c", numCoords, m_data.samples, i);
            string inboundcoords = m_data.nullDescriptor ? "0" :
                                   (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ?
                                                           to_string(m_data.samples) :
                                                           "inboundcoords";

            checks << " if (c < 0 || c >= " << inboundcoords << ") "
                   << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
            if (m_data.formatQualifier && (format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
            {
                checks << " if (c < 0 || c >= " << inboundcoords << ") "
                       << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
            }
        }
    }
1452
1453 for (int i = 0; i < numCoords; ++i)
1454 {
1455 // Treat i==3 coord (cube array layer) like i == 2
1456 uint32_t coordDim = m_data.imageDim[i == 3 ? 2 : i];
1457 if (!m_data.nullDescriptor)
1458 {
1459 switch (m_data.descriptorType)
1460 {
1461 default:
1462 break;
1463 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1464 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1465 checks << " inboundcoords = " << coordDim << ";\n";
1466 break;
1467 }
1468 }
1469
1470 string coord = genCoord("c", numCoords, m_data.samples, i);
1471
1472 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1473 {
1474 if (formatIsR64(format))
1475 {
1476 checks << " temp.x = attr;\n";
1477 checks << " temp.y = 0l;\n";
1478 checks << " temp.z = 0l;\n";
1479 checks << " temp.w = 0l;\n";
1480 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else "
1481 "temp -= zzzz;\n";
1482 }
1483 else
1484 {
1485 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1486 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else "
1487 "temp -= "
1488 << expectedOOB << ";\n";
1489 }
1490 // Accumulate any incorrect values.
1491 checks << " accum += abs(temp);\n";
1492 }
1493 // Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1494 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1495 !(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1496 (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1497 {
1498 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1499
1500 checks << " expectedIB2 = expectedIB;\n";
1501
1502 // Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1503 if (dataDependsOnLayer && i == numNormalizedCoords)
1504 checks << " if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1505
1506 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1507 {
1508 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1509 {
1510 checks << " if (temp == zzzz) temp = " << vecType << "(0);\n";
1511 if (m_data.formatQualifier && numComponents < 4)
1512 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1513 checks << " else temp = " << vecType << "(1);\n";
1514 }
1515 else
1516 // multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1517 checks << " if (c >= 0 && c < " << m_data.samples
1518 << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1519 }
1520 else
1521 {
1522 // Storage buffers may be split into per-component loads. Generate a second
1523 // expected out of bounds value where some subset of the components are
1524 // actually in-bounds. If both loads and stores are split into per-component
1525 // accesses, then the result value can be a mix of storeValue and zero.
1526 string expectedOOB2 = expectedOOB;
1527 string expectedOOB3 = expectedOOB;
1528 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1529 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1530 !m_data.nullDescriptor)
1531 {
1532 int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1533 int mod =
1534 (int)((len / (formatIsR64(format) ? sizeof(uint64_t) : sizeof(uint32_t))) % numComponents);
1535 string sstoreValue = de::toString(storeValue);
1536 switch (mod)
1537 {
1538 case 0:
1539 break;
1540 case 1:
1541 expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1542 expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1543 break;
1544 case 2:
1545 expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1546 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1547 break;
1548 case 3:
1549 expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1550 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1551 break;
1552 }
1553 }
1554
1555 // Entirely in-bounds.
1556 checks << " if (c >= 0 && c < inboundcoords) {\n"
1557 " if (temp == expectedIB2) temp = "
1558 << vecType << "(0); else temp = " << vecType
1559 << "(1);\n"
1560 " }\n";
1561
1562 // normal out-of-bounds value
1563 if (m_data.testRobustness2)
1564 checks << " else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1565 else
1566 // image_robustness relaxes alpha which is allowed to be zero or one
1567 checks << " else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1568
1569 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1570 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1571 {
1572 checks << " else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = "
1573 << vecType << "(0);\n";
1574 }
1575
1576 // null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1577 if (m_data.nullDescriptor && m_data.formatQualifier &&
1578 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1579 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1580 numComponents < 4)
1581 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1582
1583 // non-volatile value replaced with stored value
1584 if (supportsStores(m_data.descriptorType) && !m_data.vol)
1585 {
1586 checks << " else if (temp == "
1587 << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType)
1588 << ") temp = " << vecType << "(0);\n";
1589
1590 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
1591 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
1592 {
1593
1594 for (int mask = (numComponents * numComponents) - 2; mask > 0; mask--)
1595 {
1596 checks << " else if (temp == " << vecType << "(";
1597 for (int vecIdx = 0; vecIdx < 4; vecIdx++)
1598 {
1599 if (mask & (1 << vecIdx))
1600 checks << storeValue;
1601 else
1602 checks << "0";
1603
1604 if (vecIdx != 3)
1605 checks << ",";
1606 }
1607 checks << ")) temp = " << vecType << "(0);\n";
1608 }
1609 }
1610 }
1611
1612 // value straddling the boundary, returning a partial vector
1613 if (expectedOOB2 != expectedOOB)
1614 checks << " else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType
1615 << "(0);\n";
1616 if (expectedOOB3 != expectedOOB)
1617 checks << " else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType
1618 << "(0);\n";
1619
1620 // failure
1621 checks << " else temp = " << vecType << "(1);\n";
1622 }
1623 // Accumulate any incorrect values.
1624 checks << " accum += abs(temp);\n";
1625
1626 // Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1627 if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1628 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1629 {
1630 // Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1631 string coord0 = genCoord("0", numCoords, m_data.samples, i);
1632 checks << " if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c")
1633 << "; else temp = " << vecType << "(0);\n";
1634 checks << " if (c != 0) temp -= " << expectedOOB << ";\n";
1635 checks << " accum += abs(temp);\n";
1636 }
1637 }
1638 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1639 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1640 {
1641 string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1642
1643 checks << " expectedIB2 = expectedIB;\n";
1644
1645 // Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1646 if (dataDependsOnLayer && i == numNormalizedCoords)
1647 {
1648 checks << " clampedLayer = clamp(c, 0, " << coordDim - 1 << ");\n";
1649 checks << " expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1650 }
1651
1652 stringstream normexpected;
1653 // Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
1654 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1655 (layered && i == numCoords - 1))
1656 normexpected << " temp -= expectedIB2;\n";
1657 else
1658 {
1659 normexpected << " if (c >= 0 && c < inboundcoords)\n";
1660 normexpected << " temp -= expectedIB2;\n";
1661 normexpected << " else\n";
1662 if (m_data.testRobustness2)
1663 normexpected << " temp -= " << expectedOOB << ";\n";
1664 else
1665 // image_robustness relaxes the alpha component, which is allowed to be either zero or one
1666 normexpected << " temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1667 }
1668
1669 checks << " temp = texture(texture0_1, " << coordNorm << ");\n";
1670 checks << normexpected.str();
1671 checks << " accum += abs(temp);\n";
1672 checks << " temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1673 checks << normexpected.str();
1674 checks << " accum += abs(temp);\n";
1675 checks << " temp = textureGrad(texture0_1, " << coordNorm << ", "
1676 << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", "
1677 << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1678 checks << normexpected.str();
1679 checks << " accum += abs(temp);\n";
1680 }
1681 if (m_data.nullDescriptor)
1682 {
1683 const char *sizeswiz;
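// Swizzle the size query result up to four components so it can be accumulated into a vec4;
// with a null descriptor, every component is expected to be zero.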
1684 switch (m_data.viewType)
1685 {
1686 default:
1687 DE_ASSERT(0); // Fallthrough
1688 case VK_IMAGE_VIEW_TYPE_1D:
1689 sizeswiz = ".xxxx";
1690 break;
1691 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
1692 sizeswiz = ".xyxx";
1693 break;
1694 case VK_IMAGE_VIEW_TYPE_2D:
1695 sizeswiz = ".xyxx";
1696 break;
1697 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1698 sizeswiz = ".xyzx";
1699 break;
1700 case VK_IMAGE_VIEW_TYPE_3D:
1701 sizeswiz = ".xyzx";
1702 break;
1703 case VK_IMAGE_VIEW_TYPE_CUBE:
1704 sizeswiz = ".xyxx";
1705 break;
1706 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
1707 sizeswiz = ".xyzx";
1708 break;
1709 }
1710 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1711 {
1712 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1713 {
1714 checks << " temp = textureSize(texture0_1, 0)" << sizeswiz << ";\n";
1715 checks << " accum += abs(temp);\n";
1716
1717 // checking textureSize with clearly out of range LOD values
1718 checks << " temp = textureSize(texture0_1, " << -i << ")" << sizeswiz << ";\n";
1719 checks << " accum += abs(temp);\n";
1720 checks << " temp = textureSize(texture0_1, " << (std::numeric_limits<int32_t>::max() - i) << ")"
1721 << sizeswiz << ";\n";
1722 checks << " accum += abs(temp);\n";
1723 }
1724 else
1725 {
1726 checks << " temp = textureSize(texture0_1)" << sizeswiz << ";\n";
1727 checks << " accum += abs(temp);\n";
1728 checks << " temp = textureSamples(texture0_1).xxxx;\n";
1729 checks << " accum += abs(temp);\n";
1730 }
1731 }
1732 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1733 {
1734 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1735 {
1736 checks << " temp = imageSize(image0_1)" << sizeswiz << ";\n";
1737 checks << " accum += abs(temp);\n";
1738 }
1739 else
1740 {
1741 checks << " temp = imageSize(image0_1)" << sizeswiz << ";\n";
1742 checks << " accum += abs(temp);\n";
1743 checks << " temp = imageSamples(image0_1).xxxx;\n";
1744 checks << " accum += abs(temp);\n";
1745 }
1746 }
1747 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1748 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1749 {
1750 // expect zero for runtime-sized array .length()
1751 checks << " temp = " << vecType << "(ssbo0_1.val.length());\n";
1752 checks << " accum += abs(temp);\n";
1753 checks << " temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1754 checks << " accum += abs(temp);\n";
1755 }
1756 }
1757 }
1758 checks << " }\n";
1759
1760 // Outside the coordinate loop because these queries only need to run once.
1761 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1762 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1763 {
1764 checks << " temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1765 checks << " temp = " << vecType << "(temp_ql);\n";
1766 checks << " accum += abs(temp);\n";
1767
1768 if (m_data.stage == STAGE_FRAGMENT)
1769 {
1770 // Here we only want to check that textureQueryLod returns 0 when
1771 // texture0_1 is null, so we don't need the actual texture coordinates
1772 // (which would require modifying the vertex shader below). Any coordinates
1773 // are fine; gl_FragCoord was chosen arbitrarily rather than, say, a constant 0.
1774 std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1775 checks << " vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1776 checks << " temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1777 checks << " temp = " << vecType << "(temp_ql);\n";
1778 checks << " accum += abs(temp);\n";
1779 }
1780 }
1781
1782 const bool needsScalarLayout = m_data.needsScalarBlockLayout();
1783 const uint32_t shaderBuildOptions =
1784 (needsScalarLayout ? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS) : 0u);
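// Scalar block layout produces tighter member offsets than std140/std430, so the SPIR-V
// builder must be told to accept them.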
1785
1786 const bool is64BitFormat = formatIsR64(m_data.format);
1787 std::string support =
1788 "#version 460 core\n"
1789 "#extension GL_EXT_nonuniform_qualifier : enable\n" +
1790 (needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1791 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1792 "#extension GL_EXT_control_flow_attributes : enable\n"
1793 "#extension GL_EXT_shader_image_load_formatted : enable\n";
1794 std::string SupportR64 = "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1795 "#extension GL_EXT_shader_image_int64 : require\n";
1796 if (is64BitFormat)
1797 support += SupportR64;
1798 if (m_data.stage == STAGE_RAYGEN)
1799 support += "#extension GL_EXT_ray_tracing : require\n";
1800
1801 std::string code = " " + vecType + " accum = " + vecType +
1802 "(0);\n"
1803 " " +
1804 vecType +
1805 " temp;\n"
1806 " " +
1807 qLevelType + " temp_ql;\n" + checks.str() + " " + vecType + " color = (accum != " + vecType +
1808 "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
1809
1810 switch (m_data.stage)
1811 {
1812 default:
1813 DE_ASSERT(0); // Fallthrough
1814 case STAGE_COMPUTE:
1815 {
1816 std::stringstream css;
1817 css << support << decls.str()
1818 << "layout(local_size_x = 1, local_size_y = 1) in;\n"
1819 "void main()\n"
1820 "{\n"
1821 << code
1822 << " imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1823 "}\n";
1824
1825 programCollection.glslSources.add("test")
1826 << glu::ComputeSource(css.str())
1827 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion,
1828 is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0,
1829 shaderBuildOptions);
1830 break;
1831 }
1832 case STAGE_RAYGEN:
1833 {
1834 std::stringstream css;
1835 css << support << decls.str()
1836 << "void main()\n"
1837 "{\n"
1838 << code
1839 << " imageStore(image0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1840 "}\n";
1841
1842 programCollection.glslSources.add("test")
1843 << glu::RaygenSource(css.str())
1844 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, shaderBuildOptions,
1845 true);
1846 break;
1847 }
1848 case STAGE_VERTEX:
1849 {
1850 std::stringstream vss;
1851 vss << support << decls.str()
1852 << "void main()\n"
1853 "{\n"
1854 << code << " imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM
1855 << "), color);\n"
1856 " gl_PointSize = 1.0f;\n"
1857 " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1858 "}\n";
1859
1860 programCollection.glslSources.add("test")
1861 << glu::VertexSource(vss.str())
1862 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1863 break;
1864 }
1865 case STAGE_FRAGMENT:
1866 {
1867 std::stringstream vss;
1868 vss << "#version 450 core\n"
1869 "void main()\n"
1870 "{\n"
1871 // full-viewport quad
1872 " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * "
1873 "float(gl_VertexIndex&1), 1);\n"
1874 "}\n";
1875
1876 programCollection.glslSources.add("vert")
1877 << glu::VertexSource(vss.str())
1878 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1879
1880 std::stringstream fss;
1881 fss << support << decls.str()
1882 << "void main()\n"
1883 "{\n"
1884 << code
1885 << " imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1886 "}\n";
1887
1888 programCollection.glslSources.add("test")
1889 << glu::FragmentSource(fss.str())
1890 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1891 break;
1892 }
1893 }
1894
1895 // The is64BitFormat conditions below are redundant given the check here. TODO: could this fill shader also support non-64-bit formats?
1896 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1897 {
1898 const std::string ivecCords =
1899 (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1900 std::stringstream fillShader;
1901
1902 fillShader
1903 << "#version 450\n"
1904 << SupportR64
1905 << "\n"
1906 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1907 "layout (" +
1908 getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1909 << string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image"
1910 << imageDim
1911 << +" u_resultImage;\n"
1912 "\n"
1913 "layout(std430, binding = 1) buffer inputBuffer\n"
1914 "{\n"
1915 " int"
1916 << (is64BitFormat ? "64_t" : "")
1917 << " data[];\n"
1918 "} inBuffer;\n"
1919 "\n"
1920 "void main(void)\n"
1921 "{\n"
1922 " int gx = int(gl_GlobalInvocationID.x);\n"
1923 " int gy = int(gl_GlobalInvocationID.y);\n"
1924 " int gz = int(gl_GlobalInvocationID.z);\n"
1925 " uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1926
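// Emit one imageStore per sample index; a compute fill is used because
// vkCmdCopyBufferToImage cannot write to multisample images.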
1927 for (int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1928 {
1929 fillShader << " imageStore(u_resultImage, " << ivecCords << ", " << ndx
1930 << ", i64vec4(inBuffer.data[index]));\n";
1931 }
1932
1933 fillShader << "}\n";
1934
1935 programCollection.glslSources.add("fillShader")
1936 << glu::ComputeSource(fillShader.str())
1937 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion,
1938 is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0,
1939 shaderBuildOptions);
1940 }
1941 }
1942
1943 VkImageType imageViewTypeToImageType(VkImageViewType type)
1944 {
1945 switch (type)
1946 {
1947 case VK_IMAGE_VIEW_TYPE_1D:
1948 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
1949 return VK_IMAGE_TYPE_1D;
1950 case VK_IMAGE_VIEW_TYPE_2D:
1951 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1952 case VK_IMAGE_VIEW_TYPE_CUBE:
1953 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
1954 return VK_IMAGE_TYPE_2D;
1955 case VK_IMAGE_VIEW_TYPE_3D:
1956 return VK_IMAGE_TYPE_3D;
1957 default:
1958 DE_ASSERT(false);
1959 }
1960
1961 return VK_IMAGE_TYPE_2D;
1962 }
1963
1964 TestInstance *RobustnessExtsTestCase::createInstance(Context &context) const
1965 {
1966 return new RobustnessExtsTestInstance(context, m_data);
1967 }
1968
1969 tcu::TestStatus RobustnessExtsTestInstance::iterate(void)
1970 {
1971 const VkInstance instance = m_context.getInstance();
1972 const InstanceInterface &vki = m_context.getInstanceInterface();
1973 const VkDevice device = getLogicalDevice(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1974 const vk::DeviceInterface &vk =
1975 getDeviceInterface(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1976 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1977 SimpleAllocator allocator(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1978
1979 Layout layout;
1980 generateLayout(layout, m_data);
1981
1982 // Get needed properties.
1983 VkPhysicalDeviceProperties2 properties = initVulkanStructure();
1984
1985 #ifndef CTS_USES_VULKANSC
1986 VkPhysicalDeviceRayTracingPipelinePropertiesKHR rayTracingProperties = initVulkanStructure();
1987 #endif
1988
1989 VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties = initVulkanStructure();
1990
1991 #ifndef CTS_USES_VULKANSC
1992 if (m_context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
1993 {
1994 rayTracingProperties.pNext = properties.pNext;
1995 properties.pNext = &rayTracingProperties;
1996 }
1997 #endif
1998
1999 if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
2000 {
2001 robustness2Properties.pNext = properties.pNext;
2002 properties.pNext = &robustness2Properties;
2003 }
2004
2005 vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
2006
2007 if (m_data.testRobustness2)
2008 {
2009 if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
2010 robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
2011 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
2012
2013 if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
2014 robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
2015 !deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
2016 return tcu::TestStatus(QP_TEST_RESULT_FAIL,
2017 "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
2018 }
2019
2020 VkPipelineBindPoint bindPoint;
2021
2022 switch (m_data.stage)
2023 {
2024 case STAGE_COMPUTE:
2025 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
2026 break;
2027 #ifndef CTS_USES_VULKANSC
2028 case STAGE_RAYGEN:
2029 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR;
2030 break;
2031 #endif
2032 default:
2033 bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
2034 break;
2035 }
2036
2037 Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
2038 Move<vk::VkDescriptorPool> descriptorPool;
2039 Move<vk::VkDescriptorSet> descriptorSet;
2040
2041 int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
2042 int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
2043
2044 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
2045
2046 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
2047
2048 #ifndef CTS_USES_VULKANSC
2049 VkDescriptorSetLayoutCreateFlags layoutCreateFlags =
2050 m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
2051 #else
2052 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
2053 #endif
2054
2055 // Create a layout and allocate a descriptor set for it.
2056
2057 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo = {
2058 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, DE_NULL,
2059
2060 layoutCreateFlags, (uint32_t)bindings.size(), bindings.empty() ? DE_NULL : bindings.data()};
2061
2062 descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
2063
2064 vk::DescriptorPoolBuilder poolBuilder;
2065 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
2066 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
2067 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
2068 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
2069 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
2070 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
2071 poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
2072 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
2073
2074 descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
2075
2076 const void *pNext = DE_NULL;
2077
2078 if (!m_data.pushDescriptor)
2079 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
2080
2081 BufferWithMemoryPtr buffer;
2082
2083 uint8_t *bufferPtr = DE_NULL;
2084 if (!m_data.nullDescriptor)
2085 {
2086 // Create a buffer to hold data for all descriptors.
2087 VkDeviceSize size = de::max((VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1), (VkDeviceSize)256);
2088
2089 VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
2090 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2091 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2092 {
2093 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
2094 usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
2095 }
2096 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2097 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2098 {
2099 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
2100 usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
2101 }
2102 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
2103 {
2104 usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
2105 }
2106 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
2107 {
2108 usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
2109 }
2110 else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2111 {
2112 size = m_data.bufferLen;
2113 }
2114
2115 buffer = BufferWithMemoryPtr(new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(size, usage),
2116 MemoryRequirement::HostVisible));
2117 bufferPtr = (uint8_t *)buffer->getAllocation().getHostPtr();
2118
2119 deMemset(bufferPtr, 0x3f, (size_t)size);
2120
2121 deMemset(bufferPtr, 0, m_data.bufferLen);
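// The allocation was first filled with a nonzero pattern so stray out-of-bounds reads are
// detectable; robust accesses may legally return buffer contents up to the access size
// alignment boundary, so the rounded-up ranges below are zeroed as well.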
2122 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2123 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2124 {
2125 deMemset(
2126 bufferPtr, 0,
2127 deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
2128 }
2129 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2130 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2131 {
2132 deMemset(
2133 bufferPtr, 0,
2134 deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
2135 }
2136 }
2137
2138 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
2139
2140 Move<VkDescriptorSetLayout> descriptorSetLayoutR64;
2141 Move<VkDescriptorPool> descriptorPoolR64;
2142 Move<VkDescriptorSet> descriptorSetFillImage;
2143 Move<VkShaderModule> shaderModuleFillImage;
2144 Move<VkPipelineLayout> pipelineLayoutFillImage;
2145 Move<VkPipeline> pipelineFillImage;
2146
2147 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
2148 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2149 VkQueue queue;
2150
2151 vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
2152
2153 const VkImageSubresourceRange barrierRange = {
2154 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2155 0u, // uint32_t baseMipLevel;
2156 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2157 0u, // uint32_t baseArrayLayer;
2158 VK_REMAINING_ARRAY_LAYERS // uint32_t layerCount;
2159 };
2160
2161 VkImageMemoryBarrier preImageBarrier = {
2162 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
2163 DE_NULL, // const void* pNext
2164 0u, // VkAccessFlags srcAccessMask
2165 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
2166 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
2167 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
2168 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
2169 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
2170 DE_NULL, // VkImage image
2171 barrierRange, // VkImageSubresourceRange subresourceRange;
2172 };
2173
2174 VkImageMemoryBarrier postImageBarrier = {
2175 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2176 DE_NULL, // const void* pNext;
2177 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
2178 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
2179 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
2180 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
2181 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
2182 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
2183 DE_NULL, // VkImage image;
2184 barrierRange, // VkImageSubresourceRange subresourceRange;
2185 };
2186
2187 vk::VkClearColorValue clearValue;
2188 clearValue.uint32[0] = 0u;
2189 clearValue.uint32[1] = 0u;
2190 clearValue.uint32[2] = 0u;
2191 clearValue.uint32[3] = 0u;
2192
2193 beginCommandBuffer(vk, *cmdBuffer, 0u);
2194
2195 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
2196 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
2197 typedef de::SharedPtr<ImageWithMemory> ImageWithMemorySp;
2198 typedef de::SharedPtr<Unique<VkImageView>> VkImageViewSp;
2199
2200 vector<BufferViewHandleSp> bufferViews(1);
2201
2202 VkImageCreateFlags mutableFormatFlag = 0;
2203 // The 64-bit image tests use a view format which differs from the image.
2204 if (formatIsR64(m_data.format))
2205 mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2206 VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2207 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2208 imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2209
2210 const bool featureSampledImage =
2211 ((getPhysicalDeviceFormatProperties(vki, physicalDevice, m_data.format).optimalTilingFeatures &
2212 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2213
2214 const VkImageUsageFlags usageSampledImage =
2215 (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2216
2217 const VkImageCreateInfo outputImageCreateInfo = {
2218 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2219 DE_NULL, // const void* pNext;
2220 mutableFormatFlag, // VkImageCreateFlags flags;
2221 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2222 m_data.format, // VkFormat format;
2223 {
2224 DIM, // uint32_t width;
2225 DIM, // uint32_t height;
2226 1u // uint32_t depth;
2227 }, // VkExtent3D extent;
2228 1u, // uint32_t mipLevels;
2229 1u, // uint32_t arrayLayers;
2230 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2231 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2232 VK_IMAGE_USAGE_STORAGE_BIT | usageSampledImage | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2233 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2234 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2235 0u, // uint32_t queueFamilyIndexCount;
2236 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2237 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2238 };
2239
2240 uint32_t width = m_data.imageDim[0];
2241 uint32_t height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ?
2242 m_data.imageDim[1] :
2243 1;
2244 uint32_t depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2245 uint32_t layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2246 m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2247 m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ?
2248 m_data.imageDim[2] :
2249 1;
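// The extent and layer count are derived from imageDim according to the view dimensionality:
// 1D arrays take their layer count from imageDim[1], 2D arrays and cube (array) views from imageDim[2].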
2250
2251 const VkImageUsageFlags usageImage =
2252 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT :
2253 (VkImageUsageFlagBits)0);
2254
2255 const VkImageCreateInfo imageCreateInfo = {
2256 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2257 DE_NULL, // const void* pNext;
2258 imageCreateFlags, // VkImageCreateFlags flags;
2259 imageViewTypeToImageType(m_data.viewType), // VkImageType imageType;
2260 m_data.format, // VkFormat format;
2261 {
2262 width, // uint32_t width;
2263 height, // uint32_t height;
2264 depth // uint32_t depth;
2265 }, // VkExtent3D extent;
2266 1u, // uint32_t mipLevels;
2267 layers, // uint32_t arrayLayers;
2268 m_data.samples, // VkSampleCountFlagBits samples;
2269 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2270 usageImage | usageSampledImage | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2271 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2272 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2273 0u, // uint32_t queueFamilyIndexCount;
2274 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2275 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2276 };
2277
2278 VkImageViewCreateInfo imageViewCreateInfo = {
2279 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2280 DE_NULL, // const void* pNext;
2281 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
2282 DE_NULL, // VkImage image;
2283 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2284 m_data.format, // VkFormat format;
2285 {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
2286 VK_COMPONENT_SWIZZLE_IDENTITY}, // VkComponentMapping components;
2287 {
2288 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2289 0u, // uint32_t baseMipLevel;
2290 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2291 0u, // uint32_t baseArrayLayer;
2292 VK_REMAINING_ARRAY_LAYERS // uint32_t layerCount;
2293 } // VkImageSubresourceRange subresourceRange;
2294 };
2295
2296 vector<ImageWithMemorySp> images(2);
2297 vector<VkImageViewSp> imageViews(2);
2298
2299 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2300 {
2301 uint32_t *ptr = (uint32_t *)bufferPtr;
2302 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2303 }
2304
2305 BufferWithMemoryPtr bufferImageR64;
2306 BufferWithMemoryPtr bufferOutputImageR64;
2307 const VkDeviceSize sizeOutputR64 = 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height *
2308 outputImageCreateInfo.extent.depth;
2309 const VkDeviceSize sizeOneLayers =
2310 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2311 const VkDeviceSize sizeImageR64 = sizeOneLayers * layers;
2312
2313 if (formatIsR64(m_data.format))
2314 {
2315 bufferOutputImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2316 vk, device, allocator, makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2317 MemoryRequirement::HostVisible));
2318
2319 uint64_t *bufferUint64Ptr = (uint64_t *)bufferOutputImageR64->getAllocation().getHostPtr();
2320
2321 for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2322 {
2323 bufferUint64Ptr[ndx] = 0;
2324 }
2325 flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2326
2327 bufferImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2328 vk, device, allocator,
2329 makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2330 MemoryRequirement::HostVisible));
2331
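// Seed each layer with a distinct 64-bit pattern, except for cube views where all faces share
// the same data; this mirrors the per-layer clear values used for non-64-bit formats below.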
2332 for (uint32_t layerNdx = 0; layerNdx < layers; ++layerNdx)
2333 {
2334 bufferUint64Ptr = (uint64_t *)bufferImageR64->getAllocation().getHostPtr();
2335 bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2336
2337 for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2338 {
2339 bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE &&
2340 m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ?
2341 layerNdx :
2342 0);
2343 }
2344 }
2345 flushAlloc(vk, device, bufferImageR64->getAllocation());
2346 }
2347
2348 for (size_t b = 0; b < bindings.size(); ++b)
2349 {
2350 VkDescriptorSetLayoutBinding &binding = bindings[b];
2351
2352 if (binding.descriptorCount == 0)
2353 continue;
2354 if (b == 1 && m_data.nullDescriptor)
2355 continue;
2356
2357 DE_ASSERT(binding.descriptorCount == 1);
2358 switch (binding.descriptorType)
2359 {
2360 default:
2361 DE_ASSERT(0); // Fallthrough
2362 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2363 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2364 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2365 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2366 {
2367 uint32_t *ptr = (uint32_t *)bufferPtr;
2368 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2369 }
2370 break;
2371 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2372 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2373 {
2374 uint32_t *ptr = (uint32_t *)bufferPtr;
2375 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2376
2377 const vk::VkBufferViewCreateInfo viewCreateInfo = {
2378 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2379 DE_NULL,
2380 (vk::VkBufferViewCreateFlags)0,
2381 **buffer, // buffer
2382 m_data.format, // format
2383 (vk::VkDeviceSize)0, // offset
2384 (vk::VkDeviceSize)m_data.bufferLen // range
2385 };
2386 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2387 bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2388 }
2389 break;
2390 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2391 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2392 {
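// Presumably because R64 formats cannot be sampled directly, 64-bit images read through a
// combined image sampler are aliased with a two-component 32-bit view; this relies on
// VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT being set above.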
2393 if (bindings.size() > 1 && bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2394 {
2395 if (m_data.format == VK_FORMAT_R64_SINT)
2396 imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2397
2398 if (m_data.format == VK_FORMAT_R64_UINT)
2399 imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2400 }
2401
2402 if (b == 0)
2403 {
2404 images[b] = ImageWithMemorySp(
2405 new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2406 imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2407 }
2408 else
2409 {
2410 images[b] = ImageWithMemorySp(
2411 new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2412 imageViewCreateInfo.viewType = m_data.viewType;
2413 }
2414 imageViewCreateInfo.image = **images[b];
2415 imageViews[b] =
2416 VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2417
2418 VkImage img = **images[b];
2419 const VkBuffer &bufferR64 = ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2420 const VkImageCreateInfo &imageInfo = ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2421 const uint32_t clearLayers = b == 0 ? 1 : layers;
2422
2423 if (!formatIsR64(m_data.format))
2424 {
2425 preImageBarrier.image = img;
2426 if (b == 1)
2427 {
2428 if (formatIsFloat(m_data.format))
2429 {
2430 deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2431 }
2432 else if (formatIsSignedInt(m_data.format))
2433 {
2434 deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2435 }
2436 else
2437 {
2438 deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2439 }
2440 }
2441 postImageBarrier.image = img;
2442
2443 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2444 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2445 (const VkBufferMemoryBarrier *)DE_NULL, 1, &preImageBarrier);
2446
2447 for (unsigned int i = 0; i < clearLayers; ++i)
2448 {
2449 const VkImageSubresourceRange clearRange = {
2450 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2451 0u, // uint32_t baseMipLevel;
2452 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2453 i, // uint32_t baseArrayLayer;
2454 1 // uint32_t layerCount;
2455 };
2456
2457 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1,
2458 &clearRange);
2459
2460 // Use same data for all faces for cube(array), otherwise make value a function of the layer
2461 if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2462 {
2463 if (formatIsFloat(m_data.format))
2464 clearValue.float32[0] += 1;
2465 else if (formatIsSignedInt(m_data.format))
2466 clearValue.int32[0] += 1;
2467 else
2468 clearValue.uint32[0] += 1;
2469 }
2470 }
2471 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2472 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2473 (const VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);
2474 }
2475 else
2476 {
2477 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2478 {
2479 const VkImageSubresourceRange subresourceRange =
2480 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2481 const VkImageMemoryBarrier imageBarrierPre =
2482 makeImageMemoryBarrier(0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2483 VK_IMAGE_LAYOUT_GENERAL, img, subresourceRange);
2484 const VkImageMemoryBarrier imageBarrierPost =
2485 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
2486 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, img, subresourceRange);
2487
2488 descriptorSetLayoutR64 =
2489 DescriptorSetLayoutBuilder()
2490 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2491 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2492 .build(vk, device);
2493
2494 descriptorPoolR64 = DescriptorPoolBuilder()
2495 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2496 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)
2497 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2498
2499 descriptorSetFillImage = makeDescriptorSet(vk, device, *descriptorPoolR64, *descriptorSetLayoutR64);
2500
2501 shaderModuleFillImage =
2502 createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2503 pipelineLayoutFillImage = makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2504 pipelineFillImage =
2505 makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2506
2507 const VkDescriptorImageInfo descResultImageInfo =
2508 makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2509 const VkDescriptorBufferInfo descResultBufferInfo =
2510 makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2511
2512 DescriptorSetUpdateBuilder()
2513 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u),
2514 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2515 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u),
2516 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2517 .update(vk, device);
2518
2519 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2520 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2521 (const VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrierPre);
2522
2523 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2524 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u,
2525 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2526
2527 vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2528
2529 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2530 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0,
2531 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL,
2532 1, &imageBarrierPost);
2533 }
2534 else
2535 {
2536 VkDeviceSize size = ((b == 0) ? sizeOutputR64 : sizeImageR64);
2537 const vector<VkBufferImageCopy> bufferImageCopy(
2538 1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT,
2539 0, 0, clearLayers)));
2540
2541 copyBufferToImage(vk, *cmdBuffer, bufferR64, size, bufferImageCopy, VK_IMAGE_ASPECT_COLOR_BIT, 1,
2542 clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2543 }
2544 }
2545 }
2546 break;
2547 }
2548 }
2549
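// Nearest-filtering sampler with a transparent black border, so out-of-range normalized
// coordinates return the same zero value that robust out-of-bounds accesses are expected to produce.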
2550 const VkSamplerCreateInfo samplerParams = {
2551 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
2552 DE_NULL, // const void* pNext;
2553 0, // VkSamplerCreateFlags flags;
2554 VK_FILTER_NEAREST, // VkFilter magFilter:
2555 VK_FILTER_NEAREST, // VkFilter minFilter;
2556 VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
2557 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeU;
2558 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeV;
2559 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeW;
2560 0.0f, // float mipLodBias;
2561 VK_FALSE, // VkBool32 anisotropyEnable;
2562 1.0f, // float maxAnisotropy;
2563 VK_FALSE, // VkBool32 compareEnable;
2564 VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
2565 0.0f, // float minLod;
2566 0.0f, // float maxLod;
2567 formatIsFloat(m_data.format) ? VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2568 VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
2569 VK_FALSE // VkBool32 unnormalizedCoordinates;
2570 };
2571
2572 Move<VkSampler> sampler(createSampler(vk, device, &samplerParams));
2573
2574 // Flush modified memory.
2575 if (!m_data.nullDescriptor)
2576 flushAlloc(vk, device, buffer->getAllocation());
2577
2578 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
2579 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
2580 DE_NULL, // pNext
2581 (VkPipelineLayoutCreateFlags)0,
2582 1u, // setLayoutCount
2583 &descriptorSetLayout.get(), // pSetLayouts
2584 0u, // pushConstantRangeCount
2585 DE_NULL, // pPushConstantRanges
2586 };
2587
2588 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2589
2590 BufferWithMemoryPtr copyBuffer;
2591 copyBuffer = BufferWithMemoryPtr(new BufferWithMemory(
2592 vk, device, allocator, makeBufferCreateInfo(DIM * DIM * 16, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2593 MemoryRequirement::HostVisible));
2594
2595 {
2596 vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2597 vector<VkDescriptorImageInfo> imageInfoVec(2);
2598 vector<VkBufferView> bufferViewVec(2);
2599 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2600 int vecIndex = 0;
2601 int numDynamic = 0;
2602
2603 #ifndef CTS_USES_VULKANSC
2604 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore, bufTemplateEntriesBefore,
2605 texelBufTemplateEntriesBefore;
2606 #endif
2607
2608 for (size_t b = 0; b < bindings.size(); ++b)
2609 {
2610 VkDescriptorSetLayoutBinding &binding = bindings[b];
2611 // Fill in the descriptor infos and write/template entries for this binding.
2612 if (binding.descriptorCount > 0)
2613 {
2614 // output image
2615 switch (binding.descriptorType)
2616 {
2617 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2618 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2619 // Output image.
2620 if (b == 1 && m_data.nullDescriptor)
2621 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2622 else
2623 imageInfoVec[vecIndex] =
2624 makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2625 break;
2626 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2627 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2628 if (b == 1 && m_data.nullDescriptor)
2629 bufferViewVec[vecIndex] = DE_NULL;
2630 else
2631 bufferViewVec[vecIndex] = **bufferViews[0];
2632 break;
2633 default:
2634 // Other descriptor types.
2635 if (b == 1 && m_data.nullDescriptor)
2636 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2637 else
2638 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2639 break;
2640 }
2641
2642 VkWriteDescriptorSet w = {
2643 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
2644 DE_NULL, // pNext
2645 m_data.pushDescriptor ? DE_NULL : *descriptorSet, // dstSet
2646 (uint32_t)b, // binding
2647 0, // dstArrayElement
2648 1u, // descriptorCount
2649 binding.descriptorType, // descriptorType
2650 &imageInfoVec[vecIndex], // pImageInfo
2651 &bufferInfoVec[vecIndex], // pBufferInfo
2652 &bufferViewVec[vecIndex], // pTexelBufferView
2653 };
2654
2655 #ifndef CTS_USES_VULKANSC
2656 VkDescriptorUpdateTemplateEntry templateEntry = {
2657 (uint32_t)b, // uint32_t dstBinding;
2658 0, // uint32_t dstArrayElement;
2659 1u, // uint32_t descriptorCount;
2660 binding.descriptorType, // VkDescriptorType descriptorType;
2661 0, // size_t offset;
2662 0, // size_t stride;
2663 };
2664
2665 switch (binding.descriptorType)
2666 {
2667 default:
2668 DE_ASSERT(0); // Fallthrough
2669 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2670 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2671 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2672 imgTemplateEntriesBefore.push_back(templateEntry);
2673 break;
2674 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2675 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2676 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2677 texelBufTemplateEntriesBefore.push_back(templateEntry);
2678 break;
2679 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2680 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2681 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2682 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2683 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2684 bufTemplateEntriesBefore.push_back(templateEntry);
2685 break;
2686 }
2687 #endif
2688
2689 vecIndex++;
2690
2691 writesBeforeBindVec.push_back(w);
2692
2693 // Count the number of dynamic descriptors in this set.
2694 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2695 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2696 {
2697 numDynamic++;
2698 }
2699 }
2700 }
2701
2702 // Make zeros have at least one element so &zeros[0] works
2703 vector<uint32_t> zeros(de::max(1, numDynamic));
2704 deMemset(&zeros[0], 0, numDynamic * sizeof(uint32_t));
2705
2706 // Select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate based on the test parameters.
2707 if (m_data.useTemplate)
2708 {
2709 #ifndef CTS_USES_VULKANSC
2710 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo = {
2711 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2712 NULL, // void* pNext;
2713 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2714 0, // uint32_t descriptorUpdateEntryCount;
2715 DE_NULL, // const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
2716 m_data.pushDescriptor ?
2717 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2718 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2719 descriptorSetLayout.get(), // VkDescriptorSetLayout descriptorSetLayout;
2720 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2721 *pipelineLayout, // VkPipelineLayout pipelineLayout;
2722 0, // uint32_t set;
2723 };
2724
2725 void *templateVectorData[] = {
2726 imageInfoVec.data(),
2727 bufferInfoVec.data(),
2728 bufferViewVec.data(),
2729 };
2730
2731 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] = {
2732 &imgTemplateEntriesBefore,
2733 &bufTemplateEntriesBefore,
2734 &texelBufTemplateEntriesBefore,
2735 };
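// Each template entry's offset indexes into one of these flat info arrays, so a separate
// update template is created and applied per descriptor info type.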
2736
2737 if (m_data.pushDescriptor)
2738 {
2739 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2740 {
2741 if (templateVectorsBefore[i]->size())
2742 {
2743 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsBefore[i]->size();
2744 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2745 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2746 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2747 vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout,
2748 0, templateVectorData[i]);
2749 }
2750 }
2751 }
2752 else
2753 {
2754 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2755 {
2756 if (templateVectorsBefore[i]->size())
2757 {
2758 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsBefore[i]->size();
2759 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2760 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2761 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2762 vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate,
2763 templateVectorData[i]);
2764 }
2765 }
2766
2767 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic,
2768 &zeros[0]);
2769 }
2770 #endif
2771 }
2772 else
2773 {
2774 if (m_data.pushDescriptor)
2775 {
2776 #ifndef CTS_USES_VULKANSC
2777 if (writesBeforeBindVec.size())
2778 {
2779 vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0,
2780 (uint32_t)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2781 }
2782 #endif
2783 }
2784 else
2785 {
2786 if (writesBeforeBindVec.size())
2787 {
2788 vk.updateDescriptorSets(device, (uint32_t)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0,
2789 NULL);
2790 }
2791
2792 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic,
2793 &zeros[0]);
2794 }
2795 }
2796 }
2797
2798 #ifndef CTS_USES_VULKANSC
2799 // For graphics pipeline library cases.
2800 Move<VkPipeline> vertexInputLib;
2801 Move<VkPipeline> preRasterShaderLib;
2802 Move<VkPipeline> fragShaderLib;
2803 Move<VkPipeline> fragOutputLib;
2804 #endif // CTS_USES_VULKANSC
2805
2806 Move<VkPipeline> pipeline;
2807 Move<VkRenderPass> renderPass;
2808 Move<VkFramebuffer> framebuffer;
2809
2810 #ifndef CTS_USES_VULKANSC
2811 BufferWithMemoryPtr sbtBuffer;
2812 const auto sbtFlags = (VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR |
2813 VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
2814 VkStridedDeviceAddressRegionKHR rgenSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2815 VkStridedDeviceAddressRegionKHR missSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2816 VkStridedDeviceAddressRegionKHR hitSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2817 VkStridedDeviceAddressRegionKHR callSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2818 const auto sgHandleSize = rayTracingProperties.shaderGroupHandleSize;
2819 #endif // CTS_USES_VULKANSC
2820
2821 if (m_data.stage == STAGE_COMPUTE)
2822 {
2823 const Unique<VkShaderModule> shader(
2824 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2825
2826 const VkPipelineShaderStageCreateInfo pipelineShaderStageParams = {
2827 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2828 nullptr, // const void* pNext;
2829 static_cast<VkPipelineShaderStageCreateFlags>(0u), // VkPipelineShaderStageCreateFlags flags;
2830 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
2831 *shader, // VkShaderModule module;
2832 "main", // const char* pName;
2833 nullptr, // const VkSpecializationInfo* pSpecializationInfo;
2834 };
2835
2836 VkComputePipelineCreateInfo pipelineCreateInfo = {
2837 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
2838 nullptr, // const void* pNext;
2839 static_cast<VkPipelineCreateFlags>(0u), // VkPipelineCreateFlags flags;
2840 pipelineShaderStageParams, // VkPipelineShaderStageCreateInfo stage;
2841 *pipelineLayout, // VkPipelineLayout layout;
2842 DE_NULL, // VkPipeline basePipelineHandle;
2843 0, // int32_t basePipelineIndex;
2844 };
2845
2846 #ifndef CTS_USES_VULKANSC
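// For VK_EXT_pipeline_robustness, robust behavior is requested per pipeline by chaining
// VkPipelineRobustnessCreateInfoEXT, rather than by enabling a device-wide feature.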
2847 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2848 if (m_data.needsPipelineRobustness())
2849 {
2850 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2851 pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2852 }
2853 #endif
2854
2855 pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
2856 }
2857 #ifndef CTS_USES_VULKANSC
2858 else if (m_data.stage == STAGE_RAYGEN)
2859 {
2860 const Unique<VkShaderModule> shader(
2861 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2862
2863 const VkPipelineShaderStageCreateInfo shaderCreateInfo = {
2864 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2865 nullptr,
2866 0u, // flags
2867 VK_SHADER_STAGE_RAYGEN_BIT_KHR, // stage
2868 *shader, // shader
2869 "main",
2870 nullptr, // pSpecializationInfo
2871 };
2872
2873 VkRayTracingShaderGroupCreateInfoKHR group = {
2874 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR,
2875 nullptr,
2876 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, // type
2877 0, // generalShader
2878 VK_SHADER_UNUSED_KHR, // closestHitShader
2879 VK_SHADER_UNUSED_KHR, // anyHitShader
2880 VK_SHADER_UNUSED_KHR, // intersectionShader
2881 nullptr, // pShaderGroupCaptureReplayHandle
2882 };
2883
2884 VkRayTracingPipelineCreateInfoKHR pipelineCreateInfo = {
2885 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR, // sType
2886 nullptr, // pNext
2887 0u, // flags
2888 1u, // stageCount
2889 &shaderCreateInfo, // pStages
2890 1u, // groupCount
2891 &group, // pGroups
2892 0, // maxPipelineRayRecursionDepth
2893 nullptr, // pLibraryInfo
2894 nullptr, // pLibraryInterface
2895 nullptr, // pDynamicState
2896 *pipelineLayout, // layout
2897 (vk::VkPipeline)0, // basePipelineHandle
2898 0u, // basePipelineIndex
2899 };
2900
2901 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2902 if (m_data.needsPipelineRobustness())
2903 {
2904 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2905 pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2906 }
2907
2908 pipeline = createRayTracingPipelineKHR(vk, device, VK_NULL_HANDLE, VK_NULL_HANDLE, &pipelineCreateInfo);
2909
2910 sbtBuffer = BufferWithMemoryPtr(
2911 new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(sgHandleSize, sbtFlags),
2912 (MemoryRequirement::HostVisible | MemoryRequirement::DeviceAddress)));
2913
2914 uint32_t *ptr = (uint32_t *)sbtBuffer->getAllocation().getHostPtr();
2915 invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2916
2917 vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, sgHandleSize, ptr);
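// Only the raygen region is used: the SBT holds the single general group handle queried
// above, and the miss/hit/callable regions stay null.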
2918
2919 const VkBufferDeviceAddressInfo deviceAddressInfo{
2920 VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType sType
2921 nullptr, // const void* pNext
2922 sbtBuffer->get() // VkBuffer buffer;
2923 };
2924 const auto sbtAddress = vk.getBufferDeviceAddress(device, &deviceAddressInfo);
2925 rgenSBTRegion = makeStridedDeviceAddressRegionKHR(sbtAddress, sgHandleSize, sgHandleSize);
2926 }
2927 #endif
2928 else
2929 {
2930 const VkSubpassDescription subpassDesc = {
2931 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2932 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2933 0u, // uint32_t inputAttachmentCount
2934 DE_NULL, // const VkAttachmentReference* pInputAttachments
2935 0u, // uint32_t colorAttachmentCount
2936 DE_NULL, // const VkAttachmentReference* pColorAttachments
2937 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2938 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2939 0u, // uint32_t preserveAttachmentCount
2940 DE_NULL // const uint32_t* pPreserveAttachments
2941 };
2942
2943 const std::vector<VkSubpassDependency> subpassDependencies = {
2944 makeSubpassDependency(VK_SUBPASS_EXTERNAL, // uint32_t srcSubpass
2945 0, // uint32_t dstSubpass
2946 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2947 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VkPipelineStageFlags dstStageMask
2948 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2949 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
2950 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2951 ),
2952 makeSubpassDependency(0, 0, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
2953 ((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT :
2954 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
2955 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT, 0u),
2956 };
2957
2958 const VkRenderPassCreateInfo renderPassParams = {
2959 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
2960 DE_NULL, // const void* pNext
2961 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2962 0u, // uint32_t attachmentCount
2963 DE_NULL, // const VkAttachmentDescription* pAttachments
2964 1u, // uint32_t subpassCount
2965 &subpassDesc, // const VkSubpassDescription* pSubpasses
2966 de::sizeU32(subpassDependencies), // uint32_t dependencyCount
2967 de::dataOrNull(subpassDependencies), // const VkSubpassDependency* pDependencies
2968 };
2969
2970 renderPass = createRenderPass(vk, device, &renderPassParams);
2971
2972 const vk::VkFramebufferCreateInfo framebufferParams = {
2973 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2974 DE_NULL, // pNext
2975 (vk::VkFramebufferCreateFlags)0,
2976 *renderPass, // renderPass
2977 0u, // attachmentCount
2978 DE_NULL, // pAttachments
2979 DIM, // width
2980 DIM, // height
2981 1u, // layers
2982 };
2983
2984 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2985
2986 const VkVertexInputBindingDescription vertexInputBindingDescription = {
2987 0u, // uint32_t binding
2988 (uint32_t)formatBytes, // uint32_t stride
2989 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate
2990 };
2991
2992 const VkVertexInputAttributeDescription vertexInputAttributeDescription = {
2993 0u, // uint32_t location
2994 0u, // uint32_t binding
2995 m_data.format, // VkFormat format
2996 0u // uint32_t offset
2997 };
2998
2999 uint32_t numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
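// Only VERTEX_ATTRIBUTE_FETCH tests declare a vertex binding/attribute; all other cases get an
// empty vertex input state.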
3000
3001 VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
3002 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
3003 DE_NULL, // const void* pNext;
3004 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
3005 numAttribs, // uint32_t vertexBindingDescriptionCount;
3006 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
3007 numAttribs, // uint32_t vertexAttributeDescriptionCount;
3008 &vertexInputAttributeDescription // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
3009 };
3010
3011 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
3012 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
3013 DE_NULL, // const void* pNext;
3014 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
3015 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST :
3016 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
3017 VK_FALSE // VkBool32 primitiveRestartEnable;
3018 };
3019
3020 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
3021 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
3022 DE_NULL, // const void* pNext;
3023 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
3024 VK_FALSE, // VkBool32 depthClampEnable;
3025 (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
3026 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
3027 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
3028 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
3029 VK_FALSE, // VkBool32 depthBiasEnable;
3030 0.0f, // float depthBiasConstantFactor;
3031 0.0f, // float depthBiasClamp;
3032 0.0f, // float depthBiasSlopeFactor;
3033 1.0f // float lineWidth;
3034 };
3035
3036 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
3037 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
3038 DE_NULL, // const void* pNext
3039 0u, // VkPipelineMultisampleStateCreateFlags flags
3040 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
3041 VK_FALSE, // VkBool32 sampleShadingEnable
3042 1.0f, // float minSampleShading
3043 DE_NULL, // const VkSampleMask* pSampleMask
3044 VK_FALSE, // VkBool32 alphaToCoverageEnable
3045 VK_FALSE // VkBool32 alphaToOneEnable
3046 };
3047
3048 VkViewport viewport = makeViewport(DIM, DIM);
3049 VkRect2D scissor = makeRect2D(DIM, DIM);
3050
3051 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
3052 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
3053 DE_NULL, // const void* pNext
3054 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
3055 1u, // uint32_t viewportCount
3056 &viewport, // const VkViewport* pViewports
3057 1u, // uint32_t scissorCount
3058 &scissor // const VkRect2D* pScissors
3059 };
3060
3061 Move<VkShaderModule> fs;
3062 Move<VkShaderModule> vs;
3063
3064 uint32_t numStages;
3065 if (m_data.stage == STAGE_VERTEX)
3066 {
3067 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
3068 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus module; unused, since stageCount is 1 below
3069 numStages = 1u;
3070 }
3071 else
3072 {
3073 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
3074 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
3075 numStages = 2u;
3076 }
3077
3078 VkPipelineShaderStageCreateInfo shaderCreateInfo[2] = {
3079 {
3080 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, DE_NULL, (VkPipelineShaderStageCreateFlags)0,
3081 VK_SHADER_STAGE_VERTEX_BIT, // stage
3082 *vs, // shader
3083 "main",
3084 DE_NULL, // pSpecializationInfo
3085 },
3086 {
3087 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, DE_NULL, (VkPipelineShaderStageCreateFlags)0,
3088 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
3089 *fs, // shader
3090 "main",
3091 DE_NULL, // pSpecializationInfo
3092 }};
3093
3094 // Base structure with everything for the monolithic case.
3095 VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo = {
3096 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
3097 nullptr, // const void* pNext;
3098 0u, // VkPipelineCreateFlags flags;
3099 numStages, // uint32_t stageCount;
3100 &shaderCreateInfo[0], // const VkPipelineShaderStageCreateInfo* pStages;
3101 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
3102 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
3103 nullptr, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
3104 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
3105 &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
3106 &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
3107 nullptr, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
3108 nullptr, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
3109 nullptr, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
3110 pipelineLayout.get(), // VkPipelineLayout layout;
3111 renderPass.get(), // VkRenderPass renderPass;
3112 0u, // uint32_t subpass;
3113 VK_NULL_HANDLE, // VkPipeline basePipelineHandle;
3114 0 // int basePipelineIndex;
3115 };
3116
3117 #ifndef CTS_USES_VULKANSC
3118 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
3119 if (m_data.needsPipelineRobustness())
3120 {
3121 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
3122
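// Where the robustness info is chained depends on what is tested: vertex attribute fetch is
// part of the vertex input state, so the info goes on the pipeline itself; descriptor accesses
// happen in a shader, so the info goes on the stage that performs the access.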
3123 if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_MONOLITHIC)
3124 {
3125 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3126 graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
3127 else if (m_data.stage == STAGE_VERTEX)
3128 shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
3129 else
3130 shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
3131 }
3132 else // Fast or Optimized graphics pipeline libraries.
3133 {
3134 VkPipelineCreateFlags libCreationFlags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
3135 VkPipelineCreateFlags linkFlags = 0u;
3136
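// The "fast" GPL variant links the libraries as-is; the "optimized" variant asks each library
// to retain link-time-optimization info and then requests LTO when linking below.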
3137 if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
3138 {
3139 libCreationFlags |= VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT;
3140 linkFlags |= VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT;
3141 }
3142
3143 // Vertex input state library. When testing robust vertex shaders (except for vertex attribute fetch), the vertex input state is folded into the pre-rasterization library below instead.
3144 if (m_data.stage != STAGE_VERTEX || m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3145 {
3146 VkGraphicsPipelineLibraryCreateInfoEXT vertexInputLibInfo = initVulkanStructure();
3147 VkGraphicsPipelineCreateInfo vertexInputPipelineInfo = initVulkanStructure();
3148
3149 vertexInputPipelineInfo.pNext = &vertexInputLibInfo;
3150 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3151 vertexInputLibInfo.pNext = &pipelineRobustnessInfo;
3152
3153 vertexInputLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
3154 vertexInputPipelineInfo.flags = libCreationFlags;
3155 vertexInputPipelineInfo.pVertexInputState = graphicsPipelineCreateInfo.pVertexInputState;
3156 vertexInputPipelineInfo.pInputAssemblyState = graphicsPipelineCreateInfo.pInputAssemblyState;
3157
3158 vertexInputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &vertexInputPipelineInfo);
3159 }
3160
3161 // Pre-rasterization shader state library.
3162 {
3163 VkGraphicsPipelineLibraryCreateInfoEXT preRasterShaderLibInfo = initVulkanStructure();
3164 preRasterShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT;
3165
3166 VkGraphicsPipelineCreateInfo preRasterShaderPipelineInfo =
3167 initVulkanStructure(&preRasterShaderLibInfo);
3168 preRasterShaderPipelineInfo.flags = libCreationFlags;
3169 preRasterShaderPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3170 preRasterShaderPipelineInfo.pViewportState = graphicsPipelineCreateInfo.pViewportState;
3171 preRasterShaderPipelineInfo.pRasterizationState = graphicsPipelineCreateInfo.pRasterizationState;
3172 preRasterShaderPipelineInfo.pTessellationState = graphicsPipelineCreateInfo.pTessellationState;
3173 preRasterShaderPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3174 preRasterShaderPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3175
3176 VkPipelineShaderStageCreateInfo vertexStageInfo = shaderCreateInfo[0];
3177 if (m_data.stage == STAGE_VERTEX && m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3178 {
3179 preRasterShaderPipelineInfo.pVertexInputState = graphicsPipelineCreateInfo.pVertexInputState;
3180 preRasterShaderPipelineInfo.pInputAssemblyState =
3181 graphicsPipelineCreateInfo.pInputAssemblyState;
3182 preRasterShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
3183 vertexStageInfo.pNext = &pipelineRobustnessInfo;
3184 }
3185
3186 preRasterShaderPipelineInfo.stageCount = 1u;
3187 preRasterShaderPipelineInfo.pStages = &vertexStageInfo;
3188
3189 preRasterShaderLib =
3190 createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &preRasterShaderPipelineInfo);
3191 }
3192
3193 // Fragment shader stage library.
3194 {
3195 VkGraphicsPipelineLibraryCreateInfoEXT fragShaderLibInfo = initVulkanStructure();
3196 fragShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
3197
3198 VkGraphicsPipelineCreateInfo fragShaderPipelineInfo = initVulkanStructure(&fragShaderLibInfo);
3199 fragShaderPipelineInfo.flags = libCreationFlags;
3200 fragShaderPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3201 fragShaderPipelineInfo.pMultisampleState = graphicsPipelineCreateInfo.pMultisampleState;
3202 fragShaderPipelineInfo.pDepthStencilState = graphicsPipelineCreateInfo.pDepthStencilState;
3203 fragShaderPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3204 fragShaderPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3205
3206 std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
3207 if (m_data.stage != STAGE_VERTEX)
3208 {
3209 shaderStages.push_back(shaderCreateInfo[1]);
3210 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3211 shaderStages.back().pNext = &pipelineRobustnessInfo;
3212 }
3213
3214 fragShaderPipelineInfo.stageCount = de::sizeU32(shaderStages);
3215 fragShaderPipelineInfo.pStages = de::dataOrNull(shaderStages);
3216
3217 fragShaderLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragShaderPipelineInfo);
3218 }
3219
3220 // Fragment output library.
3221 {
3222 VkGraphicsPipelineLibraryCreateInfoEXT fragOutputLibInfo = initVulkanStructure();
3223 fragOutputLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
3224
3225 VkGraphicsPipelineCreateInfo fragOutputPipelineInfo = initVulkanStructure(&fragOutputLibInfo);
3226 fragOutputPipelineInfo.flags = libCreationFlags;
3227 fragOutputPipelineInfo.pColorBlendState = graphicsPipelineCreateInfo.pColorBlendState;
3228 fragOutputPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3229 fragOutputPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3230 fragOutputPipelineInfo.pMultisampleState = graphicsPipelineCreateInfo.pMultisampleState;
3231
3232 fragOutputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragOutputPipelineInfo);
3233 }
3234
3235 // Linked pipeline.
3236 std::vector<VkPipeline> libraryHandles;
3237 if (*vertexInputLib != VK_NULL_HANDLE)
3238 libraryHandles.push_back(*vertexInputLib);
3239 if (*preRasterShaderLib != VK_NULL_HANDLE)
3240 libraryHandles.push_back(*preRasterShaderLib);
3241 if (*fragShaderLib != VK_NULL_HANDLE)
3242 libraryHandles.push_back(*fragShaderLib);
3243 if (*fragOutputLib != VK_NULL_HANDLE)
3244 libraryHandles.push_back(*fragOutputLib);
3245
3246 VkPipelineLibraryCreateInfoKHR linkedPipelineLibraryInfo = initVulkanStructure();
3247 linkedPipelineLibraryInfo.libraryCount = de::sizeU32(libraryHandles);
3248 linkedPipelineLibraryInfo.pLibraries = de::dataOrNull(libraryHandles);
3249
3250 VkGraphicsPipelineCreateInfo linkedPipelineInfo = initVulkanStructure(&linkedPipelineLibraryInfo);
3251 linkedPipelineInfo.flags = linkFlags;
3252 linkedPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3253
3254 pipeline = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &linkedPipelineInfo);
3255 }
3256 }
3257 #endif
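// Fall back to a single monolithic pipeline when no GPL path above created one (robustness
// disabled, monolithic pipeline robustness, or Vulkan SC builds).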
3258 if (*pipeline == VK_NULL_HANDLE)
3259 pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
3260 }
3261
3262 const VkImageMemoryBarrier imageBarrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
3263 DE_NULL, // const void* pNext
3264 0u, // VkAccessFlags srcAccessMask
3265 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
3266 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
3267 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
3268 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
3269 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
3270 **images[0], // VkImage image
3271 {
3272 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
3273 0u, // uint32_t baseMipLevel
3274 1u, // uint32_t levelCount
3275 0u, // uint32_t baseArrayLayer
3276 1u, // uint32_t layerCount
3277 }};
3278
3279 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
3280 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
3281 (const VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);
3282
3283 vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
3284
3285 if (!formatIsR64(m_data.format))
3286 {
3287 VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3288 VkClearValue clearColor = makeClearValueColorU32(0, 0, 0, 0);
3289
3290 vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
3291 }
3292 else
3293 {
3294 const vector<VkBufferImageCopy> bufferImageCopy(
3295 1, makeBufferImageCopy(outputImageCreateInfo.extent,
3296 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
3297 copyBufferToImage(vk, *cmdBuffer, *(*bufferOutputImageR64), sizeOutputR64, bufferImageCopy,
3298 VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, **images[0], VK_IMAGE_LAYOUT_GENERAL,
3299 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
3300 }
3301
3302 VkMemoryBarrier memBarrier = {
3303 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
3304 DE_NULL, // pNext
3305 0u, // srcAccessMask
3306 0u, // dstAccessMask
3307 };
3308
3309 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3310 memBarrier.dstAccessMask =
3311 VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
3312 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages, 0, 1, &memBarrier, 0,
3313 DE_NULL, 0, DE_NULL);
3314
3315 if (m_data.stage == STAGE_COMPUTE)
3316 {
3317 vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3318 }
3319 #ifndef CTS_USES_VULKANSC
3320 else if (m_data.stage == STAGE_RAYGEN)
3321 {
3322 vk.cmdTraceRaysKHR(*cmdBuffer, &rgenSBTRegion, &missSBTRegion, &hitSBTRegion, &callSBTRegion, DIM, DIM, 1u);
3323 }
3324 #endif
3325 else
3326 {
3327 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(DIM, DIM), 0, DE_NULL,
3328 VK_SUBPASS_CONTENTS_INLINE);
3329 // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3330 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3331 {
3332 VkDeviceSize zeroOffset = 0;
3333 VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3334 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3335 vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3336
3337 // This barrier corresponds to the second subpass dependency.
3338 const auto writeStage = ((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT :
3339 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
3340 const auto postDrawBarrier =
3341 makeMemoryBarrier(VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT);
3342 cmdPipelineMemoryBarrier(vk, *cmdBuffer, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, writeStage, &postDrawBarrier);
3343 }
3344 if (m_data.stage == STAGE_VERTEX)
3345 {
3346 vk.cmdDraw(*cmdBuffer, DIM * DIM, 1u, 0u, 0u);
3347 }
3348 else
3349 {
3350 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3351 }
3352 endRenderPass(vk, *cmdBuffer);
3353 }
3354
3355 memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3356 memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3357 vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &memBarrier, 0,
3358 DE_NULL, 0, DE_NULL);
3359
3360 const VkBufferImageCopy copyRegion = makeBufferImageCopy(
3361 makeExtent3D(DIM, DIM, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3362 vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
3363
3364 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3365 memBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
3366 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 1, &memBarrier, 0,
3367 DE_NULL, 0, DE_NULL);
3368
3369 endCommandBuffer(vk, *cmdBuffer);
3370
3371 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3372
3373 void *ptr = copyBuffer->getAllocation().getHostPtr();
3374
3375 invalidateAlloc(vk, device, copyBuffer->getAllocation());
3376
3377 qpTestResult res = QP_TEST_RESULT_PASS;
3378
3379 for (uint32_t i = 0; i < DIM * DIM; ++i)
3380 {
3381 if (formatIsFloat(m_data.format))
3382 {
3383 if (((float *)ptr)[i * numComponents] != 1.0f)
3384 {
3385 res = QP_TEST_RESULT_FAIL;
3386 }
3387 }
3388 else if (formatIsR64(m_data.format))
3389 {
3390 if (((uint64_t *)ptr)[i * numComponents] != 1)
3391 {
3392 res = QP_TEST_RESULT_FAIL;
3393 }
3394 }
3395 else
3396 {
3397 if (((uint32_t *)ptr)[i * numComponents] != 1)
3398 {
3399 res = QP_TEST_RESULT_FAIL;
3400 }
3401 }
3402 }
3403
3404 return tcu::TestStatus(res, qpGetTestResultName(res));
3405 }
3406
3407 // Out of bounds stride tests.
3408 //
3409 // The goal is checking the following situation:
3410 //
3411 // - The vertex buffer size is not a multiple of the vertex binding stride.
3412 // - In other words, the last chunk goes partially beyond the end of the buffer.
3413 // - However, in this last chunk there will be an attribute that will be completely inside the buffer's range.
3414 // - With robustBufferAccess2, the implementation has to consider the attribute in-bounds and use it properly.
3415 // - Without robustBufferAccess2, the implementation is allowed to work at the chunk level instead of the attribute level.
3416 // - In other words, it can consider the attribute out of bounds because the chunk is out of bounds.
3417 //
3418 // The test checks that robustBufferAccess2 is correctly applied in this situation.
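//
// A concrete illustration, matching the geometry the test uses below: with a 32-byte binding
// stride and a 16-byte vec4 attribute at offset 0, a vertex buffer of (N * 32) - 16 bytes makes
// the last chunk [(N-1)*32, N*32) run past the end of the buffer, while the last attribute
// [(N-1)*32, (N-1)*32 + 16) stays fully in bounds and must be fetched correctly with
// robustBufferAccess2.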
3419
3420 struct OutOfBoundsStrideParams
3421 {
3422 const bool pipelineRobustness;
3423 const bool dynamicStride;
3424
3425 OutOfBoundsStrideParams(const bool pipelineRobustness_, const bool dynamicStride_)
3426 : pipelineRobustness(pipelineRobustness_)
3427 , dynamicStride(dynamicStride_)
3428 {
3429 }
3430 };
3431
3432 class OutOfBoundsStrideInstance : public vkt::TestInstance
3433 {
3434 public:
3435 OutOfBoundsStrideInstance(Context &context, const OutOfBoundsStrideParams &params)
3436 : vkt::TestInstance(context)
3437 , m_params(params)
3438 {
3439 }
3440 virtual ~OutOfBoundsStrideInstance(void)
3441 {
3442 }
3443
3444 tcu::TestStatus iterate(void) override;
3445
3446 protected:
3447 const OutOfBoundsStrideParams m_params;
3448 };
3449
3450 class OutOfBoundsStrideCase : public vkt::TestCase
3451 {
3452 public:
3453 OutOfBoundsStrideCase(tcu::TestContext &testCtx, const std::string &name, const OutOfBoundsStrideParams &params);
3454 virtual ~OutOfBoundsStrideCase(void)
3455 {
3456 }
3457
3458 void initPrograms(vk::SourceCollections &programCollection) const override;
3459 TestInstance *createInstance(Context &context) const override
3460 {
3461 return new OutOfBoundsStrideInstance(context, m_params);
3462 }
3463 void checkSupport(Context &context) const override;
3464
3465 protected:
3466 const OutOfBoundsStrideParams m_params;
3467 };
3468
3469 OutOfBoundsStrideCase::OutOfBoundsStrideCase(tcu::TestContext &testCtx, const std::string &name,
3470 const OutOfBoundsStrideParams &params)
3471 : vkt::TestCase(testCtx, name)
3472 , m_params(params)
3473 {
3474 #ifdef CTS_USES_VULKANSC
3475 DE_ASSERT(!m_params.pipelineRobustness);
3476 #endif // CTS_USES_VULKANSC
3477 }
3478
3479 void OutOfBoundsStrideCase::checkSupport(Context &context) const
3480 {
3481 context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
3482
3483 const auto &vki = context.getInstanceInterface();
3484 const auto physicalDevice = context.getPhysicalDevice();
3485
3486 // We need to query feature support using the physical device instead of using the reported context features because robustness
3487 // features are disabled in the default device.
3488 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
3489 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
3490 #ifndef CTS_USES_VULKANSC
3491 VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
3492 #endif // CTS_USES_VULKANSC
3493 VkPhysicalDeviceExtendedDynamicStateFeaturesEXT edsFeatures = initVulkanStructure();
3494
3495 const auto addFeatures = makeStructChainAdder(&features2);
3496
3497 if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
3498 addFeatures(&robustness2Features);
3499
3500 #ifndef CTS_USES_VULKANSC
3501 if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
3502 addFeatures(&pipelineRobustnessFeatures);
3503 #endif // CTS_USES_VULKANSC
3504
3505 if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
3506 addFeatures(&edsFeatures);
3507
3508 vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
3509
3510 if (!robustness2Features.robustBufferAccess2)
3511 TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
3512
3513 #ifndef CTS_USES_VULKANSC
3514 if (m_params.pipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
3515 TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
3516 #endif // CTS_USES_VULKANSC
3517
3518 if (m_params.dynamicStride && !edsFeatures.extendedDynamicState)
3519 TCU_THROW(NotSupportedError, "extendedDynamicState not supported");
3520 }
3521
3522 void OutOfBoundsStrideCase::initPrograms(vk::SourceCollections &programCollection) const
3523 {
3524 std::ostringstream vert;
3525 vert << "#version 460\n"
3526 << "layout (location=0) in vec4 inPos;\n"
3527 << "void main (void) {\n"
3528 << " gl_Position = inPos;\n"
3529 << " gl_PointSize = 1.0;\n"
3530 << "}\n";
3531 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
3532
3533 std::ostringstream frag;
3534 frag << "#version 460\n"
3535 << "layout (location=0) out vec4 outColor;\n"
3536 << "void main (void) {\n"
3537 << " outColor = vec4(0.0, 0.0, 1.0, 1.0);\n"
3538 << "}\n";
3539 programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
3540 }
3541
3542 tcu::TestStatus OutOfBoundsStrideInstance::iterate(void)
3543 {
3544 const auto &vki = m_context.getInstanceInterface();
3545 const auto physicalDevice = m_context.getPhysicalDevice();
3546 const auto &vkd = getDeviceInterface(m_context, true, m_params.pipelineRobustness);
3547 const auto device = getLogicalDevice(m_context, true, m_params.pipelineRobustness);
3548 SimpleAllocator allocator(vkd, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
3549 const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
3550 const tcu::IVec3 fbDim(8, 8, 1);
3551 const auto fbExtent = makeExtent3D(fbDim);
3552 const auto colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
3553 const auto colorUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
3554 const auto colorSRR = makeDefaultImageSubresourceRange();
3555 const auto colorSRL = makeDefaultImageSubresourceLayers();
3556 const auto v4Size = static_cast<uint32_t>(sizeof(tcu::Vec4));
3557 VkQueue queue;
3558
3559 // Retrieve the queue manually: we use a custom device instead of the context's default one.
3560 vkd.getDeviceQueue(device, qfIndex, 0u, &queue);
3561
3562 // Color buffer for the test.
3563 ImageWithBuffer colorBuffer(vkd, device, allocator, fbExtent, colorFormat, colorUsage, VK_IMAGE_TYPE_2D);
3564
3565 // We will use points, one per pixel, inserting padding after each point.
3566 // The last padding will fall outside the buffer, but the point itself will be fully inside it.
3567
3568 // One point per pixel.
3569 const auto pointCount = fbExtent.width * fbExtent.height * fbExtent.depth;
3570 std::vector<tcu::Vec4> points;
3571
3572 points.reserve(pointCount);
3573 for (uint32_t y = 0u; y < fbExtent.height; ++y)
3574 for (uint32_t x = 0u; x < fbExtent.width; ++x)
3575 {
3576 const auto xCoord = ((static_cast<float>(x) + 0.5f) / static_cast<float>(fbExtent.width)) * 2.0f - 1.0f;
3577 const auto yCoord = ((static_cast<float>(y) + 0.5f) / static_cast<float>(fbExtent.height)) * 2.0f - 1.0f;
3578 const tcu::Vec4 coords(xCoord, yCoord, 0.0f, 1.0f);
3579
3580 points.push_back(coords);
3581 }
3582
3583 // Add paddings.
3584 std::vector<tcu::Vec4> vertexBufferData;
3585 vertexBufferData.reserve(points.size() * 2u);
3586 for (const auto &point : points)
3587 {
3588 vertexBufferData.push_back(point);
3589 vertexBufferData.push_back(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3590 }
3591
3592 // Prepare vertex buffer. Note the size is slightly short and excludes the last padding.
3593 const auto vertexBufferSize = static_cast<VkDeviceSize>(de::dataSize(vertexBufferData) - v4Size);
3594 const auto vertexBufferUsage = (VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
3595 const auto vertexBufferInfo = makeBufferCreateInfo(vertexBufferSize, vertexBufferUsage);
3596 const auto vertexBufferOffset = VkDeviceSize{0};
3597 const auto vertexBufferStride = static_cast<VkDeviceSize>(2u * v4Size);
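// With the 8x8 framebuffer this is 64 points and 128 vec4s (2048 bytes) of data in a 2032-byte
// buffer: the last point at offset 2016 lies fully inside the buffer, while its padding at
// offset 2032 falls entirely beyond the end.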
3598
3599 BufferWithMemory vertexBuffer(vkd, device, allocator, vertexBufferInfo, MemoryRequirement::HostVisible);
3600 auto &vertexBufferAlloc = vertexBuffer.getAllocation();
3601 void *vertexBufferPtr = vertexBufferAlloc.getHostPtr();
3602
3603 deMemcpy(vertexBufferPtr, de::dataOrNull(vertexBufferData), static_cast<size_t>(vertexBufferSize));
3604
3605 // Create the pipeline.
3606 const auto &binaries = m_context.getBinaryCollection();
3607 const auto vertModule = createShaderModule(vkd, device, binaries.get("vert"));
3608 const auto fragModule = createShaderModule(vkd, device, binaries.get("frag"));
3609 const auto renderPass = makeRenderPass(vkd, device, colorFormat);
3610 const auto framebuffer =
3611 makeFramebuffer(vkd, device, renderPass.get(), colorBuffer.getImageView(), fbExtent.width, fbExtent.height);
3612 const auto pipelineLayout = makePipelineLayout(vkd, device);
3613
3614 const std::vector<VkViewport> viewports(1u, makeViewport(fbExtent));
3615 const std::vector<VkRect2D> scissors(1u, makeRect2D(fbExtent));
3616
3617 // Input state, which contains the right stride.
3618 const auto bindingStride = v4Size * 2u; // Vertex and padding.
3619 const auto bindingDescription = makeVertexInputBindingDescription(0u, bindingStride, VK_VERTEX_INPUT_RATE_VERTEX);
3620 const auto attributeDescription = makeVertexInputAttributeDescription(0u, 0u, vk::VK_FORMAT_R32G32B32A32_SFLOAT,
3621 0u); // Vertex at the start of each item.
3622
3623 const VkPipelineVertexInputStateCreateInfo inputStateCreateInfo = {
3624 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
3625 nullptr, // const void* pNext;
3626 0u, // VkPipelineVertexInputStateCreateFlags flags;
3627 1u, // uint32_t vertexBindingDescriptionCount;
3628 &bindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
3629 1u, // uint32_t vertexAttributeDescriptionCount;
3630 &attributeDescription, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
3631 };
3632
3633 std::vector<VkDynamicState> dynamicStates;
3634 if (m_params.dynamicStride)
3635 dynamicStates.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
3636
3637 const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
3638 VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType;
3639 nullptr, // const void* pNext;
3640 0u, // VkPipelineDynamicStateCreateFlags flags;
3641 de::sizeU32(dynamicStates), // uint32_t dynamicStateCount;
3642 de::dataOrNull(dynamicStates), // const VkDynamicState* pDynamicStates;
3643 };
3644
3645 const void *pNext = nullptr;
3646
3647 #ifndef CTS_USES_VULKANSC
3648 const VkPipelineRobustnessCreateInfoEXT pipelineRobustnessCreateInfo = {
3649 VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT, //VkStructureType sType;
3650 nullptr, //const void *pNext;
3651 VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT, //VkPipelineRobustnessBufferBehaviorEXT storageBuffers;
3652 VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT, //VkPipelineRobustnessBufferBehaviorEXT uniformBuffers;
3653 VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT, //VkPipelineRobustnessBufferBehaviorEXT vertexInputs;
3654 VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT, //VkPipelineRobustnessImageBehaviorEXT images;
3655 };
3656
// Only chain the robustness create info when this test variant enables pipeline robustness;
// chaining VkPipelineRobustnessCreateInfoEXT without the feature enabled is not valid.
if (m_params.pipelineRobustness)
    pNext = &pipelineRobustnessCreateInfo;
3658 #endif // CTS_USES_VULKANSC
3659
3660 const auto pipeline = makeGraphicsPipeline(
3661 vkd, device, pipelineLayout.get(), vertModule.get(), VK_NULL_HANDLE, VK_NULL_HANDLE, VK_NULL_HANDLE,
3662 fragModule.get(), renderPass.get(), viewports, scissors, VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, 0u,
3663 &inputStateCreateInfo, nullptr, nullptr, nullptr, nullptr, &dynamicStateCreateInfo, pNext);
3664
3665 // Command pool and buffer.
3666 const CommandPoolWithBuffer cmd(vkd, device, qfIndex);
3667 const auto cmdBuffer = cmd.cmdBuffer.get();
3668
3669 const auto clearColor = makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3670 beginCommandBuffer(vkd, cmdBuffer);
3671 beginRenderPass(vkd, cmdBuffer, renderPass.get(), framebuffer.get(), scissors.at(0u), clearColor);
3672 if (m_params.dynamicStride)
3673 {
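// vkCmdBindVertexBuffers2 is the core entry point (promoted in Vulkan 1.3); Vulkan SC builds
// use the EXT variant from VK_EXT_extended_dynamic_state instead.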
3674 #ifndef CTS_USES_VULKANSC
3675 vkd.cmdBindVertexBuffers2(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr,
3676 &vertexBufferStride);
3677 #else
3678 vkd.cmdBindVertexBuffers2EXT(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr,
3679 &vertexBufferStride);
3680 #endif // CTS_USES_VULKANSC
3681 }
3682 else
3683 {
3684 vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
3685 }
3686 vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.get());
3687 vkd.cmdDraw(cmdBuffer, pointCount, 1u, 0u, 0u);
3688 endRenderPass(vkd, cmdBuffer);
3689
3690 // Copy image to verification buffer.
3691 const auto color2Transfer = makeImageMemoryBarrier(
3692 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
3693 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorBuffer.getImage(), colorSRR);
3694
3695 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
3696 VK_PIPELINE_STAGE_TRANSFER_BIT, &color2Transfer);
3697
3698 const auto copyRegion = makeBufferImageCopy(fbExtent, colorSRL);
3699 vkd.cmdCopyImageToBuffer(cmdBuffer, colorBuffer.getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3700 colorBuffer.getBuffer(), 1u, &copyRegion);
3701
3702 const auto transfer2Host = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
3703 cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
3704 &transfer2Host);
3705
3706 endCommandBuffer(vkd, cmdBuffer);
3707 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
3708
3709 // Verify color buffer.
3710 invalidateAlloc(vkd, device, colorBuffer.getBufferAllocation());
3711
3712 const tcu::Vec4 refColor(0.0f, 0.0f, 1.0f, 1.0f); // Must match frag shader.
3713 const tcu::Vec4 threshold(0.0f, 0.0f, 0.0f, 0.0f);
3714 const void *resultData = colorBuffer.getBufferAllocation().getHostPtr();
3715 const auto tcuFormat = mapVkFormat(colorFormat);
3716 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, fbDim, resultData);
3717 auto &log = m_context.getTestContext().getLog();
3718
3719 if (!tcu::floatThresholdCompare(log, "Result", "", refColor, resultAccess, threshold, tcu::COMPARE_LOG_ON_ERROR))
3720 return tcu::TestStatus::fail("Unexpected results in the color buffer -- check log for details");
3721
3722 return tcu::TestStatus::pass("Pass");
3723 }
3724
3725 std::string getGPLSuffix(PipelineRobustnessCase prCase)
3726 {
3727 if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
3728 return "_fast_gpl";
3729 if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
3730 return "_optimized_gpl";
3731 return "";
3732 }
3733
3734 } // namespace
3735
3736 static void createTests(tcu::TestCaseGroup *group, bool robustness2, bool pipelineRobustness)
3737 {
3738 tcu::TestContext &testCtx = group->getTestContext();
3739
3740 typedef struct
3741 {
3742 uint32_t count;
3743 const char *name;
3744 } TestGroupCase;
3745
3746 TestGroupCase fmtCases[] = {
3747 {VK_FORMAT_R32_SINT, "r32i"},
3748 {VK_FORMAT_R32_UINT, "r32ui"},
3749 {VK_FORMAT_R32_SFLOAT, "r32f"},
3750 {VK_FORMAT_R32G32_SINT, "rg32i"},
3751 {VK_FORMAT_R32G32_UINT, "rg32ui"},
3752 {VK_FORMAT_R32G32_SFLOAT, "rg32f"},
3753 {VK_FORMAT_R32G32B32A32_SINT, "rgba32i"},
3754 {VK_FORMAT_R32G32B32A32_UINT, "rgba32ui"},
3755 {VK_FORMAT_R32G32B32A32_SFLOAT, "rgba32f"},
3756 {VK_FORMAT_R64_SINT, "r64i"},
3757 {VK_FORMAT_R64_UINT, "r64ui"},
3758 };
3759
3760 TestGroupCase fullDescCases[] = {
3761 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer"},
3762 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer"},
3763 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic"},
3764 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic"},
3765 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer"},
3766 {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer"},
3767 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image"},
3768 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image"},
3769 {VERTEX_ATTRIBUTE_FETCH, "vertex_attribute_fetch"},
3770 };
3771
3772 TestGroupCase imgDescCases[] = {
3773 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image"},
3774 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image"},
3775 };
3776
3777 TestGroupCase fullLenCases32Bit[] = {
3778 {~0U, "null_descriptor"}, {0, "img"}, {4, "len_4"}, {8, "len_8"}, {12, "len_12"}, {16, "len_16"},
3779 {20, "len_20"}, {31, "len_31"}, {32, "len_32"}, {33, "len_33"}, {35, "len_35"}, {36, "len_36"},
3780 {39, "len_39"}, {40, "len_41"}, {252, "len_252"}, {256, "len_256"}, {260, "len_260"},
3781 };
3782
3783 TestGroupCase fullLenCases64Bit[] = {
3784 {~0U, "null_descriptor"}, {0, "img"}, {8, "len_8"}, {16, "len_16"}, {24, "len_24"}, {32, "len_32"},
3785 {40, "len_40"}, {62, "len_62"}, {64, "len_64"}, {66, "len_66"}, {70, "len_70"}, {72, "len_72"},
3786 {78, "len_78"}, {80, "len_80"}, {504, "len_504"}, {512, "len_512"}, {520, "len_520"},
3787 };
3788
3789 TestGroupCase imgLenCases[] = {
3790 {0, "img"},
3791 };
3792
3793 TestGroupCase viewCases[] = {
3794 {VK_IMAGE_VIEW_TYPE_1D, "1d"},
3795 {VK_IMAGE_VIEW_TYPE_2D, "2d"},
3796 {VK_IMAGE_VIEW_TYPE_3D, "3d"},
3797 {VK_IMAGE_VIEW_TYPE_CUBE, "cube"},
3798 {VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array"},
3799 {VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array"},
3800 {VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array"},
3801 };
3802
3803 TestGroupCase sampCases[] = {
3804 {VK_SAMPLE_COUNT_1_BIT, "samples_1"},
3805 {VK_SAMPLE_COUNT_4_BIT, "samples_4"},
3806 };
3807
3808 TestGroupCase stageCases[] = {
3809 // compute
3810 {STAGE_COMPUTE, "comp"},
3811 // fragment
3812 {STAGE_FRAGMENT, "frag"},
3813 // vertex
3814 {STAGE_VERTEX, "vert"},
3815 #ifndef CTS_USES_VULKANSC
3816 // raygen
3817 {STAGE_RAYGEN, "rgen"},
3818 #endif
3819 };
3820
3821 TestGroupCase volCases[] = {
3822 {0, "nonvolatile"},
3823 {1, "volatile"},
3824 };
3825
3826 TestGroupCase unrollCases[] = {
3827 {0, "dontunroll"},
3828 {1, "unroll"},
3829 };
3830
3831 TestGroupCase tempCases[] = {
3832 {0, "notemplate"},
3833 #ifndef CTS_USES_VULKANSC
3834 {1, "template"},
3835 #endif
3836 };
3837
3838 TestGroupCase pushCases[] = {
3839 {0, "bind"},
3840 #ifndef CTS_USES_VULKANSC
3841 {1, "push"},
3842 #endif
3843 };
3844
3845 TestGroupCase fmtQualCases[] = {
3846 {0, "no_fmt_qual"},
3847 {1, "fmt_qual"},
3848 };
3849
3850 TestGroupCase readOnlyCases[] = {
3851 {0, "readwrite"},
3852 {1, "readonly"},
3853 };
3854
3855 for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3856 {
3857 de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name));
3858 for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3859 {
3860 de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name));
3861 for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3862 {
3863 de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name));
3864
3865 // Avoid too much duplication by excluding certain test cases
3866 if (pipelineRobustness && !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT ||
3867 fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT ||
3868 fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3869 {
3870 continue;
3871 }
3872
3873 int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3874
3875 for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3876 {
3877 de::MovePtr<tcu::TestCaseGroup> unrollGroup(
3878 new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name));
3879
3880 // Avoid too much duplication by excluding certain test cases
3881 if (unrollNdx > 0 && pipelineRobustness)
3882 continue;
3883
3884 for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3885 {
3886 de::MovePtr<tcu::TestCaseGroup> volGroup(
3887 new tcu::TestCaseGroup(testCtx, volCases[volNdx].name));
3888
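// robustness2 exercises every descriptor type; image robustness only covers image descriptors.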
3889 int numDescCases =
3890 robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3891 TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3892
3893 for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3894 {
3895 de::MovePtr<tcu::TestCaseGroup> descGroup(
3896 new tcu::TestCaseGroup(testCtx, descCases[descNdx].name));
3897
3898 // Avoid too much duplication by excluding certain test cases
3899 if (pipelineRobustness &&
3900 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
3901 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3902 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
3903 descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3904 {
3905 continue;
3906 }
3907
3908 for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3909 {
3910 de::MovePtr<tcu::TestCaseGroup> rwGroup(
3911 new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name));
3912
3913 // readonly cases are just for storage_buffer
3914 if (readOnlyCases[roNdx].count != 0 &&
3915 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3916 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3917 continue;
3918
3919 if (pipelineRobustness && readOnlyCases[roNdx].count != 0)
3920 {
3921 continue;
3922 }
3923
3924 for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3925 {
3926 de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(
3927 new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name));
3928
3929 // format qualifier is only used for storage image and storage texel buffers
3930 if (fmtQualCases[fmtQualNdx].count &&
3931 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
3932 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3933 continue;
3934
3935 if (pushCases[pushNdx].count &&
3936 (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
3937 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
3938 descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3939 continue;
3940
3941 const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3942 int numLenCases =
3943 robustness2 ?
3944 DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) :
3945 DE_LENGTH_OF_ARRAY(imgLenCases);
3946 TestGroupCase *lenCases =
3947 robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3948
3949 for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3950 {
3951 if (lenCases[lenNdx].count != ~0U)
3952 {
3953 bool bufferLen = lenCases[lenNdx].count != 0;
3954 bool bufferDesc =
3955 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3956 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3957 if (bufferLen != bufferDesc)
3958 continue;
3959
3960 // Add template test cases only for null_descriptor cases
3961 if (tempCases[tempNdx].count)
3962 continue;
3963 }
3964
3965 if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
3966 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3967 ((lenCases[lenNdx].count % fmtSize) != 0) && lenCases[lenNdx].count != ~0U)
3968 {
3969 continue;
3970 }
3971
3972 // Avoid too much duplication by excluding certain test cases
3973 if (pipelineRobustness && robustness2 &&
3974 (lenCases[lenNdx].count == 0 ||
3975 ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
3976 {
3977 continue;
3978 }
3979
3980 // "volatile" only applies to storage images/buffers
3981 if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3982 continue;
3983
3984 de::MovePtr<tcu::TestCaseGroup> lenGroup(
3985 new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name));
3986 for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3987 {
3988 de::MovePtr<tcu::TestCaseGroup> sampGroup(
3989 new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name));
3990
3991 // Avoid too much duplication by excluding certain test cases
3992 if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3993 continue;
3994
3995 for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3996 {
3997 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3998 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3999 descCases[descNdx].count !=
4000 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
4001 {
4002 // Buffer descriptors don't have different dimensionalities; only test "1D".
4003 continue;
4004 }
4005
4006 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D &&
4007 viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
4008 sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
4009 {
4010 continue;
4011 }
4012
4013 // Avoid too much duplication by excluding certain test cases
4014 if (pipelineRobustness &&
4015 !(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D ||
4016 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D ||
4017 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
4018 {
4019 continue;
4020 }
4021
4022 de::MovePtr<tcu::TestCaseGroup> viewGroup(
4023 new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name));
4024 for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases);
4025 stageNdx++)
4026 {
4027 Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
4028 VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT |
4029 VK_SHADER_STAGE_VERTEX_BIT |
4030 VK_SHADER_STAGE_FRAGMENT_BIT;
4031 VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
4032 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
4033 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
4034 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
4035 #ifndef CTS_USES_VULKANSC
4036 if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
4037 {
4038 allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_KHR;
4039 allPipelineStages |=
4040 VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
4041
4042 if (pipelineRobustness)
4043 continue;
4044 }
4045 #endif // CTS_USES_VULKANSC
4046 if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
4047 continue;
4048
4049 if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
4050 currentStage != STAGE_VERTEX)
4051 continue;
4052
4053 uint32_t imageDim[3] = {5, 11, 6};
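// Cube and cube-array views require square base images, so force height == width.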
4054 if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
4055 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
4056 imageDim[1] = imageDim[0];
4057
4058 #ifndef CTS_USES_VULKANSC
4059 std::vector<PipelineRobustnessCase> pipelineRobustnessCases;
4060 if (!pipelineRobustness)
4061 pipelineRobustnessCases.push_back(
4062 PipelineRobustnessCase::DISABLED);
4063 else
4064 {
4065 pipelineRobustnessCases.push_back(
4066 PipelineRobustnessCase::ENABLED_MONOLITHIC);
4067 if (currentStage != STAGE_RAYGEN &&
4068 currentStage != STAGE_COMPUTE)
4069 {
4070 pipelineRobustnessCases.push_back(
4071 PipelineRobustnessCase::ENABLED_FAST_GPL);
4072 pipelineRobustnessCases.push_back(
4073 PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL);
4074 }
4075 }
4076 #else
4077 const std::vector<PipelineRobustnessCase> pipelineRobustnessCases(
4078 1u, (pipelineRobustness ?
4079 PipelineRobustnessCase::ENABLED_MONOLITHIC :
4080 PipelineRobustnessCase::DISABLED));
4081 #endif // CTS_USES_VULKANSC
4082
4083 for (const auto &pipelineRobustnessCase : pipelineRobustnessCases)
4084 {
4085 CaseDef c = {
4086 (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
4087 currentStage, // Stage stage;
4088 allShaderStages, // VkFlags allShaderStages;
4089 allPipelineStages, // VkFlags allPipelineStages;
4090 (int)descCases[descNdx]
4091 .count, // VkDescriptorType descriptorType;
4092 (VkImageViewType)viewCases[viewNdx]
4093 .count, // VkImageViewType viewType;
4094 (VkSampleCountFlagBits)sampCases[sampNdx]
4095 .count, // VkSampleCountFlagBits samples;
4096 (int)lenCases[lenNdx].count, // int bufferLen;
4097 (bool)unrollCases[unrollNdx].count, // bool unroll;
4098 (bool)volCases[volNdx].count, // bool vol;
4099 (bool)(lenCases[lenNdx].count ==
4100 ~0U), // bool nullDescriptor
4101 (bool)tempCases[tempNdx].count, // bool useTemplate
4102 (bool)fmtQualCases[fmtQualNdx]
4103 .count, // bool formatQualifier
4104 (bool)pushCases[pushNdx].count, // bool pushDescriptor;
4105 (bool)robustness2, // bool testRobustness2;
4106 pipelineRobustnessCase, // PipelineRobustnessCase pipelineRobustnessCase;
4107 {imageDim[0], imageDim[1],
4108 imageDim[2]}, // uint32_t imageDim[3];
4109 (bool)(readOnlyCases[roNdx].count == 1), // bool readOnly;
4110 };
4111
4112 const auto name = stageCases[stageNdx].name +
4113 getGPLSuffix(pipelineRobustnessCase);
4114 viewGroup->addChild(
4115 new RobustnessExtsTestCase(testCtx, name, c));
4116 }
4117 }
4118 sampGroup->addChild(viewGroup.release());
4119 }
4120 lenGroup->addChild(sampGroup.release());
4121 }
4122 fmtQualGroup->addChild(lenGroup.release());
4123 }
4124 // Put storage_buffer tests in separate readonly vs readwrite groups. Other types
4125 // go directly into descGroup
4126 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
4127 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
4128 {
4129 rwGroup->addChild(fmtQualGroup.release());
4130 }
4131 else
4132 {
4133 descGroup->addChild(fmtQualGroup.release());
4134 }
4135 }
4136 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
4137 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
4138 {
4139 descGroup->addChild(rwGroup.release());
4140 }
4141 }
4142 volGroup->addChild(descGroup.release());
4143 }
4144 unrollGroup->addChild(volGroup.release());
4145 }
4146 fmtGroup->addChild(unrollGroup.release());
4147 }
4148 tempGroup->addChild(fmtGroup.release());
4149 }
4150 pushGroup->addChild(tempGroup.release());
4151 }
4152 group->addChild(pushGroup.release());
4153 }
4154
4155 if (robustness2)
4156 {
4157 de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(testCtx, "misc"));
4158
4159 for (const auto dynamicStride : {false, true})
4160 {
4161 const OutOfBoundsStrideParams params(pipelineRobustness, dynamicStride);
4162 const std::string nameSuffix(dynamicStride ? "_dynamic_stride" : "");
4163 const std::string testName("out_of_bounds_stride" + nameSuffix);
4164
4165 miscGroup->addChild(new OutOfBoundsStrideCase(testCtx, testName, params));
4166 }
4167
4168 group->addChild(miscGroup.release());
4169 }
4170 }
4171
4172 static void createRobustness2Tests(tcu::TestCaseGroup *group)
4173 {
4174 createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
4175 }
4176
4177 static void createImageRobustnessTests(tcu::TestCaseGroup *group)
4178 {
4179 createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
4180 }
4181
4182 #ifndef CTS_USES_VULKANSC
4183 static void createPipelineRobustnessTests(tcu::TestCaseGroup *group)
4184 {
4185 tcu::TestContext &testCtx = group->getTestContext();
4186
4187 tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2");
4188
4189 createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
4190
4191 group->addChild(robustness2Group);
4192
4193 tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness");
4194
4195 createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
4196
4197 group->addChild(imageRobustness2Group);
4198 }
4199 #endif
4200
4201 static void cleanupGroup(tcu::TestCaseGroup *group)
4202 {
4203 DE_UNREF(group);
4204 // Destroy singleton objects.
4205 ImageRobustnessSingleton::destroy();
4206 Robustness2Singleton::destroy();
4207 PipelineRobustnessImageRobustnessSingleton::destroy();
4208 PipelineRobustnessRobustness2Singleton::destroy();
4209 }
4210
4211 tcu::TestCaseGroup *createRobustness2Tests(tcu::TestContext &testCtx)
4212 {
4213 return createTestGroup(testCtx, "robustness2", createRobustness2Tests, cleanupGroup);
4214 }
4215
4216 tcu::TestCaseGroup *createImageRobustnessTests(tcu::TestContext &testCtx)
4217 {
4218 return createTestGroup(testCtx, "image_robustness", createImageRobustnessTests, cleanupGroup);
4219 }
4220
4221 #ifndef CTS_USES_VULKANSC
4222 tcu::TestCaseGroup *createPipelineRobustnessTests(tcu::TestContext &testCtx)
4223 {
4224 return createTestGroup(testCtx, "pipeline_robustness", createPipelineRobustnessTests, cleanupGroup);
4225 }
4226 #endif
4227
4228 } // namespace robustness
4229 } // namespace vkt
4230