1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2019 The Khronos Group Inc.
6 * Copyright (c) 2019 Google Inc.
7 * Copyright (c) 2017 Codeplay Software Ltd.
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 */ /*!
22 * \file
23 * \brief Subgroups Tests
24 */ /*--------------------------------------------------------------------*/
25
26 #include "vktSubgroupsClusteredTests.hpp"
27 #include "vktSubgroupsScanHelpers.hpp"
28 #include "vktSubgroupsTestsUtils.hpp"
29
30 #include <string>
31 #include <vector>
32
33 using namespace tcu;
34 using namespace std;
35 using namespace vk;
36 using namespace vkt;
37
38 namespace
39 {
// Clustered subgroup reductions exercised by these tests. Each value is
// mapped to a generic scan-helper Operator by getOperator() below.
enum OpType
{
    OPTYPE_CLUSTERED_ADD = 0,
    OPTYPE_CLUSTERED_MUL,
    OPTYPE_CLUSTERED_MIN,
    OPTYPE_CLUSTERED_MAX,
    OPTYPE_CLUSTERED_AND,
    OPTYPE_CLUSTERED_OR,
    OPTYPE_CLUSTERED_XOR,
    OPTYPE_CLUSTERED_LAST // Not an operation: sentinel used to iterate over all op types.
};
51
// Parameters describing a single test case instance.
struct CaseDefinition
{
    Operator op;                                 // Clustered operator under test.
    VkShaderStageFlags shaderStage;              // Stage(s) the test runs in.
    VkFormat format;                             // Data format of the input values.
    de::SharedPtr<bool> geometryPointSizeSupported; // Filled in by supportedCheck(); shared with program init.
    bool requiredSubgroupSize;                   // If true, iterate over explicit required subgroup sizes.
    bool requires8BitUniformBuffer;              // Input is read from a UBO with 8-bit values (framebuffer variants).
    bool requires16BitUniformBuffer;             // Input is read from a UBO with 16-bit values (framebuffer variants).
};
62
getOperator(OpType opType)63 static Operator getOperator(OpType opType)
64 {
65 switch (opType)
66 {
67 case OPTYPE_CLUSTERED_ADD:
68 return OPERATOR_ADD;
69 case OPTYPE_CLUSTERED_MUL:
70 return OPERATOR_MUL;
71 case OPTYPE_CLUSTERED_MIN:
72 return OPERATOR_MIN;
73 case OPTYPE_CLUSTERED_MAX:
74 return OPERATOR_MAX;
75 case OPTYPE_CLUSTERED_AND:
76 return OPERATOR_AND;
77 case OPTYPE_CLUSTERED_OR:
78 return OPERATOR_OR;
79 case OPTYPE_CLUSTERED_XOR:
80 return OPERATOR_XOR;
81 default:
82 TCU_THROW(InternalError, "Unsupported op type");
83 }
84 }
85
checkVertexPipelineStages(const void * internalData,vector<const void * > datas,uint32_t width,uint32_t)86 static bool checkVertexPipelineStages(const void *internalData, vector<const void *> datas, uint32_t width, uint32_t)
87 {
88 DE_UNREF(internalData);
89
90 return subgroups::check(datas, width, 1);
91 }
92
checkComputeOrMesh(const void * internalData,vector<const void * > datas,const uint32_t numWorkgroups[3],const uint32_t localSize[3],uint32_t)93 static bool checkComputeOrMesh(const void *internalData, vector<const void *> datas, const uint32_t numWorkgroups[3],
94 const uint32_t localSize[3], uint32_t)
95 {
96 DE_UNREF(internalData);
97
98 return subgroups::checkComputeOrMesh(datas, numWorkgroups, localSize, 1);
99 }
100
getOpTypeName(Operator op)101 string getOpTypeName(Operator op)
102 {
103 return getScanOpName("subgroupClustered", "", op, SCAN_REDUCE);
104 }
105
getExtHeader(CaseDefinition & caseDef)106 string getExtHeader(CaseDefinition &caseDef)
107 {
108 return "#extension GL_KHR_shader_subgroup_clustered: enable\n"
109 "#extension GL_KHR_shader_subgroup_ballot: enable\n" +
110 subgroups::getAdditionalExtensionForFormat(caseDef.format);
111 }
112
getTestSrc(CaseDefinition & caseDef)113 string getTestSrc(CaseDefinition &caseDef)
114 {
115 const string formatName = subgroups::getFormatNameForGLSL(caseDef.format);
116 const string opTypeName = getOpTypeName(caseDef.op);
117 const string identity = getIdentity(caseDef.op, caseDef.format);
118 const string opOperation = getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]");
119 const string compare = getCompare(caseDef.op, caseDef.format, "ref", "op");
120 ostringstream bdy;
121
122 bdy << " bool tempResult = true;\n"
123 << " uvec4 mask = subgroupBallot(true);\n";
124
125 for (uint32_t i = 1; i <= subgroups::maxSupportedSubgroupSize(); i *= 2)
126 {
127 bdy << " {\n"
128 << " const uint clusterSize = " << i << ";\n"
129 << " if (clusterSize <= gl_SubgroupSize)\n"
130 << " {\n"
131 << " " << formatName << " op = " << opTypeName + "(data[gl_SubgroupInvocationID], clusterSize);\n"
132 << " for (uint clusterOffset = 0; clusterOffset < gl_SubgroupSize; clusterOffset += clusterSize)\n"
133 << " {\n"
134 << " " << formatName << " ref = " << identity << ";\n"
135 << " for (uint index = clusterOffset; index < (clusterOffset + clusterSize); index++)\n"
136 << " {\n"
137 << " if (subgroupBallotBitExtract(mask, index))\n"
138 << " {\n"
139 << " ref = " << opOperation << ";\n"
140 << " }\n"
141 << " }\n"
142 << " if ((clusterOffset <= gl_SubgroupInvocationID) && (gl_SubgroupInvocationID < (clusterOffset + "
143 "clusterSize)))\n"
144 << " {\n"
145 << " if (!" << compare << ")\n"
146 << " {\n"
147 << " tempResult = false;\n"
148 << " }\n"
149 << " }\n"
150 << " }\n"
151 << " }\n"
152 << " }\n"
153 << " tempRes = tempResult ? 1 : 0;\n";
154 }
155
156 return bdy.str();
157 }
158
initFrameBufferPrograms(SourceCollections & programCollection,CaseDefinition caseDef)159 void initFrameBufferPrograms(SourceCollections &programCollection, CaseDefinition caseDef)
160 {
161 const ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
162 const string extHeader = getExtHeader(caseDef);
163 const string testSrc = getTestSrc(caseDef);
164
165 subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format,
166 *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
167 }
168
initPrograms(SourceCollections & programCollection,CaseDefinition caseDef)169 void initPrograms(SourceCollections &programCollection, CaseDefinition caseDef)
170 {
171 #ifndef CTS_USES_VULKANSC
172 const bool spirv14required =
173 (isAllRayTracingStages(caseDef.shaderStage) || isAllMeshShadingStages(caseDef.shaderStage));
174 #else
175 const bool spirv14required = false;
176 #endif // CTS_USES_VULKANSC
177 const SpirvVersion spirvVersion = spirv14required ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
178 const ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, spirvVersion, 0u, spirv14required);
179 const string extHeader = getExtHeader(caseDef);
180 const string testSrc = getTestSrc(caseDef);
181
182 subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format,
183 *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
184 }
185
supportedCheck(Context & context,CaseDefinition caseDef)186 void supportedCheck(Context &context, CaseDefinition caseDef)
187 {
188 if (!subgroups::isSubgroupSupported(context))
189 TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
190
191 if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_CLUSTERED_BIT))
192 TCU_THROW(NotSupportedError, "Device does not support subgroup clustered operations");
193
194 if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
195 TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
196
197 if (caseDef.requires16BitUniformBuffer)
198 {
199 if (!subgroups::is16BitUBOStorageSupported(context))
200 {
201 TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
202 }
203 }
204
205 if (caseDef.requires8BitUniformBuffer)
206 {
207 if (!subgroups::is8BitUBOStorageSupported(context))
208 {
209 TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
210 }
211 }
212
213 if (caseDef.requiredSubgroupSize)
214 {
215 context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
216
217 #ifndef CTS_USES_VULKANSC
218 const VkPhysicalDeviceSubgroupSizeControlFeatures &subgroupSizeControlFeatures =
219 context.getSubgroupSizeControlFeatures();
220 const VkPhysicalDeviceSubgroupSizeControlProperties &subgroupSizeControlProperties =
221 context.getSubgroupSizeControlProperties();
222 #else
223 const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT &subgroupSizeControlFeatures =
224 context.getSubgroupSizeControlFeaturesEXT();
225 const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT &subgroupSizeControlProperties =
226 context.getSubgroupSizeControlPropertiesEXT();
227 #endif // CTS_USES_VULKANSC
228
229 if (subgroupSizeControlFeatures.subgroupSizeControl == false)
230 TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
231
232 if (subgroupSizeControlFeatures.computeFullSubgroups == false)
233 TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
234
235 if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
236 TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
237 }
238
239 *caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
240
241 #ifndef CTS_USES_VULKANSC
242 if (isAllRayTracingStages(caseDef.shaderStage))
243 {
244 context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
245 }
246 else if (isAllMeshShadingStages(caseDef.shaderStage))
247 {
248 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
249 context.requireDeviceFunctionality("VK_EXT_mesh_shader");
250
251 if ((caseDef.shaderStage & VK_SHADER_STAGE_TASK_BIT_EXT) != 0u)
252 {
253 const auto &features = context.getMeshShaderFeaturesEXT();
254 if (!features.taskShader)
255 TCU_THROW(NotSupportedError, "Task shaders not supported");
256 }
257 }
258 #endif // CTS_USES_VULKANSC
259
260 subgroups::supportedCheckShader(context, caseDef.shaderStage);
261 }
262
noSSBOtest(Context & context,const CaseDefinition caseDef)263 TestStatus noSSBOtest(Context &context, const CaseDefinition caseDef)
264 {
265 const subgroups::SSBOData inputData = {
266 subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
267 subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
268 caseDef.format, // vk::VkFormat format;
269 subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
270 subgroups::SSBOData::BindingUBO, // BindingType bindingType;
271 };
272
273 switch (caseDef.shaderStage)
274 {
275 case VK_SHADER_STAGE_VERTEX_BIT:
276 return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
277 checkVertexPipelineStages);
278 case VK_SHADER_STAGE_GEOMETRY_BIT:
279 return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
280 checkVertexPipelineStages);
281 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
282 return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
283 checkVertexPipelineStages, caseDef.shaderStage);
284 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
285 return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
286 checkVertexPipelineStages, caseDef.shaderStage);
287 default:
288 TCU_THROW(InternalError, "Unhandled shader stage");
289 }
290 }
291
// Runs one clustered-operation case, dispatching to the compute/mesh,
// graphics, or ray tracing helper according to the stage mask. For compute
// and mesh cases with requiredSubgroupSize set, the test is repeated for
// every power-of-two subgroup size in the device's supported range.
TestStatus test(Context &context, const CaseDefinition caseDef)
{
    const bool isCompute = isAllComputeStages(caseDef.shaderStage);
#ifndef CTS_USES_VULKANSC
    const bool isMesh = isAllMeshShadingStages(caseDef.shaderStage);
#else
    // Mesh shading stage sets are never created under Vulkan SC.
    const bool isMesh = false;
#endif // CTS_USES_VULKANSC
    DE_ASSERT(!(isCompute && isMesh));

    if (isCompute || isMesh)
    {
#ifndef CTS_USES_VULKANSC
        const VkPhysicalDeviceSubgroupSizeControlProperties &subgroupSizeControlProperties =
            context.getSubgroupSizeControlProperties();
#else
        const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT &subgroupSizeControlProperties =
            context.getSubgroupSizeControlPropertiesEXT();
#endif // CTS_USES_VULKANSC
        TestLog &log = context.getTestContext().getLog();

        // SSBO-backed input, one element per possible subgroup invocation.
        subgroups::SSBOData inputData;
        inputData.format = caseDef.format;
        inputData.layout = subgroups::SSBOData::LayoutStd430;
        inputData.numElements = subgroups::maxSupportedSubgroupSize();
        inputData.initializeType = subgroups::SSBOData::InitializeNonZero;

        if (caseDef.requiredSubgroupSize == false)
        {
            // Single run with the implementation's default subgroup size.
            if (isCompute)
                return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
                                                  checkComputeOrMesh);
            else
                return subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh);
        }

        log << TestLog::Message << "Testing required subgroup size range ["
            << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize
            << "]" << TestLog::EndMessage;

        // According to the spec, requiredSubgroupSize must be a power-of-two integer.
        for (uint32_t size = subgroupSizeControlProperties.minSubgroupSize;
             size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
        {
            TestStatus result(QP_TEST_RESULT_INTERNAL_ERROR, "Internal Error");

            if (isCompute)
                result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
                                                    checkComputeOrMesh, size);
            else
                result = subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
                                                 checkComputeOrMesh, size);

            // Fail fast: report the first subgroup size that does not pass.
            if (result.getCode() != QP_TEST_RESULT_PASS)
            {
                log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
                return result;
            }
        }

        return TestStatus::pass("OK");
    }
    else if (isAllGraphicsStages(caseDef.shaderStage))
    {
        const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
        const subgroups::SSBOData inputData = {
            subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
            subgroups::SSBOData::LayoutStd430,      // InputDataLayoutType layout;
            caseDef.format,                         // vk::VkFormat format;
            subgroups::maxSupportedSubgroupSize(),  // vk::VkDeviceSize numElements;
            subgroups::SSBOData::BindingSSBO,       // BindingType bindingType;
            4u,                                     // uint32_t binding;
            stages,                                 // vk::VkShaderStageFlags stages;
        };

        return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages,
                                    stages);
    }
#ifndef CTS_USES_VULKANSC
    else if (isAllRayTracingStages(caseDef.shaderStage))
    {
        const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
        const subgroups::SSBOData inputData = {
            subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
            subgroups::SSBOData::LayoutStd430,      // InputDataLayoutType layout;
            caseDef.format,                         // vk::VkFormat format;
            subgroups::maxSupportedSubgroupSize(),  // vk::VkDeviceSize numElements;
            subgroups::SSBOData::BindingSSBO,       // BindingType bindingType;
            6u,                                     // uint32_t binding;
            stages,                                 // vk::VkShaderStageFlags stages;
        };

        return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL,
                                              checkVertexPipelineStages, stages);
    }
#endif // CTS_USES_VULKANSC
    else
        TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
391 } // namespace
392
393 namespace vkt
394 {
395 namespace subgroups
396 {
// Builds the "clustered" test group: for every (format, operator)
// combination it registers compute, mesh (with and without a required
// subgroup size), all-graphics, framebuffer per-stage, and ray tracing cases.
TestCaseGroup *createSubgroupsClusteredTests(TestContext &testCtx)
{
    de::MovePtr<TestCaseGroup> group(new TestCaseGroup(testCtx, "clustered"));
    de::MovePtr<TestCaseGroup> graphicGroup(new TestCaseGroup(testCtx, "graphics"));
    de::MovePtr<TestCaseGroup> computeGroup(new TestCaseGroup(testCtx, "compute"));
    de::MovePtr<TestCaseGroup> framebufferGroup(new TestCaseGroup(testCtx, "framebuffer"));
#ifndef CTS_USES_VULKANSC
    de::MovePtr<TestCaseGroup> raytracingGroup(new TestCaseGroup(testCtx, "ray_tracing"));
    de::MovePtr<TestCaseGroup> meshGroup(new TestCaseGroup(testCtx, "mesh"));
#endif // CTS_USES_VULKANSC
    // Stages exercised by the framebuffer (no-SSBO) variants.
    const VkShaderStageFlags fbStages[] = {
        VK_SHADER_STAGE_VERTEX_BIT,
        VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
        VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
        VK_SHADER_STAGE_GEOMETRY_BIT,
    };
#ifndef CTS_USES_VULKANSC
    const VkShaderStageFlags meshStages[] = {
        VK_SHADER_STAGE_MESH_BIT_EXT,
        VK_SHADER_STAGE_TASK_BIT_EXT,
    };
#endif // CTS_USES_VULKANSC
    // Toggles the requiredSubgroupSize variants for compute/mesh cases.
    const bool boolValues[] = {false, true};

    {
        const vector<VkFormat> formats = subgroups::getAllFormats();

        for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
        {
            const VkFormat format = formats[formatIndex];
            const string formatName = subgroups::getFormatNameForGLSL(format);
            const bool isBool = subgroups::isFormatBool(format);
            const bool isFloat = subgroups::isFormatFloat(format);
            const bool needs8BitUBOStorage = isFormat8bitTy(format);
            const bool needs16BitUBOStorage = isFormat16BitTy(format);

            for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
            {
                const OpType opType = static_cast<OpType>(opTypeIndex);
                const Operator op = getOperator(opType);
                const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);

                // Skip float with bitwise category.
                if (isFloat && isBitwiseOp)
                    continue;

                // Skip bool when it's not the bitwise category.
                if (isBool && !isBitwiseOp)
                    continue;

                const string name = de::toLower(getOpTypeName(op)) + "_" + formatName;

                // Compute cases, with and without an explicit required subgroup size.
                for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
                {
                    const bool requiredSubgroupSize = boolValues[groupSizeNdx];
                    const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
                    const CaseDefinition caseDef = {
                        op,                            // Operator op;
                        VK_SHADER_STAGE_COMPUTE_BIT,   // VkShaderStageFlags shaderStage;
                        format,                        // VkFormat format;
                        de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
                        requiredSubgroupSize,          // bool requiredSubgroupSize;
                        false,                         // bool requires8BitUniformBuffer;
                        false                          // bool requires16BitUniformBuffer;
                    };

                    addFunctionCaseWithPrograms(computeGroup.get(), testName, supportedCheck, initPrograms, test,
                                                caseDef);
                }

#ifndef CTS_USES_VULKANSC
                // Mesh and task shader cases, with and without an explicit
                // required subgroup size.
                for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
                {
                    for (const auto &stage : meshStages)
                    {
                        const bool requiredSubgroupSize = boolValues[groupSizeNdx];
                        const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "") + "_" +
                                                getShaderStageName(stage);
                        const CaseDefinition caseDef = {
                            op,                            // Operator op;
                            stage,                         // VkShaderStageFlags shaderStage;
                            format,                        // VkFormat format;
                            de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
                            requiredSubgroupSize,          // bool requiredSubgroupSize;
                            false,                         // bool requires8BitUniformBuffer;
                            false                          // bool requires16BitUniformBuffer;
                        };

                        addFunctionCaseWithPrograms(meshGroup.get(), testName, supportedCheck, initPrograms, test,
                                                    caseDef);
                    }
                }
#endif // CTS_USES_VULKANSC

                // One case covering all graphics stages together.
                {
                    const CaseDefinition caseDef = {
                        op,                            // Operator op;
                        VK_SHADER_STAGE_ALL_GRAPHICS,  // VkShaderStageFlags shaderStage;
                        format,                        // VkFormat format;
                        de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
                        false,                         // bool requiredSubgroupSize;
                        false,                         // bool requires8BitUniformBuffer;
                        false                          // bool requires16BitUniformBuffer;
                    };

                    addFunctionCaseWithPrograms(graphicGroup.get(), name, supportedCheck, initPrograms, test, caseDef);
                }

                // Framebuffer variants: one case per vertex-pipeline stage.
                // These read the input from a UBO, so small formats need the
                // matching UBO storage feature.
                for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(fbStages); ++stageIndex)
                {
                    const CaseDefinition caseDef = {
                        op,                            // Operator op;
                        fbStages[stageIndex],          // VkShaderStageFlags shaderStage;
                        format,                        // VkFormat format;
                        de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
                        false,                         // bool requiredSubgroupSize;
                        bool(needs8BitUBOStorage),     // bool requires8BitUniformBuffer;
                        bool(needs16BitUBOStorage)     // bool requires16BitUniformBuffer;
                    };
                    const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);

                    addFunctionCaseWithPrograms(framebufferGroup.get(), testName, supportedCheck,
                                                initFrameBufferPrograms, noSSBOtest, caseDef);
                }
            }
        }
    }

#ifndef CTS_USES_VULKANSC
    {
        // Ray tracing cases use their own (smaller) format list.
        const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();

        for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
        {
            const VkFormat format = formats[formatIndex];
            const string formatName = subgroups::getFormatNameForGLSL(format);
            const bool isBool = subgroups::isFormatBool(format);
            const bool isFloat = subgroups::isFormatFloat(format);

            for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
            {
                const OpType opType = static_cast<OpType>(opTypeIndex);
                const Operator op = getOperator(opType);
                const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);

                // Skip float with bitwise category.
                if (isFloat && isBitwiseOp)
                    continue;

                // Skip bool when it's not the bitwise category.
                if (isBool && !isBitwiseOp)
                    continue;

                {
                    const string name = de::toLower(getOpTypeName(op)) + "_" + formatName;
                    const CaseDefinition caseDef = {
                        op,                            // Operator op;
                        SHADER_STAGE_ALL_RAY_TRACING,  // VkShaderStageFlags shaderStage;
                        format,                        // VkFormat format;
                        de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
                        false,                         // bool requiredSubgroupSize;
                        false,                         // bool requires8BitUniformBuffer;
                        false                          // bool requires16BitUniformBuffer;
                    };

                    addFunctionCaseWithPrograms(raytracingGroup.get(), name, supportedCheck, initPrograms, test,
                                                caseDef);
                }
            }
        }
    }
#endif // CTS_USES_VULKANSC

    group->addChild(graphicGroup.release());
    group->addChild(computeGroup.release());
    group->addChild(framebufferGroup.release());
#ifndef CTS_USES_VULKANSC
    group->addChild(raytracingGroup.release());
    group->addChild(meshGroup.release());
#endif // CTS_USES_VULKANSC

    return group.release();
}
580 } // namespace subgroups
581 } // namespace vkt
582