1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2020 The Khronos Group Inc.
6 * Copyright (c) 2020 Valve Corporation.
7 * Copyright (c) 2023 LunarG, Inc.
8 * Copyright (c) 2023 Nintendo
9 *
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
13 *
14 * http://www.apache.org/licenses/LICENSE-2.0
15 *
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
21 *
22 *//*!
23 * \file
24 * \brief Tests with shaders that do not write to the Position built-in.
25 *//*--------------------------------------------------------------------*/
26
27 #include "vktPipelineNoPositionTests.hpp"
28 #include "tcuRGBA.hpp"
29 #include "tcuVectorType.hpp"
30 #include "vkDefs.hpp"
31 #include "vktTestCase.hpp"
32 #include "vkPipelineConstructionUtil.hpp"
33 #include "vktCustomInstancesDevices.hpp"
34
35 #include "vkQueryUtil.hpp"
36 #include "vkObjUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkTypeUtil.hpp"
39 #include "vkCmdUtil.hpp"
40 #include "vkImageWithMemory.hpp"
41 #include "vkBufferWithMemory.hpp"
42 #include "vktPipelineImageUtil.hpp"
43 #include "vkImageUtil.hpp"
44 #include "vkBarrierUtil.hpp"
45 #include "vkBuilderUtil.hpp"
46 #include "vkPlatform.hpp"
47 #include "vkSafetyCriticalUtil.hpp"
48
49 #include "tcuVector.hpp"
50 #include "tcuTestLog.hpp"
51 #include "tcuCommandLine.hpp"
52
53 #include "deUniquePtr.hpp"
54
55 #include <cstdint>
56 #include <string>
57 #include <vector>
58 #include <set>
59 #include <sstream>
60 #include <array>
61
62 namespace vkt
63 {
64 namespace pipeline
65 {
66
67 namespace
68 {
69
70 using namespace vk;
71
72 enum ShaderStageBits
73 {
74 STAGE_VERTEX = (1 << 0),
75 STAGE_TESS_CONTROL = (1 << 1),
76 STAGE_TESS_EVALUATION = (1 << 2),
77 STAGE_GEOMETRY = (1 << 3),
78 STAGE_MASK_COUNT = (1 << 4),
79 };
80
81 using ShaderStageFlags = uint32_t;
82
83 constexpr uint32_t kStageCount = 4u;
84
85 static_assert((1u << kStageCount) == static_cast<uint32_t>(STAGE_MASK_COUNT),
86 "Total stage count does not match stage mask bits");
87
88 const uint32_t max_devgrp_phydevices = VK_MAX_DEVICE_GROUP_SIZE;
89
90 template <typename T>
makeSharedPtr(Move<T> move)91 inline de::SharedPtr<Unique<T>> makeSharedPtr(Move<T> move)
92 {
93 return de::SharedPtr<Unique<T>>(new Unique<T>(move));
94 }
95
96 struct TestParams
97 {
98 vk::PipelineConstructionType pipelineConstructionType; // How the pipeline is constructed.
99 ShaderStageFlags selectedStages; // Stages that will be present in the pipeline.
100 ShaderStageFlags writeStages; // Subset of selectedStages that will write to the Position built-in.
101 uint32_t numViews; // Number of views for multiview.
102 bool explicitDeclarations; // Explicitly declare the input and output blocks or not.
103 bool useSSBO; // Write to an SSBO from the selected stages.
104 bool useViewIndexAsDeviceIndex; // Treat gl_ViewIndex shader input variable like gl_DeviceIndex.
105
106 // Commonly used checks.
107 bool tessellation(void) const
108 {
109 return (selectedStages & (STAGE_TESS_CONTROL | STAGE_TESS_EVALUATION));
110 }
111 bool geometry(void) const
112 {
113 return (selectedStages & STAGE_GEOMETRY);
114 }
115 };
116
117 // Generates the list of writeStages flag combinations for a given set of selected stages.
118 std::vector<ShaderStageFlags> getWriteSubCases(ShaderStageFlags selectedStages)
119 {
120 std::set<ShaderStageFlags> uniqueCases;
121 for (ShaderStageFlags stages = 0; stages < STAGE_MASK_COUNT; ++stages)
122 uniqueCases.insert(stages & selectedStages);
123 return std::vector<ShaderStageFlags>(begin(uniqueCases), end(uniqueCases));
124 }
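// For instance, selectedStages == (STAGE_VERTEX | STAGE_GEOMETRY) yields the four write
// masks {0, STAGE_VERTEX, STAGE_GEOMETRY, STAGE_VERTEX | STAGE_GEOMETRY}, i.e. every
// subset of the selected stages, each of which becomes a separate test case.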
125
126 class NoPositionCase : public vkt::TestCase
127 {
128 public:
129 NoPositionCase(tcu::TestContext &testCtx, const std::string &name, const TestParams &params);
130 virtual ~NoPositionCase(void)
131 {
132 }
133
134 virtual void initPrograms(vk::SourceCollections &programCollection) const;
135 virtual TestInstance *createInstance(Context &context) const;
136 virtual void checkSupport(Context &context) const;
137
138 static tcu::Vec4 getBackGroundColor(void)
139 {
140 return tcu::RGBA::blue().toVec();
141 }
142 static VkFormat getImageFormat(void)
143 {
144 return VK_FORMAT_R8G8B8A8_UNORM;
145 }
146 static VkExtent3D getImageExtent(void)
147 {
148 return makeExtent3D(64u, 64u, 1u);
149 }
150
151 private:
152 TestParams m_params;
153 };
154
155 class NoPositionInstance : public vkt::TestInstance
156 {
157 public:
158 NoPositionInstance(Context &context, const TestParams &params);
159 virtual ~NoPositionInstance(void)
160 {
161 }
162
163 virtual tcu::TestStatus iterate(void);
164
165 void createDeviceGroup(void);
166
167 const vk::DeviceInterface &getDeviceInterface(void)
168 {
169 return *m_deviceDriver;
170 }
171 vk::VkInstance getInstance(void)
172 {
173 return m_deviceGroupInstance;
174 }
175 vk::VkDevice getDevice(void)
176 {
177 return *m_logicalDevice;
178 }
179 vk::VkPhysicalDevice getPhysicalDevice(uint32_t i = 0)
180 {
181 return m_physicalDevices[i];
182 }
183
184 private:
185 uint32_t m_numPhysDevices;
186 uint32_t m_numViews;
187 uint32_t m_queueFamilyIndex;
188 CustomInstance m_deviceGroupInstance;
189 vk::Move<vk::VkDevice> m_logicalDevice;
190 std::vector<vk::VkPhysicalDevice> m_physicalDevices;
191 #ifndef CTS_USES_VULKANSC
192 de::MovePtr<vk::DeviceDriver> m_deviceDriver;
193 #else
194 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
195 #endif // CTS_USES_VULKANSC
196 de::MovePtr<Allocator> m_allocator;
197
198 TestParams m_params;
199 };
200
NoPositionCase(tcu::TestContext & testCtx,const std::string & name,const TestParams & params)201 NoPositionCase::NoPositionCase(tcu::TestContext &testCtx, const std::string &name, const TestParams ¶ms)
202 : vkt::TestCase(testCtx, name)
203 , m_params(params)
204 {
205 }
206
207 void NoPositionCase::initPrograms(vk::SourceCollections &programCollection) const
208 {
209 // Add shaders for the selected stages and write to gl_Position in the subset of stages marked for writing.
210
211 // Optional writes, extensions and declarations.
212 std::string ssboDecl;
213 std::string extensions;
214 std::string vertSSBOWrite;
215 std::string tescSSBOWrite;
216 std::string teseSSBOWrite;
217 std::string geomSSBOWrite;
218
219 const bool multiview = (m_params.numViews > 1u);
220
221 if (multiview || m_params.useViewIndexAsDeviceIndex)
222 extensions = "#extension GL_EXT_multiview : require\n";
223
224 if (m_params.useSSBO)
225 {
226 const uint32_t numCountersPerStage =
227 m_params.useViewIndexAsDeviceIndex ? max_devgrp_phydevices : m_params.numViews;
228 const auto ssboElementCount = kStageCount * numCountersPerStage;
229 ssboDecl = "layout (set=0, binding=0, std430) buffer StorageBlock { uint counters[" +
230 de::toString(ssboElementCount) + "]; } ssbo;\n";
231
232 const std::array<std::string *, kStageCount> writeStrings = {
233 {&vertSSBOWrite, &tescSSBOWrite, &teseSSBOWrite, &geomSSBOWrite}};
234 for (size_t stageNum = 0; stageNum < writeStrings.size(); ++stageNum)
235 {
236 std::ostringstream s;
237 s << " atomicAdd(ssbo.counters[" << stageNum;
238 if (multiview || m_params.useViewIndexAsDeviceIndex)
239 {
240 s << " * " << numCountersPerStage << " + ";
241 s << "gl_ViewIndex";
242 }
243 s << "], 1);\n";
244 s.flush();
245 *writeStrings[stageNum] = s.str();
246 }
247 }
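// As a rough illustration, with two views the write generated above for the tessellation
// control stage (stageNum == 1) is roughly " atomicAdd(ssbo.counters[1 * 2 + gl_ViewIndex], 1);",
// so each stage owns a contiguous block of numCountersPerStage counters and each view (or
// device index, when useViewIndexAsDeviceIndex is set) increments its own slot.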
248
249 if (m_params.selectedStages & STAGE_VERTEX)
250 {
251 std::ostringstream vert;
252 vert << "#version 450\n"
253 << extensions << ssboDecl << "layout (location=0) in vec4 in_pos;\n"
254 << (m_params.explicitDeclarations ? "out gl_PerVertex\n"
255 "{\n"
256 " vec4 gl_Position;\n"
257 " float gl_PointSize;\n"
258 " float gl_ClipDistance[];\n"
259 " float gl_CullDistance[];\n"
260 "};\n" :
261 "")
262 << "void main (void)\n"
263 << "{\n"
264 << ((m_params.writeStages & STAGE_VERTEX) ? " gl_Position = in_pos;\n" : "") << vertSSBOWrite << "}\n";
265
266 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
267 }
268
269 if (m_params.selectedStages & STAGE_TESS_CONTROL)
270 {
271 std::ostringstream tesc;
272 tesc << "#version 450\n"
273 << extensions << ssboDecl << "layout (vertices = 3) out;\n"
274 << (m_params.explicitDeclarations ? "in gl_PerVertex\n"
275 "{\n"
276 " vec4 gl_Position;\n"
277 " float gl_PointSize;\n"
278 " float gl_ClipDistance[];\n"
279 " float gl_CullDistance[];\n"
280 "} gl_in[gl_MaxPatchVertices];\n"
281 "out gl_PerVertex\n"
282 "{\n"
283 " vec4 gl_Position;\n"
284 " float gl_PointSize;\n"
285 " float gl_ClipDistance[];\n"
286 " float gl_CullDistance[];\n"
287 "} gl_out[];\n" :
288 "")
289 << "void main (void)\n"
290 << "{\n"
291 << " gl_TessLevelInner[0] = 1.0;\n"
292 << " gl_TessLevelInner[1] = 1.0;\n"
293 << " gl_TessLevelOuter[0] = 1.0;\n"
294 << " gl_TessLevelOuter[1] = 1.0;\n"
295 << " gl_TessLevelOuter[2] = 1.0;\n"
296 << " gl_TessLevelOuter[3] = 1.0;\n"
297 << "\n"
298 << ((m_params.writeStages & STAGE_TESS_CONTROL) ?
299 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n" :
300 "")
301 << tescSSBOWrite << "}\n";
302
303 programCollection.glslSources.add("tesc") << glu::TessellationControlSource(tesc.str());
304 }
305
306 if (m_params.selectedStages & STAGE_TESS_EVALUATION)
307 {
308 std::ostringstream tese;
309 tese << "#version 450\n"
310 << extensions << ssboDecl << "layout (triangles, fractional_odd_spacing, cw) in;\n"
311 << (m_params.explicitDeclarations ? "in gl_PerVertex\n"
312 "{\n"
313 " vec4 gl_Position;\n"
314 " float gl_PointSize;\n"
315 " float gl_ClipDistance[];\n"
316 " float gl_CullDistance[];\n"
317 "} gl_in[gl_MaxPatchVertices];\n"
318 "out gl_PerVertex\n"
319 "{\n"
320 " vec4 gl_Position;\n"
321 " float gl_PointSize;\n"
322 " float gl_ClipDistance[];\n"
323 " float gl_CullDistance[];\n"
324 "};\n" :
325 "")
326 << "void main (void)\n"
327 << "{\n"
328 << ((m_params.writeStages & STAGE_TESS_EVALUATION) ?
329 " gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position) +\n"
330 " (gl_TessCoord.y * gl_in[1].gl_Position) +\n"
331 " (gl_TessCoord.z * gl_in[2].gl_Position);\n" :
332 "")
333 << teseSSBOWrite << "}\n";
334
335 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(tese.str());
336 }
337
338 if (m_params.selectedStages & STAGE_GEOMETRY)
339 {
340 std::ostringstream geom;
341 geom << "#version 450\n"
342 << extensions << ssboDecl << "layout (triangles) in;\n"
343 << "layout (triangle_strip, max_vertices=3) out;\n"
344 << (m_params.explicitDeclarations ? "in gl_PerVertex\n"
345 "{\n"
346 " vec4 gl_Position;\n"
347 " float gl_PointSize;\n"
348 " float gl_ClipDistance[];\n"
349 " float gl_CullDistance[];\n"
350 "} gl_in[3];\n"
351 "out gl_PerVertex\n"
352 "{\n"
353 " vec4 gl_Position;\n"
354 " float gl_PointSize;\n"
355 " float gl_ClipDistance[];\n"
356 " float gl_CullDistance[];\n"
357 "};\n" :
358 "")
359 << "void main (void)\n"
360 << "{\n"
361 << " for (int i = 0; i < 3; i++)\n"
362 << " {\n"
363 << ((m_params.writeStages & STAGE_GEOMETRY) ? " gl_Position = gl_in[i].gl_Position;\n" : "")
364 << " EmitVertex();\n"
365 << " }\n"
366 << geomSSBOWrite << "}\n";
367
368 programCollection.glslSources.add("geom") << glu::GeometrySource(geom.str());
369 }
370
371 {
372 const auto backgroundColor = getBackGroundColor();
373
374 std::ostringstream colorStr;
375 colorStr << "vec4(" << backgroundColor.x() << ", " << backgroundColor.y() << ", " << backgroundColor.z() << ", "
376 << backgroundColor.w() << ")";
377
378 std::ostringstream frag;
379 frag << "#version 450\n"
380 << "layout (location=0) out vec4 out_color;\n"
381 << "void main (void)\n"
382 << "{\n"
383 << " out_color = " << colorStr.str() << ";\n"
384 << "}\n";
385
386 programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
387 }
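// Note that the fragment shader emits the same color the framebuffer is cleared to, so the
// image check in iterate() expects a uniform background whether or not anything is
// rasterized; the point of the test is that pipelines whose Position output is never
// written still build and execute without faults.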
388 }
389
createInstance(Context & context) const390 TestInstance *NoPositionCase::createInstance(Context &context) const
391 {
392 return new NoPositionInstance(context, m_params);
393 }
394
checkSupport(Context & context) const395 void NoPositionCase::checkSupport(Context &context) const
396 {
397 const auto features = getPhysicalDeviceFeatures(context.getInstanceInterface(), context.getPhysicalDevice());
398 const bool hasTess = m_params.tessellation();
399 const bool hasGeom = m_params.geometry();
400
401 if (hasTess && !features.tessellationShader)
402 TCU_THROW(NotSupportedError, "Tessellation shaders not supported");
403
404 if (hasGeom && !features.geometryShader)
405 TCU_THROW(NotSupportedError, "Geometry shaders not supported");
406
407 if ((m_params.numViews > 1u) || (m_params.useViewIndexAsDeviceIndex))
408 {
409 context.requireDeviceFunctionality("VK_KHR_multiview");
410 const auto &multiviewFeatures = context.getMultiviewFeatures();
411
412 if (!multiviewFeatures.multiview)
413 TCU_THROW(NotSupportedError, "Multiview not supported");
414
415 if (hasTess && !multiviewFeatures.multiviewTessellationShader)
416 TCU_THROW(NotSupportedError, "Multiview not supported with tessellation shaders");
417
418 if (hasGeom && !multiviewFeatures.multiviewGeometryShader)
419 TCU_THROW(NotSupportedError, "Multiview not supported with geometry shaders");
420
421 if (m_params.numViews > context.getMultiviewProperties().maxMultiviewViewCount)
422 TCU_THROW(NotSupportedError, "Not enough views supported");
423 }
424
425 if (m_params.useSSBO)
426 {
427 if (!features.vertexPipelineStoresAndAtomics)
428 TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
429 }
430
431 if (m_params.useViewIndexAsDeviceIndex)
432 {
433 context.requireInstanceFunctionality("VK_KHR_device_group_creation");
434 context.requireDeviceFunctionality("VK_KHR_device_group");
435 }
436
437 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
438 m_params.pipelineConstructionType);
439 }
440
NoPositionInstance(Context & context,const TestParams & params)441 NoPositionInstance::NoPositionInstance(Context &context, const TestParams ¶ms)
442 : vkt::TestInstance(context)
443 , m_numPhysDevices(1)
444 , m_queueFamilyIndex(0)
445 , m_params(params)
446 {
447 if (m_params.useViewIndexAsDeviceIndex)
448 createDeviceGroup();
449
450 m_numViews = m_params.useViewIndexAsDeviceIndex ? m_numPhysDevices : m_params.numViews;
451 if (m_numViews > context.getMultiviewProperties().maxMultiviewViewCount)
452 TCU_THROW(NotSupportedError, "Not enough views supported");
453 }
454
455 void NoPositionInstance::createDeviceGroup(void)
456 {
457 const tcu::CommandLine &cmdLine = m_context.getTestContext().getCommandLine();
458 const uint32_t devGroupIdx = cmdLine.getVKDeviceGroupId() - 1;
459 uint32_t physDeviceIdx = cmdLine.getVKDeviceId() - 1;
460 const float queuePriority = 1.0f;
461 const auto &vki = m_context.getInstanceInterface();
462
463 m_deviceGroupInstance = createCustomInstanceWithExtension(m_context, "VK_KHR_device_group_creation");
464 const InstanceDriver &instance(m_deviceGroupInstance.getDriver());
465
466 std::vector<VkPhysicalDeviceGroupProperties> devGroupsProperties =
467 enumeratePhysicalDeviceGroups(vki, m_deviceGroupInstance);
468 m_numPhysDevices = devGroupsProperties[devGroupIdx].physicalDeviceCount;
469 auto &devGroupProperties = devGroupsProperties[devGroupIdx];
470
471 if (physDeviceIdx >= devGroupProperties.physicalDeviceCount)
472 physDeviceIdx = 0;
473
474 // Enable device features
475 VkPhysicalDeviceFeatures2 deviceFeatures2 = initVulkanStructure();
476 VkDeviceGroupDeviceCreateInfo deviceGroupInfo = initVulkanStructure(&deviceFeatures2);
477 deviceGroupInfo.physicalDeviceCount = devGroupProperties.physicalDeviceCount;
478 deviceGroupInfo.pPhysicalDevices = devGroupProperties.physicalDevices;
479 const VkPhysicalDeviceFeatures deviceFeatures =
480 getPhysicalDeviceFeatures(instance, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
481 deviceFeatures2.features = deviceFeatures;
482
483 m_physicalDevices.resize(m_numPhysDevices);
484 for (uint32_t physDevIdx = 0; physDevIdx < m_numPhysDevices; physDevIdx++)
485 m_physicalDevices[physDevIdx] = devGroupProperties.physicalDevices[physDevIdx];
486
487 // Prepare queue info
488 const std::vector<VkQueueFamilyProperties> queueProps =
489 getPhysicalDeviceQueueFamilyProperties(instance, devGroupProperties.physicalDevices[physDeviceIdx]);
490 for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
491 {
492 if (queueProps[queueNdx].queueFlags & VK_QUEUE_GRAPHICS_BIT)
493 m_queueFamilyIndex = (uint32_t)queueNdx;
494 }
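// If several queue families expose VK_QUEUE_GRAPHICS_BIT, the loop above keeps the last
// match; any graphics-capable family is sufficient for this test.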
495
496 VkDeviceQueueCreateInfo queueInfo = {
497 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
498 nullptr, // const void* pNext;
499 (VkDeviceQueueCreateFlags)0u, // VkDeviceQueueCreateFlags flags;
500 m_queueFamilyIndex, // uint32_t queueFamilyIndex;
501 1u, // uint32_t queueCount;
502 &queuePriority // const float* pQueuePriorities;
503 };
504
505 // Enable optional device features
506 const auto &contextMultiviewFeatures = m_context.getMultiviewFeatures();
507 const bool multiViewSupport = contextMultiviewFeatures.multiview;
508 VkPhysicalDeviceMultiviewFeatures multiviewFeatures = vk::initVulkanStructure();
509 #ifndef CTS_USES_VULKANSC
510 const auto &contextGpl = m_context.getGraphicsPipelineLibraryFeaturesEXT();
511 const bool gplSupport = contextGpl.graphicsPipelineLibrary;
512 VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT gplFeatures = vk::initVulkanStructure();
513 #endif
514 const auto addFeatures = vk::makeStructChainAdder(&deviceFeatures2);
515 if (multiViewSupport)
516 addFeatures(&multiviewFeatures);
517 #ifndef CTS_USES_VULKANSC
518 if (isConstructionTypeLibrary(m_params.pipelineConstructionType) && gplSupport)
519 addFeatures(&gplFeatures);
520 #endif
521 vki.getPhysicalDeviceFeatures2(deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceFeatures2);
522 // Enable extensions
523 std::vector<const char *> deviceExtensions;
524 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
525 deviceExtensions.push_back("VK_KHR_device_group");
526
527 if (multiViewSupport)
528 deviceExtensions.push_back("VK_KHR_multiview");
529
530 #ifndef CTS_USES_VULKANSC
531 if (isConstructionTypeLibrary(m_params.pipelineConstructionType) && gplSupport)
532 {
533 deviceExtensions.push_back("VK_KHR_pipeline_library");
534 deviceExtensions.push_back("VK_EXT_graphics_pipeline_library");
535 }
536 #endif
537
538 void *pNext = &deviceGroupInfo;
539
540 #ifdef CTS_USES_VULKANSC
541 VkDeviceObjectReservationCreateInfo memReservationInfo = cmdLine.isSubProcess() ?
542 m_context.getResourceInterface()->getStatMax() :
543 resetDeviceObjectReservationCreateInfo();
544 memReservationInfo.pNext = pNext;
545 pNext = &memReservationInfo;
546
547 VkPhysicalDeviceVulkanSC10Features sc10Features = createDefaultSC10Features();
548 sc10Features.pNext = pNext;
549 pNext = &sc10Features;
550 VkPipelineCacheCreateInfo pcCI;
551 std::vector<VkPipelinePoolSize> poolSizes;
552 if (m_context.getTestContext().getCommandLine().isSubProcess())
553 {
554 if (m_context.getResourceInterface()->getCacheDataSize() > 0)
555 {
556 pcCI = {
557 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
558 nullptr, // const void* pNext;
559 VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
560 VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
561 m_context.getResourceInterface()->getCacheDataSize(), // uintptr_t initialDataSize;
562 m_context.getResourceInterface()->getCacheData() // const void* pInitialData;
563 };
564 memReservationInfo.pipelineCacheCreateInfoCount = 1;
565 memReservationInfo.pPipelineCacheCreateInfos = &pcCI;
566 }
567
568 poolSizes = m_context.getResourceInterface()->getPipelinePoolSizes();
569 if (!poolSizes.empty())
570 {
571 memReservationInfo.pipelinePoolSizeCount = uint32_t(poolSizes.size());
572 memReservationInfo.pPipelinePoolSizes = poolSizes.data();
573 }
574 }
575 #endif // CTS_USES_VULKANSC
576
577 const VkDeviceCreateInfo deviceCreateInfo = {
578 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
579 pNext, // const void* pNext;
580 (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
581 1u, // uint32_t queueCreateInfoCount;
582 &queueInfo, // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
583 0u, // uint32_t enabledLayerCount;
584 nullptr, // const char* const* ppEnabledLayerNames;
585 de::sizeU32(deviceExtensions), // uint32_t enabledExtensionCount;
586 de::dataOrNull(deviceExtensions), // const char* const* ppEnabledExtensionNames;
587 deviceFeatures2.pNext == nullptr ? &deviceFeatures :
588 nullptr, // const VkPhysicalDeviceFeatures* pEnabledFeatures;
589 };
590
591 m_logicalDevice = createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(),
592 m_context.getPlatformInterface(), m_deviceGroupInstance, instance,
593 deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceCreateInfo);
594
595 #ifndef CTS_USES_VULKANSC
596 m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), m_deviceGroupInstance,
597 *m_logicalDevice, m_context.getUsedApiVersion(),
598 m_context.getTestContext().getCommandLine()));
599 #else
600 m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
601 new DeviceDriverSC(m_context.getPlatformInterface(), m_context.getInstance(), *m_logicalDevice,
602 m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(),
603 m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(),
604 m_context.getUsedApiVersion()),
605 vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *m_logicalDevice));
606 #endif // CTS_USES_VULKANSC
607
608 m_allocator = de::MovePtr<Allocator>(new SimpleAllocator(
609 *m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, m_physicalDevices[0])));
610 }
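// After createDeviceGroup() the custom instance, logical device, device driver and
// allocator are stored in the corresponding members; iterate() uses them instead of the
// default context objects whenever useViewIndexAsDeviceIndex is set.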
611
612 // Make a render pass with one subpass per color attachment
613 RenderPassWrapper makeRenderPass(const DeviceInterface &vk, const VkDevice device,
614 const PipelineConstructionType pipelineConstructionType, const VkFormat colorFormat,
615 const uint32_t numAttachments,
616 de::MovePtr<VkRenderPassMultiviewCreateInfo> multiviewCreateInfo,
617 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED)
618 {
619 const VkAttachmentDescription colorAttachmentDescription = {
620 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
621 colorFormat, // VkFormat format;
622 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
623 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
624 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
625 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
626 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
627 initialColorImageLayout, // VkImageLayout initialLayout;
628 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
629 };
630 std::vector<VkAttachmentDescription> attachmentDescriptions(numAttachments, colorAttachmentDescription);
631
632 // Create a subpass for each attachment (each attachment is a layer of an arrayed image).
633 std::vector<VkAttachmentReference> colorAttachmentReferences(numAttachments);
634 std::vector<VkSubpassDescription> subpasses;
635
636 // Ordering here must match the framebuffer attachments
637 for (uint32_t i = 0; i < numAttachments; ++i)
638 {
639 const VkAttachmentReference attachmentRef = {
640 i, // uint32_t attachment;
641 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
642 };
643
644 colorAttachmentReferences[i] = attachmentRef;
645
646 const VkSubpassDescription subpassDescription = {
647 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
648 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
649 0u, // uint32_t inputAttachmentCount;
650 nullptr, // const VkAttachmentReference* pInputAttachments;
651 1u, // uint32_t colorAttachmentCount;
652 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments;
653 nullptr, // const VkAttachmentReference* pResolveAttachments;
654 nullptr, // const VkAttachmentReference* pDepthStencilAttachment;
655 0u, // uint32_t preserveAttachmentCount;
656 nullptr // const uint32_t* pPreserveAttachments;
657 };
658 subpasses.push_back(subpassDescription);
659 }
660
661 const VkRenderPassCreateInfo renderPassInfo = {
662 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
663 multiviewCreateInfo.get(), // const void* pNext;
664 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
665 static_cast<uint32_t>(attachmentDescriptions.size()), // uint32_t attachmentCount;
666 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments;
667 static_cast<uint32_t>(subpasses.size()), // uint32_t subpassCount;
668 &subpasses[0], // const VkSubpassDescription* pSubpasses;
669 0u, // uint32_t dependencyCount;
670 nullptr // const VkSubpassDependency* pDependencies;
671 };
672
673 return RenderPassWrapper(pipelineConstructionType, vk, device, &renderPassInfo);
674 }
675
676 inline VkImageSubresourceRange makeColorSubresourceRange(const int baseArrayLayer, const int layerCount)
677 {
678 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<uint32_t>(baseArrayLayer),
679 static_cast<uint32_t>(layerCount));
680 }
681
682 tcu::TestStatus NoPositionInstance::iterate(void)
683 {
684 const bool useDeviceGroup = m_params.useViewIndexAsDeviceIndex;
685 const auto &vki = m_context.getInstanceInterface();
686 const auto &vkd = useDeviceGroup ? getDeviceInterface() : m_context.getDeviceInterface();
687 const auto physicalDevice = useDeviceGroup ? getPhysicalDevice() : m_context.getPhysicalDevice();
688 const auto device = useDeviceGroup ? getDevice() : m_context.getDevice();
689 const auto qIndex = useDeviceGroup ? m_queueFamilyIndex : m_context.getUniversalQueueFamilyIndex();
690 const auto queue = useDeviceGroup ? getDeviceQueue(vkd, device, qIndex, 0) : m_context.getUniversalQueue();
691 auto &alloc = useDeviceGroup ? *m_allocator : m_context.getDefaultAllocator();
692 const auto format = NoPositionCase::getImageFormat();
693 const auto extent = NoPositionCase::getImageExtent();
694 const auto bgColor = NoPositionCase::getBackGroundColor();
695 const VkImageUsageFlags usage = (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
696 const auto viewType = (m_numViews > 1u ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D);
697 const bool tess = m_params.tessellation();
698 VkShaderStageFlags stageFlags = 0u;
699
700 // Shader modules.
701 ShaderWrapper vert;
702 ShaderWrapper tesc;
703 ShaderWrapper tese;
704 ShaderWrapper geom;
705 ShaderWrapper frag;
706
707 if (m_params.selectedStages & STAGE_VERTEX)
708 {
709 vert = ShaderWrapper(vkd, device, m_context.getBinaryCollection().get("vert"), 0u);
710 stageFlags |= VK_SHADER_STAGE_VERTEX_BIT;
711 }
712 if (m_params.selectedStages & STAGE_TESS_CONTROL)
713 {
714 tesc = ShaderWrapper(vkd, device, m_context.getBinaryCollection().get("tesc"), 0u);
715 stageFlags |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
716 }
717 if (m_params.selectedStages & STAGE_TESS_EVALUATION)
718 {
719 tese = ShaderWrapper(vkd, device, m_context.getBinaryCollection().get("tese"), 0u);
720 stageFlags |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
721 }
722 if (m_params.selectedStages & STAGE_GEOMETRY)
723 {
724 geom = ShaderWrapper(vkd, device, m_context.getBinaryCollection().get("geom"), 0u);
725 stageFlags |= VK_SHADER_STAGE_GEOMETRY_BIT;
726 }
727
728 frag = ShaderWrapper(vkd, device, m_context.getBinaryCollection().get("frag"), 0u);
729 stageFlags |= VK_SHADER_STAGE_FRAGMENT_BIT;
730
731 const uint32_t layers = m_numViews;
732
733 // Color attachment.
734 const VkImageCreateInfo colorImageInfo = {
735 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
736 nullptr, // const void* pNext;
737 0u, // VkImageCreateFlags flags;
738 vk::VK_IMAGE_TYPE_2D, // VkImageType imageType;
739 format, // VkFormat format;
740 extent, // VkExtent3D extent;
741 1u, // uint32_t mipLevels;
742 layers, // uint32_t arrayLayers;
743 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
744 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
745 usage, // VkImageUsageFlags usage;
746 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
747 0u, // uint32_t queueFamilyIndexCount;
748 nullptr, // const uint32_t* pQueueFamilyIndices;
749 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
750 };
751 ImageWithMemory colorImage(vkd, device, alloc, colorImageInfo, MemoryRequirement::Any);
752
753 const auto subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, layers);
754
755 // Vertices and vertex buffer.
756 const uint32_t numVertices = 3;
757 const tcu::Vec4 vertices[numVertices] = {
758 tcu::Vec4(0.0f, -0.5f, 0.0f, 1.0f),
759 tcu::Vec4(0.5f, 0.5f, 0.0f, 1.0f),
760 tcu::Vec4(-0.5f, 0.5f, 0.0f, 1.0f),
761 };
762
763 const auto vertexBufferSize = static_cast<VkDeviceSize>(numVertices * sizeof(vertices[0]));
764 const auto vertexBufferInfo = makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
765 const auto vertexBufferOffset = static_cast<VkDeviceSize>(0);
766 BufferWithMemory vertexBuffer(vkd, device, alloc, vertexBufferInfo, MemoryRequirement::HostVisible);
767
768 auto &vertexBufferAlloc = vertexBuffer.getAllocation();
769 void *vertexBufferPtr = vertexBufferAlloc.getHostPtr();
770 deMemcpy(vertexBufferPtr, &vertices[0], static_cast<size_t>(vertexBufferSize));
771 flushAlloc(vkd, device, vertexBufferAlloc);
772
773 de::MovePtr<VkRenderPassMultiviewCreateInfo> multiviewInfo;
774 std::vector<uint32_t> viewMasks;
775 std::vector<uint32_t> correlationMasks;
776
777 uint32_t subpassCount = 1;
778
779 if ((m_numViews > 1u) || (m_params.useViewIndexAsDeviceIndex))
780 {
781 if (m_params.useViewIndexAsDeviceIndex)
782 {
783 // In case of useViewIndexAsDeviceIndex,
784 // each view has its own view mask
785 viewMasks.resize(m_numViews);
786 correlationMasks.resize(m_numViews);
787
788 for (uint32_t viewIdx = 0u; viewIdx < m_numViews; ++viewIdx)
789 {
790 viewMasks[viewIdx] |= (1 << viewIdx);
791 correlationMasks[viewIdx] |= (1 << viewIdx);
792 }
793
794 subpassCount = de::sizeU32(viewMasks);
795 }
796 else
797 {
798 viewMasks.resize(1);
799 correlationMasks.resize(1);
800
801 for (uint32_t viewIdx = 0u; viewIdx < m_numViews; ++viewIdx)
802 {
803 viewMasks[0] |= (1 << viewIdx);
804 correlationMasks[0] |= (1 << viewIdx);
805 }
806 }
807
808 multiviewInfo = de::MovePtr<VkRenderPassMultiviewCreateInfo>(new VkRenderPassMultiviewCreateInfo{
809 VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, // VkStructureType sType;
810 nullptr, // const void* pNext;
811 de::sizeU32(viewMasks), // uint32_t subpassCount;
812 de::dataOrNull(viewMasks), // const uint32_t* pViewMasks;
813 0u, // uint32_t dependencyCount;
814 nullptr, // const int32_t* pViewOffsets;
815 de::sizeU32(correlationMasks), // uint32_t correlationMaskCount;
816 de::dataOrNull(correlationMasks), // const uint32_t* pCorrelationMasks;
817 });
818 }
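// As an example, with two views this block produces either two single-view subpass masks
// { 0x1, 0x2 } (one subpass per device when useViewIndexAsDeviceIndex is set) or a single
// combined mask { 0x3 } that renders both views in one subpass.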
819
820 RenderPassWrapper renderPass(makeRenderPass(vkd, device, m_params.pipelineConstructionType, format, subpassCount,
821 multiviewInfo, VK_IMAGE_LAYOUT_UNDEFINED));
822
823 // Descriptor set layout and pipeline layout.
824 DescriptorSetLayoutBuilder layoutBuilder;
825 if (m_params.useSSBO)
826 {
827 layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stageFlags);
828 }
829 const auto descriptorSetLayout = layoutBuilder.build(vkd, device);
830 const PipelineLayoutWrapper pipelineLayout(m_params.pipelineConstructionType, vkd, device,
831 descriptorSetLayout.get());
832
833 // Pipeline.
834 const std::vector<VkViewport> viewports{makeViewport(extent)};
835 const std::vector<VkRect2D> scissors{makeRect2D(extent)};
836
837 const auto primitiveTopology(tess ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
838 const VkPipelineCreateFlags createFlags = m_params.useViewIndexAsDeviceIndex ?
839 VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT :
840 VkPipelineCreateFlagBits(0u);
841
842 std::vector<GraphicsPipelineWrapper> pipelines;
843 pipelines.reserve(subpassCount);
844
845 std::vector<de::SharedPtr<Unique<VkImageView>>> colorAttachments;
846 std::vector<VkImage> images;
847 std::vector<VkImageView> attachmentHandles;
848
849 VkPipeline basePipeline = VK_NULL_HANDLE;
850
851 for (uint32_t subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
852 {
853 colorAttachments.push_back(makeSharedPtr(makeImageView(
854 vkd, device, *colorImage, viewType, format,
855 makeColorSubresourceRange(0, m_params.useViewIndexAsDeviceIndex ? subpassCount : m_numViews))));
856 images.push_back(*colorImage);
857 attachmentHandles.push_back(**colorAttachments.back());
858
859 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
860 pipelines.emplace_back(
861 vki, vkd, physicalDevice, device, m_context.getDeviceExtensions(), m_params.pipelineConstructionType,
862 createFlags | (basePipeline == VK_NULL_HANDLE ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT :
863 VK_PIPELINE_CREATE_DERIVATIVE_BIT));
864 #else
865 pipelines.emplace_back(vki, vkd, physicalDevice, device, m_context.getDeviceExtensions(),
866 m_params.pipelineConstructionType, createFlags);
867 #endif // CTS_USES_VULKANSC
868
869 pipelines.back()
870 .setDefaultTopology(primitiveTopology)
871 .setDefaultRasterizationState()
872 .setDefaultMultisampleState()
873 .setDefaultDepthStencilState()
874 .setDefaultColorBlendState()
875 .setupVertexInputState()
876 .setupPreRasterizationShaderState(viewports, scissors, pipelineLayout, *renderPass, subpassNdx, vert,
877 nullptr, tesc, tese, geom)
878 .setupFragmentShaderState(pipelineLayout, *renderPass, 0u, frag)
879 .setupFragmentOutputState(*renderPass)
880 .setMonolithicPipelineLayout(pipelineLayout)
881 .buildPipeline(VK_NULL_HANDLE, basePipeline, -1);
882
883 if (pipelines.front().wasBuild())
884 basePipeline = pipelines.front().getPipeline();
885 }
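// The first pipeline is built with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT and the
// remaining subpass pipelines are created as derivatives of it; under Vulkan SC all
// pipelines are built independently because pipeline derivatives are forbidden there.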
886
887 renderPass.createFramebuffer(vkd, device, static_cast<uint32_t>(attachmentHandles.size()), &images[0],
888 &attachmentHandles[0], extent.width, extent.height);
889
890 // Descriptor set and output SSBO if needed.
891 Move<VkDescriptorPool> descriptorPool;
892 Move<VkDescriptorSet> descriptorSet;
893 de::MovePtr<BufferWithMemory> ssboBuffer;
894 const uint32_t numCountersPerStage = m_params.useViewIndexAsDeviceIndex ? max_devgrp_phydevices : m_params.numViews;
895 const auto ssboElementCount = kStageCount * numCountersPerStage;
896 const auto ssboBufferSize = static_cast<VkDeviceSize>(ssboElementCount * sizeof(uint32_t));
897
898 if (m_params.useSSBO)
899 {
900 // Output SSBO.
901 const auto ssboBufferInfo = makeBufferCreateInfo(ssboBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
902 ssboBuffer = de::MovePtr<BufferWithMemory>(
903 new BufferWithMemory(vkd, device, alloc, ssboBufferInfo, MemoryRequirement::HostVisible));
904 auto &ssboBufferAlloc = ssboBuffer->getAllocation();
905
906 deMemset(ssboBufferAlloc.getHostPtr(), 0, static_cast<size_t>(ssboBufferSize));
907 flushAlloc(vkd, device, ssboBufferAlloc);
908
909 // Descriptor pool.
910 DescriptorPoolBuilder poolBuilder;
911 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
912 descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
913
914 // Descriptor set.
915 descriptorSet = makeDescriptorSet(vkd, device, descriptorPool.get(), descriptorSetLayout.get());
916 const auto ssboWriteInfo = makeDescriptorBufferInfo(ssboBuffer->get(), 0ull, ssboBufferSize);
917 DescriptorSetUpdateBuilder updateBuilder;
918 updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u),
919 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &ssboWriteInfo);
920 updateBuilder.update(vkd, device);
921 }
922
923 // Command pool and buffer.
924 const auto cmdPool = makeCommandPool(vkd, device, qIndex);
925 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
926 const auto cmdBuffer = cmdBufferPtr.get();
927
928 const std::vector<VkClearValue> colors(subpassCount, makeClearValueColorVec4(bgColor));
929
930 // Render triangle.
931 beginCommandBuffer(vkd, cmdBuffer);
932 renderPass.begin(vkd, cmdBuffer, scissors.front(), subpassCount, &colors[0]);
933
934 // Draw
935 for (uint32_t subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
936 {
937 if (subpassNdx != 0)
938 renderPass.nextSubpass(vkd, cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
939
940 pipelines[subpassNdx].bind(cmdBuffer);
941 if (m_params.useSSBO)
942 vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout.get(), 0u, 1u,
943 &descriptorSet.get(), 0u, nullptr);
944 vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
945 vkd.cmdDraw(cmdBuffer, numVertices, 1u, 0u, 0u);
946 }
947
948 renderPass.end(vkd, cmdBuffer);
949
950 // Output verification buffer.
951 const auto tcuFormat = mapVkFormat(format);
952 const auto pixelSize = static_cast<uint32_t>(tcu::getPixelSize(tcuFormat));
953 const auto layerPixels = extent.width * extent.height;
954 const auto layerBytes = layerPixels * pixelSize;
955 const auto totalBytes = layerBytes * m_numViews;
956
957 const auto verificationBufferInfo = makeBufferCreateInfo(totalBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
958 BufferWithMemory verificationBuffer(vkd, device, alloc, verificationBufferInfo, MemoryRequirement::HostVisible);
959
960 // Copy output image to verification buffer.
961 const auto preTransferBarrier = makeImageMemoryBarrier(
962 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
963 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorImage.get(), subresourceRange);
964 vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
965 0u, nullptr, 0u, nullptr, 1u, &preTransferBarrier);
966
967 const auto subresourceLayers = makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, m_numViews);
968 const VkBufferImageCopy copyRegion = {
969 0ull, // VkDeviceSize bufferOffset;
970 0u, // uint32_t bufferRowLength;
971 0u, // uint32_t bufferImageHeight;
972 subresourceLayers, // VkImageSubresourceLayers imageSubresource;
973 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
974 extent, // VkExtent3D imageExtent;
975 };
976 vkd.cmdCopyImageToBuffer(cmdBuffer, colorImage.get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
977 verificationBuffer.get(), 1u, &copyRegion);
978
979 const auto postTransferBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
980 vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
981 &postTransferBarrier, 0u, nullptr, 0u, nullptr);
982
983 // Output SSBO to host barrier.
984 if (m_params.useSSBO)
985 {
986 const auto ssboBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
987 vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
988 &ssboBarrier, 0u, nullptr, 0u, nullptr);
989 }
990
991 // Submit commands.
992 endCommandBuffer(vkd, cmdBuffer);
993 const uint32_t deviceMask = (1 << m_numPhysDevices) - 1;
994 submitCommandsAndWait(vkd, device, queue, cmdBuffer, useDeviceGroup, deviceMask);
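// deviceMask has one bit set per physical device, so when a device group is used the
// command buffer is submitted to every device in the group in a single submission.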
995
996 // Verify the image has the background color.
997 auto &verificationBufferAlloc = verificationBuffer.getAllocation();
998 auto verificationBufferPtr = reinterpret_cast<const char *>(verificationBufferAlloc.getHostPtr());
999 invalidateAlloc(vkd, device, verificationBufferAlloc);
1000
1001 const auto iWidth = static_cast<int>(extent.width);
1002 const auto iHeight = static_cast<int>(extent.height);
1003 const auto iDepth = static_cast<int>(extent.depth);
1004
1005 for (uint32_t layer = 0u; layer < m_params.numViews; ++layer)
1006 {
1007 const auto pixels =
1008 tcu::ConstPixelBufferAccess(tcuFormat, iWidth, iHeight, iDepth,
1009 reinterpret_cast<const void *>(verificationBufferPtr + layer * layerBytes));
1010
1011 for (int y = 0; y < iHeight; ++y)
1012 for (int x = 0; x < iWidth; ++x)
1013 {
1014 const auto pixel = pixels.getPixel(x, y);
1015 if (pixel != bgColor)
1016 {
1017 std::ostringstream msg;
1018 msg << "Unexpected color found at pixel (" << x << ", " << y << ") in layer " << layer;
1019
1020 auto &log = m_context.getTestContext().getLog();
1021 log << tcu::TestLog::Message << msg.str() << tcu::TestLog::EndMessage;
1022 log << tcu::TestLog::Image("Result", "Result Image", pixels);
1023 TCU_FAIL(msg.str());
1024 }
1025 }
1026 }
1027
1028 // Verify SSBO if used.
1029 if (m_params.useSSBO)
1030 {
1031 // Get stored counters.
1032 const auto ssboBufferSizeSz = static_cast<size_t>(ssboBufferSize);
1033 auto &ssboAlloc = ssboBuffer->getAllocation();
1034 invalidateAlloc(vkd, device, ssboAlloc);
1035
1036 std::vector<uint32_t> ssboCounters(ssboElementCount);
1037 DE_ASSERT(ssboBufferSizeSz == ssboCounters.size() * sizeof(decltype(ssboCounters)::value_type));
1038 deMemcpy(ssboCounters.data(), ssboAlloc.getHostPtr(), ssboBufferSizeSz);
1039
1040 // Minimum accepted counter values.
1041 // Vertex, Tessellation Control, Tessellation Evaluation, Geometry.
1042 uint32_t numActualCountersPerStage = m_numViews;
1043 uint32_t expectedCounters[kStageCount] = {3u, 3u, 3u, 1u};
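// These are lower bounds for a single triangle: one invocation per vertex, control point
// and tessellated vertex for the first three stages, and one invocation per primitive for
// the geometry stage; the check below also accepts exact multiples of these values.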
1044
1045 // Verify.
1046 for (uint32_t stageIdx = 0u; stageIdx < kStageCount; ++stageIdx)
1047 for (uint32_t counterIdx = 0u; counterIdx < numActualCountersPerStage; ++counterIdx)
1048 {
1049 // If the stage is not selected, the expected minimum is zero (any value is accepted). Otherwise, the counter must match the expected value or be an exact multiple of it.
1050 uint32_t expectedVal = expectedCounters[stageIdx];
1051 uint32_t minVal = ((m_params.selectedStages & (1u << stageIdx)) ? expectedVal : 0u);
1052 const uint32_t storedVal = ssboCounters[stageIdx * numCountersPerStage + counterIdx];
1053
1054 bool ok = false;
1055 if (minVal != 0u)
1056 {
1057 if (storedVal != 0)
1058 ok = (storedVal == minVal) ?
1059 true
1060 // Implementations may invoke a stage more often than the minimum (e.g. once per gl_ViewIndex/gl_DeviceIndex value), so exact multiples are also accepted.
1061 :
1062 ((storedVal % minVal) == 0u);
1063 else
1064 ok = false;
1065 }
1066 else
1067 ok = true; /* continue */
1068
1069 if (!ok)
1070 {
1071 const char *stageNames[kStageCount] = {
1072 "vertex",
1073 "tessellation control",
1074 "tessellation evaluation",
1075 "geometry",
1076 };
1077
1078 std::ostringstream msg;
1079 msg << "Unexpected SSBO counter value in view " << counterIdx << " for the " << stageNames[stageIdx]
1080 << " shader:"
1081 << " got " << storedVal << " but expected " << minVal;
1082 TCU_FAIL(msg.str());
1083 }
1084 }
1085 }
1086
1087 return tcu::TestStatus::pass("Pass");
1088 }
1089
1090 } // namespace
1091
1092 tcu::TestCaseGroup *createNoPositionTests(tcu::TestContext &testCtx,
1093 vk::PipelineConstructionType pipelineConstructionType)
1094 {
1095 // Tests with shaders that do not write to the Position built-in
1096 de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "no_position"));
1097
1098 for (int aux = 0; aux < 2; ++aux)
1099 {
1100 const bool explicitDeclarations = (aux == 1);
1101 const std::string declGroupName(explicitDeclarations ? "explicit_declarations" : "implicit_declarations");
1102 de::MovePtr<tcu::TestCaseGroup> declGroup(new tcu::TestCaseGroup(testCtx, declGroupName.c_str()));
1103
1104 for (int aux2 = 0; aux2 < 2; ++aux2)
1105 {
1106 const bool useSSBO = (aux2 == 1);
1107 const std::string ssboGroupName(useSSBO ? "ssbo_writes" : "basic");
1108 de::MovePtr<tcu::TestCaseGroup> ssboGroup(new tcu::TestCaseGroup(testCtx, ssboGroupName.c_str()));
1109
1110 const uint32_t maxTestedViewCount = useSSBO ? 3u : 2u;
1111 for (uint32_t viewCount = 1u; viewCount <= maxTestedViewCount; ++viewCount)
1112 {
1113 auto makeViewGroupName = [&]() -> std::string
1114 {
1115 switch (viewCount)
1116 {
1117 case 1u:
1118 return "single_view";
1119 case 2u:
1120 return "multiview";
1121 case 3u:
1122 return "device_index_as_view_index";
1123 }
1124 DE_ASSERT(false);
1125 return std::string();
1126 };
1127
1128 const std::string viewGroupName = makeViewGroupName();
1129 const bool useDeviceIndexAsViewIndex = (3u == viewCount);
1130
1131 // Shader objects do not support multiview
1132 if (viewCount != 1 && vk::isConstructionTypeShaderObject(pipelineConstructionType))
1133 continue;
1134 de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewGroupName.c_str()));
1135
1136 for (ShaderStageFlags stages = 0u; stages < STAGE_MASK_COUNT; ++stages)
1137 {
1138 // Vertex must always be present.
1139 if (!(stages & STAGE_VERTEX))
1140 continue;
1141
1142 // Either both tessellation stages must be present, or neither of them.
1143 if (static_cast<bool>(stages & STAGE_TESS_CONTROL) !=
1144 static_cast<bool>(stages & STAGE_TESS_EVALUATION))
1145 continue;
1146
1147 const auto writeMaskCases = getWriteSubCases(stages);
1148 for (const auto writeMask : writeMaskCases)
1149 {
1150 std::string testName;
1151 if (stages & STAGE_VERTEX)
1152 testName += (testName.empty() ? "" : "_") + std::string("v") +
1153 ((writeMask & STAGE_VERTEX) ? "1" : "0");
1154 if (stages & STAGE_TESS_CONTROL)
1155 testName += (testName.empty() ? "" : "_") + std::string("c") +
1156 ((writeMask & STAGE_TESS_CONTROL) ? "1" : "0");
1157 if (stages & STAGE_TESS_EVALUATION)
1158 testName += (testName.empty() ? "" : "_") + std::string("e") +
1159 ((writeMask & STAGE_TESS_EVALUATION) ? "1" : "0");
1160 if (stages & STAGE_GEOMETRY)
1161 testName += (testName.empty() ? "" : "_") + std::string("g") +
1162 ((writeMask & STAGE_GEOMETRY) ? "1" : "0");
1163
1164 TestParams params{};
1165 params.pipelineConstructionType = pipelineConstructionType;
1166 params.selectedStages = stages;
1167 params.writeStages = writeMask;
1168 // In the useDeviceIndexAsViewIndex case, the number of physical devices in the group
1169 // determines the number of views.
1170 params.numViews = useDeviceIndexAsViewIndex ? 0u : viewCount;
1171 params.explicitDeclarations = explicitDeclarations;
1172 params.useSSBO = useSSBO;
1173 params.useViewIndexAsDeviceIndex = useDeviceIndexAsViewIndex;
1174
1175 viewGroup->addChild(new NoPositionCase(testCtx, testName, params));
1176 }
1177 }
1178
1179 ssboGroup->addChild(viewGroup.release());
1180 }
1181
1182 declGroup->addChild(ssboGroup.release());
1183 }
1184
1185 group->addChild(declGroup.release());
1186 }
1187
1188 return group.release();
1189 }
1190
1191 } // namespace pipeline
1192 } // namespace vkt
1193