/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2023 The Khronos Group Inc.
 * Copyright (c) 2023 Valve Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Input Attribute Offset Tests
 *//*--------------------------------------------------------------------*/

#include "vktPipelineInputAttributeOffsetTests.hpp"

#include "vkBarrierUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"

#include "tcuImageCompare.hpp"

#include <string>
#include <sstream>
#include <memory>
#include <vector>
#include <array>

namespace vkt
{
namespace pipeline
{

using namespace vk;

namespace
{

// StrideCase determines the way we're going to store vertex data in the vertex buffer.
//
// With packed vertices:
//
//     Vertex buffer
//    +-----+---------------------------------------------------------------------+
//    |     +---------------------------------------------------------------------+
//    |     |    +--------+--------+                                              |
//    |     |    |Attr    |Attr    |                                              |
//    |     |    |        |        | ...                                          |
//    |     |    +--------+--------+                                              |
//    |     +---------------------------------------------------------------------+
//    +-----+---------------------------------------------------------------------+
//
//    -------
//    Vertex binding offset
//
//          ------
//          Attribute offset
//
// With padded vertices:
//
//     Vertex buffer
//    +-----+---------------------------------------------------------------------+
//    |     +---------------------------------------------------------------------+
//    |     |    +--------+--------+--------+                                     |
//    |     |    |Attr    |Pad     |Attr    |                                     |
//    |     |    |        |        |        |                                     |
//    |     |    +--------+--------+--------+                                     |
//    |     +---------------------------------------------------------------------+
//    +-----+---------------------------------------------------------------------+
//
//    -------
//    Vertex binding offset
//
//          ------
//          Attribute offset
//
// With overlapping vertices, the case is similar to packed. However, the data type in the _shader_ will be a Vec4, stored in the
// buffer as Vec2's. In the shader, only the XY coordinates are properly used (ZW coordinates would belong to the next vertex).
//
enum class StrideCase
{
    PACKED      = 0,
    PADDED      = 1,
    OVERLAPPING = 2,
};

uint32_t getTypeSize(glu::DataType dataType)
{
    switch (dataType)
    {
    case glu::TYPE_FLOAT_VEC2:
        return static_cast<uint32_t>(sizeof(tcu::Vec2));
    case glu::TYPE_FLOAT_VEC4:
        return static_cast<uint32_t>(sizeof(tcu::Vec4));
    default:
        break;
    }

    DE_ASSERT(false);
    return 0u;
}

struct TestParams
{
    const PipelineConstructionType constructionType;
    const glu::DataType dataType; // vec2 or vec4.
    const uint32_t bindingOffset; // When binding vertex buffer.
    const StrideCase strideCase;  // Pack all data or include some padding.
    const bool useMemoryOffset;   // Apply an offset when binding memory to the buffer.
    const bool dynamic;           // Use dynamic state or not.

    uint32_t attributeSize(void) const
    {
        return getTypeSize(dataType);
    }

    bool isOverlapping(void) const
    {
        return (strideCase == StrideCase::OVERLAPPING);
    }

    VkFormat attributeFormat(void) const
    {
        switch (dataType)
        {
        case glu::TYPE_FLOAT_VEC2:
            return (isOverlapping() ? VK_FORMAT_R32G32B32A32_SFLOAT : VK_FORMAT_R32G32_SFLOAT);
        case glu::TYPE_FLOAT_VEC4:
            return VK_FORMAT_R32G32B32A32_SFLOAT;
        default:
            break;
        }

        DE_ASSERT(false);
        return VK_FORMAT_UNDEFINED;
    }

    // Given the vertex buffer binding offset, calculate the appropriate attribute offset to make them aligned.
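    // E.g. with bindingOffset 3 and a vec2 attribute (size 8), this returns (8 - 3) % 8 = 5, so 3 + 5 = 8 keeps reads aligned to the attribute size.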
    uint32_t attributeOffset(void) const
    {
        const auto attribSize = attributeSize();
        DE_ASSERT(bindingOffset < attribSize);
        return ((attribSize - bindingOffset) % attribSize);
    }

    // Calculates proper padding size between elements according to strideCase.
    uint32_t vertexDataPadding(void) const
    {
        if (strideCase == StrideCase::PADDED)
            return attributeSize();
        return 0u;
    }

    // Calculates proper binding stride according to strideCase.
    uint32_t bindingStride(void) const
    {
        return attributeSize() + vertexDataPadding();
    }
};

using VertexVec = std::vector<tcu::Vec2>;
using BytesVec  = std::vector<uint8_t>;

BytesVec buildVertexBufferData(const VertexVec &origVertices, const TestParams &params)
{
    DE_ASSERT(!origVertices.empty());

    VertexVec vertices(origVertices);

    if (params.isOverlapping())
    {
        // Each vertex will be read as a vec4, so we need one extra element at the end to make the last vec4 read valid and avoid going beyond the end of the buffer.
        DE_ASSERT(params.dataType == glu::TYPE_FLOAT_VEC2);
        vertices.push_back(tcu::Vec2(0.0f, 0.0f));
    }

    const auto vertexCount = de::sizeU32(vertices);
    const auto dataSize    = params.bindingOffset + params.attributeOffset() + vertexCount * params.bindingStride();
    const tcu::Vec2 zw(0.0f, 1.0f);
    const auto zwSize        = static_cast<uint32_t>(sizeof(zw));
    const auto srcVertexSize = static_cast<uint32_t>(sizeof(VertexVec::value_type));
    const bool needsZW(params.attributeSize() > srcVertexSize); // vec4 needs each vec2 with zw appended.
    const auto paddingSize = params.vertexDataPadding();
    BytesVec data(dataSize, uint8_t{0});

    uint8_t *nextVertexPtr = data.data() + params.bindingOffset + params.attributeOffset();

    for (uint32_t vertexIdx = 0u; vertexIdx < vertexCount; ++vertexIdx)
    {
        // Copy vertex.
        deMemcpy(nextVertexPtr, &vertices.at(vertexIdx), srcVertexSize);
        nextVertexPtr += srcVertexSize;

        // Copy extra ZW values if needed.
        if (needsZW)
        {
            deMemcpy(nextVertexPtr, &zw, zwSize);
            nextVertexPtr += zwSize;
        }

        // Skip the padding bytes.
        nextVertexPtr += paddingSize;
    }

    return data;
}

tcu::Vec4 getDefaultColor(void)
{
    return tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f);
}

tcu::Vec4 getClearColor(void)
{
    return tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f);
}

tcu::IVec3 getDefaultExtent(void)
{
    return tcu::IVec3(4, 4, 1); // Multiple pixels and vertices, not too big.
}

// Generate one triangle per pixel.
VertexVec generateVertices(uint32_t width, uint32_t height)
{
    VertexVec vertices;
    vertices.reserve(width * height * 3u); // 3 points (1 triangle) per pixel.

    // Normalized pixel width and height.
    const auto pixelWidth   = 2.0f / static_cast<float>(width);
    const auto pixelHeight  = 2.0f / static_cast<float>(height);
    const auto widthMargin  = pixelWidth / 4.0f;
    const auto heightMargin = pixelHeight / 4.0f;

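    // Each triangle is centered on its pixel and spans half a pixel in width and height, so it covers that pixel's center.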
    for (uint32_t y = 0; y < height; ++y)
        for (uint32_t x = 0; x < width; ++x)
        {
            // Normalized pixel center.
            const auto pixelCenterX = ((static_cast<float>(x) + 0.5f) / static_cast<float>(width)) * 2.0f - 1.0f;
            const auto pixelCenterY = ((static_cast<float>(y) + 0.5f) / static_cast<float>(height)) * 2.0f - 1.0f;

            vertices.push_back(tcu::Vec2(pixelCenterX, pixelCenterY - heightMargin));               // Top
            vertices.push_back(tcu::Vec2(pixelCenterX - widthMargin, pixelCenterY + heightMargin)); // Bottom left.
            vertices.push_back(tcu::Vec2(pixelCenterX + widthMargin, pixelCenterY + heightMargin)); // Bottom right.
        }

    return vertices;
}

class InputAttributeOffsetCase : public vkt::TestCase
{
public:
    InputAttributeOffsetCase(tcu::TestContext &testCtx, const std::string &name, const TestParams &params)
        : vkt::TestCase(testCtx, name)
        , m_params(params)
    {
    }
    virtual ~InputAttributeOffsetCase(void)
    {
    }
    void initPrograms(vk::SourceCollections &programCollection) const override;
    TestInstance *createInstance(Context &context) const override;
    void checkSupport(Context &context) const override;

protected:
    const TestParams m_params;
};

class InputAttributeOffsetInstance : public vkt::TestInstance
{
public:
    InputAttributeOffsetInstance(Context &context, const TestParams &params)
        : vkt::TestInstance(context)
        , m_params(params)
    {
    }
    virtual ~InputAttributeOffsetInstance(void)
    {
    }
    tcu::TestStatus iterate(void) override;

protected:
    const TestParams m_params;
};

TestInstance *InputAttributeOffsetCase::createInstance(Context &context) const
{
    return new InputAttributeOffsetInstance(context, m_params);
}

void InputAttributeOffsetCase::checkSupport(Context &context) const
{
    const auto &vki           = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();

    checkPipelineConstructionRequirements(vki, physicalDevice, m_params.constructionType);

#ifndef CTS_USES_VULKANSC
    if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset"))
    {
        const auto &properties     = context.getPortabilitySubsetProperties();
        const auto &minStrideAlign = properties.minVertexInputBindingStrideAlignment;
        const auto bindingStride   = m_params.bindingStride();

        if (bindingStride < minStrideAlign || bindingStride % minStrideAlign != 0u)
            TCU_THROW(NotSupportedError, "Binding stride " + std::to_string(bindingStride) + " not a multiple of " +
                                             std::to_string(minStrideAlign));
    }
#endif // CTS_USES_VULKANSC

    if (m_params.dynamic)
        context.requireDeviceFunctionality("VK_EXT_vertex_input_dynamic_state");
}

void InputAttributeOffsetCase::initPrograms(vk::SourceCollections &programCollection) const
{
    {
        std::ostringstream frag;
        frag << "#version 460\n"
             << "layout (location=0) out vec4 outColor;\n"
             << "void main (void) { outColor = vec4" << getDefaultColor() << "; }\n";
        programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
    }

    {
        const auto extraComponents =
            ((m_params.dataType == glu::TYPE_FLOAT_VEC4) ?
                 "" :
                 ((m_params.isOverlapping())
                      // Simulate that we use the .zw components in order to force the implementation to read them.
                      ?
                      ", floor(abs(inPos.z) / 1000.0), (floor(abs(inPos.w) / 2500.0) + 1.0)" // Should result in 0.0, 1.0.
                      :
                      ", 0.0, 1.0"));
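        // In the overlapping case the ZW values read from the buffer belong to the next vertex and stay within [-1,1],
        // so the floor() expressions above always evaluate to 0.0 and 1.0.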
        const auto componentSelect = (m_params.isOverlapping() ? ".xy" : "");

        std::ostringstream vert;
        vert << "#version 460\n"
             << "layout (location=0) in "
             << glu::getDataTypeName(m_params.isOverlapping() ? glu::TYPE_FLOAT_VEC4 : m_params.dataType) << " inPos;\n"
             << "void main (void) { gl_Position = vec4(inPos" << componentSelect << extraComponents << "); }\n";
        programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
    }
}

tcu::TestStatus InputAttributeOffsetInstance::iterate(void)
{
    const auto ctx              = m_context.getContextCommonData();
    const auto fbExtent         = getDefaultExtent();
    const auto vkExtent         = makeExtent3D(fbExtent);
    const auto vertices         = generateVertices(vkExtent.width, vkExtent.height);
    const auto vertexBufferData = buildVertexBufferData(vertices, m_params);
    const auto colorFormat      = VK_FORMAT_R8G8B8A8_UNORM;
    const auto colorUsage       = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

    // Vertex buffer.
    const auto vertexBufferSize   = static_cast<VkDeviceSize>(de::dataSize(vertexBufferData));
    const auto vertexBufferInfo   = makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
    const auto vertexBuffer       = makeBuffer(ctx.vkd, ctx.device, vertexBufferInfo);
    const auto vertexBufferOffset = static_cast<VkDeviceSize>(m_params.bindingOffset);
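    // The binding offset above is applied when binding the vertex buffer; the attribute offset in the
    // vertex input state compensates for it so attribute reads stay aligned.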

    // Allocate and bind buffer memory.
    // If useMemoryOffset is true, we'll allocate extra memory that satisfies alignment requirements for the buffer and the attributes.
    auto vertexBufferReqs = getBufferMemoryRequirements(ctx.vkd, ctx.device, *vertexBuffer);
    const auto memoryOffset =
        (m_params.useMemoryOffset ?
             (de::lcm(vertexBufferReqs.alignment, static_cast<VkDeviceSize>(m_params.attributeSize()))) :
             0ull);
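    // E.g. if the buffer's required alignment is 16 and the attribute size is 8, lcm(16, 8) = 16 satisfies both constraints.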
    vertexBufferReqs.size += memoryOffset;
    auto vertexBufferAlloc = ctx.allocator.allocate(vertexBufferReqs, MemoryRequirement::HostVisible);
    VK_CHECK(ctx.vkd.bindBufferMemory(ctx.device, *vertexBuffer, vertexBufferAlloc->getMemory(), memoryOffset));

    // Copy vertices to vertex buffer.
    const auto dstPtr =
        reinterpret_cast<char *>(vertexBufferAlloc->getHostPtr()) + memoryOffset; // Need to add offset manually here.
    deMemcpy(dstPtr, de::dataOrNull(vertexBufferData), de::dataSize(vertexBufferData));
    flushAlloc(ctx.vkd, ctx.device, *vertexBufferAlloc);

    // Color buffer.
    ImageWithBuffer colorBuffer(ctx.vkd, ctx.device, ctx.allocator, vkExtent, colorFormat, colorUsage,
                                VK_IMAGE_TYPE_2D);

    // Render pass and framebuffer.
    auto renderPass = RenderPassWrapper(m_params.constructionType, ctx.vkd, ctx.device, colorFormat);
    renderPass.createFramebuffer(ctx.vkd, ctx.device, colorBuffer.getImage(), colorBuffer.getImageView(),
                                 vkExtent.width, vkExtent.height);

    // Shaders.
    const auto &binaries  = m_context.getBinaryCollection();
    const auto vertModule = ShaderWrapper(ctx.vkd, ctx.device, binaries.get("vert"));
    const auto fragModule = ShaderWrapper(ctx.vkd, ctx.device, binaries.get("frag"));

    std::vector<VkDynamicState> dynamicStates;
    if (m_params.dynamic)
        dynamicStates.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT);

    const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                              // const void* pNext;
        0u,                                                   // VkPipelineDynamicStateCreateFlags flags;
        de::sizeU32(dynamicStates),                           // uint32_t dynamicStateCount;
        de::dataOrNull(dynamicStates),                        // const VkDynamicState* pDynamicStates;
    };

    // Vertex input values according to test parameters.
    const auto vertexInputBinding =
        makeVertexInputBindingDescription(0u, m_params.bindingStride(), VK_VERTEX_INPUT_RATE_VERTEX);
    const auto vertexInputAttribute =
        makeVertexInputAttributeDescription(0u, 0u, m_params.attributeFormat(), m_params.attributeOffset());

    using VertexInputStatePtr = std::unique_ptr<VkPipelineVertexInputStateCreateInfo>;
    VertexInputStatePtr pipelineVertexInputState;
    if (!m_params.dynamic)
    {
        pipelineVertexInputState.reset(new VkPipelineVertexInputStateCreateInfo);
        *pipelineVertexInputState                                 = initVulkanStructure();
        pipelineVertexInputState->vertexBindingDescriptionCount   = 1u;
        pipelineVertexInputState->pVertexBindingDescriptions      = &vertexInputBinding;
        pipelineVertexInputState->vertexAttributeDescriptionCount = 1u;
        pipelineVertexInputState->pVertexAttributeDescriptions    = &vertexInputAttribute;
    }

    const std::vector<VkViewport> viewports(1u, makeViewport(vkExtent));
    const std::vector<VkRect2D> scissors(1u, makeRect2D(vkExtent));

    // Pipeline.
    const PipelineLayoutWrapper pipelineLayout(m_params.constructionType, ctx.vkd, ctx.device);
    GraphicsPipelineWrapper pipelineWrapper(ctx.vki, ctx.vkd, ctx.physicalDevice, ctx.device,
                                            m_context.getDeviceExtensions(), m_params.constructionType);
    pipelineWrapper.setMonolithicPipelineLayout(pipelineLayout)
        .setDefaultDepthStencilState()
        .setDefaultColorBlendState()
        .setDefaultRasterizationState()
        .setDefaultMultisampleState()
        .setDefaultVertexInputState(false)
        .setDefaultTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
        .setDynamicState(&dynamicStateCreateInfo)
        .setupVertexInputState(pipelineVertexInputState.get())
        .setupPreRasterizationShaderState(viewports, scissors, pipelineLayout, *renderPass, 0u, vertModule)
        .setupFragmentShaderState(pipelineLayout, *renderPass, 0u, fragModule)
        .setupFragmentOutputState(*renderPass, 0u)
        .buildPipeline();

    CommandPoolWithBuffer cmd(ctx.vkd, ctx.device, ctx.qfIndex);
    const auto cmdBuffer = *cmd.cmdBuffer;

    // Draw and copy image to verification buffer.
    beginCommandBuffer(ctx.vkd, cmdBuffer);
    {
        renderPass.begin(ctx.vkd, cmdBuffer, scissors.at(0u), getClearColor());
        pipelineWrapper.bind(cmdBuffer);
        ctx.vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
        if (m_params.dynamic)
        {
            VkVertexInputBindingDescription2EXT dynamicBinding = initVulkanStructure();
            dynamicBinding.binding                             = vertexInputBinding.binding;
            dynamicBinding.inputRate                           = vertexInputBinding.inputRate;
            dynamicBinding.stride                              = vertexInputBinding.stride;
            dynamicBinding.divisor                             = 1u;

            VkVertexInputAttributeDescription2EXT dynamicAttribute = initVulkanStructure();
            dynamicAttribute.location                              = vertexInputAttribute.location;
            dynamicAttribute.binding                               = vertexInputAttribute.binding;
            dynamicAttribute.format                                = vertexInputAttribute.format;
            dynamicAttribute.offset                                = vertexInputAttribute.offset;

            ctx.vkd.cmdSetVertexInputEXT(cmdBuffer, 1u, &dynamicBinding, 1u, &dynamicAttribute);
        }
        ctx.vkd.cmdDraw(cmdBuffer, de::sizeU32(vertices), 1u, 0u, 0u);
        renderPass.end(ctx.vkd, cmdBuffer);
    }
    {
        copyImageToBuffer(ctx.vkd, cmdBuffer, colorBuffer.getImage(), colorBuffer.getBuffer(),
                          tcu::IVec2(fbExtent.x(), fbExtent.y()), VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                          VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1u, VK_IMAGE_ASPECT_COLOR_BIT,
                          VK_IMAGE_ASPECT_COLOR_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
    }
    endCommandBuffer(ctx.vkd, cmdBuffer);
    submitCommandsAndWait(ctx.vkd, ctx.device, ctx.queue, cmdBuffer);
    invalidateAlloc(ctx.vkd, ctx.device, colorBuffer.getBufferAllocation());

    // Check color buffer.
    auto &log            = m_context.getTestContext().getLog();
    const auto tcuFormat = mapVkFormat(colorFormat);
    const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, fbExtent, colorBuffer.getBufferAllocation().getHostPtr());
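    // The default and clear colors are exactly representable in the UNORM8 color format, so a zero threshold (exact match) is used.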
    const tcu::Vec4 threshold(0.0f, 0.0f, 0.0f, 0.0f);

    if (!tcu::floatThresholdCompare(log, "Result", "", getDefaultColor(), resultAccess, threshold,
                                    tcu::COMPARE_LOG_ON_ERROR))
        return tcu::TestStatus::fail("Unexpected color buffer contents -- check log for details");

    return tcu::TestStatus::pass("Pass");
}

} // anonymous namespace

tcu::TestCaseGroup *createInputAttributeOffsetTests(tcu::TestContext &testCtx,
                                                    vk::PipelineConstructionType pipelineConstructionType)
{
    using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
    GroupPtr mainGroup(new tcu::TestCaseGroup(testCtx, "input_attribute_offset"));

    for (const auto dataType : {glu::TYPE_FLOAT_VEC2, glu::TYPE_FLOAT_VEC4})
    {
        const auto typeSize = getTypeSize(dataType);
        GroupPtr dataTypeGrp(new tcu::TestCaseGroup(testCtx, glu::getDataTypeName(dataType)));

        for (uint32_t offset = 0u; offset < typeSize; ++offset)
        {
            const auto offsetGrpName = "offset_" + std::to_string(offset);
            GroupPtr offsetGrp(new tcu::TestCaseGroup(testCtx, offsetGrpName.c_str()));

            for (const auto strideCase : {StrideCase::PACKED, StrideCase::PADDED, StrideCase::OVERLAPPING})
            {
                if (strideCase == StrideCase::OVERLAPPING && dataType != glu::TYPE_FLOAT_VEC2)
                    continue;

                const std::array<const char *, 3> strideNames{"packed", "padded", "overlapping"};
                GroupPtr strideGrp(new tcu::TestCaseGroup(testCtx, strideNames.at(static_cast<int>(strideCase))));

                for (const auto useMemoryOffset : {false, true})
                {
                    const std::array<const char *, 2> memoryOffsetGrpNames{"no_memory_offset", "with_memory_offset"};
                    GroupPtr memoryOffsetGrp(
                        new tcu::TestCaseGroup(testCtx, memoryOffsetGrpNames.at(static_cast<int>(useMemoryOffset))));

                    for (const auto &dynamic : {false, true})
                    {
                        const TestParams params{
                            pipelineConstructionType, dataType, offset, strideCase, useMemoryOffset, dynamic,
                        };
                        const auto testName = (dynamic ? "dynamic" : "static");
                        memoryOffsetGrp->addChild(new InputAttributeOffsetCase(testCtx, testName, params));
                    }

                    strideGrp->addChild(memoryOffsetGrp.release());
                }

                offsetGrp->addChild(strideGrp.release());
            }

            dataTypeGrp->addChild(offsetGrp.release());
        }

        mainGroup->addChild(dataTypeGrp.release());
    }

    return mainGroup.release();
}

} // namespace pipeline
} // namespace vkt