1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2018 The Khronos Group Inc.
6 * Copyright (c) 2018 Google Inc.
7 * Copyright (c) 2018 ARM Limited.
8 * Copyright (c) 2023 LunarG, Inc.
9 * Copyright (c) 2023 Nintendo
10 *
11 * Licensed under the Apache License, Version 2.0 (the "License");
12 * you may not use this file except in compliance with the License.
13 * You may obtain a copy of the License at
14 *
15 * http://www.apache.org/licenses/LICENSE-2.0
16 *
17 * Unless required by applicable law or agreed to in writing, software
18 * distributed under the License is distributed on an "AS IS" BASIS,
19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 * See the License for the specific language governing permissions and
21 * limitations under the License.
22 *
23 *//*!
24 * \file
25 * \brief Dynamic Offset Tests
26 *//*--------------------------------------------------------------------*/
27
28 #include "vktPipelineDynamicOffsetTests.hpp"
29 #include "vktPipelineClearUtil.hpp"
30 #include "vktPipelineImageUtil.hpp"
31 #include "vktPipelineVertexUtil.hpp"
32 #include "vktPipelineReferenceRenderer.hpp"
33 #include "vkComputePipelineConstructionUtil.hpp"
34 #include "vktTestCase.hpp"
35 #include "vkImageUtil.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkPrograms.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkRef.hpp"
40 #include "vkRefUtil.hpp"
41 #include "vkTypeUtil.hpp"
42 #include "vkCmdUtil.hpp"
43 #include "vkObjUtil.hpp"
44 #include "vkDeviceUtil.hpp"
45 #include "vkBuilderUtil.hpp"
46 #include "tcuImageCompare.hpp"
47 #include "deMemory.h"
48 #include "deUniquePtr.hpp"
49 #include "tcuTestLog.hpp"
50 #include <array>
51 #include <cmath>
52 #include <vector>
53 #include <sstream>
54
55 namespace vkt
56 {
57 namespace pipeline
58 {
59
60 using namespace vk;
61 using namespace std;
62 using de::UniquePtr;
63
64 namespace
65 {
66 typedef de::SharedPtr<Unique<VkBuffer>> VkBufferSp;
67 typedef de::SharedPtr<Allocation> AllocationSp;
68 typedef de::SharedPtr<Unique<VkCommandBuffer>> VkCommandBufferSp;
69 typedef de::SharedPtr<RenderPassWrapper> VkRenderPassSp;
70
// How descriptor bindings are distributed across descriptor sets in the test.
enum class GroupingStrategy
{
    SINGLE_SET = 0, // All bindings live in a single descriptor set.
    MULTISET   = 1, // One descriptor set per binding.
    ARRAYS     = 2, // Two sets: one array of dynamic and one array of nondynamic descriptors.
};
77
// Configuration for one dynamic offset test variant.
struct TestParams
{
    PipelineConstructionType pipelineConstructionType; // Monolithic vs. pipeline library construction.
    VkDescriptorType descriptorType;                   // Dynamic descriptor type under test (uniform or storage buffer dynamic).
    uint32_t numCmdBuffers;                            // Number of command buffers (and render passes / pipelines) recorded.
    bool reverseOrder;                                 // Record command buffers in reverse index order before submission.
    uint32_t numDescriptorSetBindings;                 // Number of bind+draw iterations per command buffer.
    uint32_t numDynamicBindings;                       // Bindings using the dynamic descriptor type.
    uint32_t numNonDynamicBindings;                    // Bindings using the matching non-dynamic descriptor type.
    GroupingStrategy groupingStrategy;                 // How bindings are split into descriptor sets.
};
89 #ifndef CTS_USES_VULKANSC
createQuads(uint32_t numQuads,float size)90 vector<Vertex4RGBA> createQuads(uint32_t numQuads, float size)
91 {
92 vector<Vertex4RGBA> vertices;
93
94 for (uint32_t quadNdx = 0; quadNdx < numQuads; quadNdx++)
95 {
96 const float xOffset = -0.5f + (float)quadNdx;
97 const tcu::Vec4 color(0.0f);
98 const Vertex4RGBA lowerLeftVertex = {tcu::Vec4(-size + xOffset, -size, 0.0f, 1.0f), color};
99 const Vertex4RGBA lowerRightVertex = {tcu::Vec4(size + xOffset, -size, 0.0f, 1.0f), color};
100 const Vertex4RGBA UpperLeftVertex = {tcu::Vec4(-size + xOffset, size, 0.0f, 1.0f), color};
101 const Vertex4RGBA UpperRightVertex = {tcu::Vec4(size + xOffset, size, 0.0f, 1.0f), color};
102
103 vertices.push_back(lowerLeftVertex);
104 vertices.push_back(lowerRightVertex);
105 vertices.push_back(UpperLeftVertex);
106 vertices.push_back(UpperLeftVertex);
107 vertices.push_back(lowerRightVertex);
108 vertices.push_back(UpperRightVertex);
109 }
110
111 return vertices;
112 }
113 #endif // CTS_USES_VULKANSC
114
// Base colors laid out in the test buffer; the reference image in
// verifyImage() sums entries selected per binding via the dynamic offsets.
static const tcu::Vec4 testColors[] = {tcu::Vec4(0.3f, 0.0f, 0.0f, 1.0f), tcu::Vec4(0.0f, 0.3f, 0.0f, 1.0f),
                                       tcu::Vec4(0.0f, 0.0f, 0.3f, 1.0f), tcu::Vec4(0.3f, 0.3f, 0.0f, 1.0f),
                                       tcu::Vec4(0.0f, 0.3f, 0.3f, 1.0f), tcu::Vec4(0.3f, 0.0f, 0.3f, 1.0f)};
// Byte size of a single color entry and the total number of entries.
static constexpr VkDeviceSize kColorSize = static_cast<VkDeviceSize>(sizeof(testColors[0]));
static constexpr uint32_t kNumTestColors = static_cast<uint32_t>(DE_LENGTH_OF_ARRAY(testColors));
120
compareVectors(const tcu::Vec4 firstVector,const tcu::Vec4 secondVector,const float tolerance)121 bool compareVectors(const tcu::Vec4 firstVector, const tcu::Vec4 secondVector, const float tolerance)
122 {
123 for (auto i = 0; i < firstVector.SIZE; i++)
124 {
125 if (abs(firstVector[i] - secondVector[i]) > tolerance)
126 return false;
127 }
128
129 return true;
130 }
131
// Returns a VkImageCreateInfo for a single-sample, single-mip 2D image of the
// given size, format and usage (optimal tiling, exclusive sharing).
inline VkImageCreateInfo makeImageCreateInfo(const tcu::IVec2 &size, const VkFormat format,
                                             const VkImageUsageFlags usage)
{
    const VkImageCreateInfo imageParams = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                             // const void* pNext;
        (VkImageCreateFlags)0,               // VkImageCreateFlags flags;
        VK_IMAGE_TYPE_2D,                    // VkImageType imageType;
        format,                              // VkFormat format;
        makeExtent3D(size.x(), size.y(), 1), // VkExtent3D extent;
        1u,                                  // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
        VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
        usage,                               // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        0u,                                  // uint32_t queueFamilyIndexCount;
        DE_NULL,                             // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };

    return imageParams;
}
155
// Common base for dynamic offset test instances: stores the immutable test
// parameters and owns a simple device memory allocator.
class DynamicOffsetTestInstance : public vkt::TestInstance
{
public:
    DynamicOffsetTestInstance(Context &context, const TestParams &params)
        : vkt::TestInstance(context)
        , m_params(params)
        , m_memAlloc(context.getDeviceInterface(), context.getDevice(),
                     getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
    {
    }

protected:
    const TestParams m_params;  // Test configuration selected by the test case.
    SimpleAllocator m_memAlloc; // Allocator used for all buffer/image memory in the test.
};
171
// Graphics variant of the dynamic offset test: draws a row of quads, each
// shaded using buffer data selected through dynamic descriptor offsets, then
// compares the result against a CPU reference rendering.
class DynamicOffsetGraphicsTestInstance : public DynamicOffsetTestInstance
{
public:
    DynamicOffsetGraphicsTestInstance(Context &context, const TestParams &params);
    virtual ~DynamicOffsetGraphicsTestInstance(void);
    // Creates all Vulkan objects and records the command buffers.
    void init(void);
    // Runs init(), submits the command buffers and verifies the output image.
    virtual tcu::TestStatus iterate(void);
    // Compares the rendered image against a reference renderer result.
    tcu::TestStatus verifyImage(void);

private:
    const tcu::UVec2 m_renderSize;                         // Output framebuffer size in pixels.
    const VkFormat m_colorFormat;                          // Color attachment format.
    VkImageCreateInfo m_colorImageCreateInfo;              // Kept for readColorAttachment() in verifyImage().
    Move<VkImage> m_colorImage;                            // Color render target.
    de::MovePtr<Allocation> m_colorImageAlloc;             // Backing memory for m_colorImage.
    Move<VkImageView> m_colorAttachmentView;               // View bound to the framebuffers.
    vector<VkRenderPassSp> m_renderPasses;                 // One render pass (and framebuffer) per command buffer.
    ShaderWrapper m_vertexShaderModule;
    ShaderWrapper m_fragmentShaderModule;
    Move<VkBuffer> m_vertexBuffer;                         // Quad vertices.
    de::MovePtr<Allocation> m_vertexBufferAlloc;
    Move<VkBuffer> m_buffer;                               // Color data accessed through the descriptors.
    de::MovePtr<Allocation> m_bufferAlloc;
    vector<Move<VkDescriptorSetLayout>> m_descriptorSetLayouts; // One or more layouts per grouping strategy.
    Move<VkDescriptorPool> m_descriptorPool;
    vector<Move<VkDescriptorSet>> m_descriptorSets;
    PipelineLayoutWrapper m_pipelineLayout;
    vector<GraphicsPipelineWrapper> m_graphicsPipelines;   // One pipeline per render pass.
    Move<VkCommandPool> m_cmdPool;
    vector<VkCommandBufferSp> m_cmdBuffers;                // Submitted in index order by iterate().
    vector<Vertex4RGBA> m_vertices;                        // Also reused to build the reference image.
};
204 #ifndef CTS_USES_VULKANSC
// One quad is drawn per descriptor set binding in each command buffer, so the
// vertex list holds numDescriptorSetBindings * numCmdBuffers quads.
DynamicOffsetGraphicsTestInstance::DynamicOffsetGraphicsTestInstance(Context &context, const TestParams &params)
    : DynamicOffsetTestInstance(context, params)
    , m_renderSize(32, 32)
    , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
    , m_vertices(createQuads(m_params.numDescriptorSetBindings * m_params.numCmdBuffers, 0.25f))
{
}
212 #endif // CTS_USES_VULKANSC
213
// Creates every Vulkan object needed by the test (render target, render
// passes/framebuffers, descriptor sets, pipelines, buffers) and records the
// command buffers that bind descriptor sets with increasing dynamic offsets
// and draw one quad per bind.
void DynamicOffsetGraphicsTestInstance::init(void)
{
    const VkComponentMapping componentMappingRGBA = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
                                                     VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
    const InstanceInterface &vki = m_context.getInstanceInterface();
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
    const VkDevice vkDevice = m_context.getDevice();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    const uint32_t numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
    uint32_t offset = 0;  // Running dynamic offset, advanced once per draw.
    uint32_t quadNdx = 0; // Running quad index across all command buffers.
    const VkPhysicalDeviceLimits deviceLimits =
        getPhysicalDeviceProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()).limits;
    // Dynamic offsets must be multiples of the device's min offset alignment
    // for the descriptor type under test.
    const VkDeviceSize alignment = ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ?
                                        deviceLimits.minUniformBufferOffsetAlignment :
                                        deviceLimits.minStorageBufferOffsetAlignment);
    // Round the per-color stride up to that alignment.
    const VkDeviceSize extraBytes = kColorSize % alignment;
    const VkDeviceSize colorBlockInputSize =
        ((extraBytes == 0ull) ? kColorSize : (kColorSize + alignment - extraBytes));
    const VkDeviceSize bufferSize = colorBlockInputSize * kNumTestColors;
    // Each binding's descriptor starts in its own slice of the buffer.
    const VkDeviceSize bindingOffset = bufferSize / numBindings;
    // Nondynamic bindings use the matching non-dynamic descriptor type.
    const VkDescriptorType nonDynamicDescriptorType =
        m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER :
                                                                               VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

    vector<VkDescriptorSetLayout> descriptorSetLayoutsPlain;
    vector<VkDescriptorSet> descriptorSetsPlain;

    // Create color image
    {

        const VkImageCreateInfo colorImageParams = {
            VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                             // const void* pNext;
            0u,                                  // VkImageCreateFlags flags;
            VK_IMAGE_TYPE_2D,                    // VkImageType imageType;
            m_colorFormat,                       // VkFormat format;
            {m_renderSize.x(), m_renderSize.y(), 1u}, // VkExtent3D extent;
            1u,                                  // uint32_t mipLevels;
            1u,                                  // uint32_t arrayLayers;
            VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
            VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
            VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
            1u,                                  // uint32_t queueFamilyIndexCount;
            &queueFamilyIndex,                   // const uint32_t* pQueueFamilyIndices;
            VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
        };

        m_colorImageCreateInfo = colorImageParams;
        m_colorImage = createImage(vk, vkDevice, &m_colorImageCreateInfo);

        // Allocate and bind color image memory
        m_colorImageAlloc =
            m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *m_colorImage), MemoryRequirement::Any);
        VK_CHECK(vk.bindImageMemory(vkDevice, *m_colorImage, m_colorImageAlloc->getMemory(),
                                    m_colorImageAlloc->getOffset()));
    }

    // Create color attachment view
    {
        const VkImageViewCreateInfo colorAttachmentViewParams = {
            VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,    // VkStructureType sType;
            DE_NULL,                                     // const void* pNext;
            0u,                                          // VkImageViewCreateFlags flags;
            *m_colorImage,                               // VkImage image;
            VK_IMAGE_VIEW_TYPE_2D,                       // VkImageViewType viewType;
            m_colorFormat,                               // VkFormat format;
            componentMappingRGBA,                        // VkChannelMapping channels;
            {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
        };

        m_colorAttachmentView = createImageView(vk, vkDevice, &colorAttachmentViewParams);
    }

    // Create render passes
    for (uint32_t renderPassIdx = 0; renderPassIdx < m_params.numCmdBuffers; renderPassIdx++)
    {
        // The first pass clears the output image, and the second one draws on top of the first pass.
        const VkAttachmentLoadOp loadOps[] = {VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_LOAD_OP_LOAD};

        const VkImageLayout initialLayouts[] = {VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};

        const VkAttachmentDescription attachmentDescription = {
            (VkAttachmentDescriptionFlags)0,         // VkAttachmentDescriptionFlags flags
            m_colorFormat,                           // VkFormat format
            VK_SAMPLE_COUNT_1_BIT,                   // VkSampleCountFlagBits samples
            loadOps[renderPassIdx],                  // VkAttachmentLoadOp loadOp
            VK_ATTACHMENT_STORE_OP_STORE,            // VkAttachmentStoreOp storeOp
            VK_ATTACHMENT_LOAD_OP_DONT_CARE,         // VkAttachmentLoadOp stencilLoadOp
            VK_ATTACHMENT_STORE_OP_DONT_CARE,        // VkAttachmentStoreOp stencilStoreOp
            initialLayouts[renderPassIdx],           // VkImageLayout initialLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout
        };

        const VkAttachmentReference attachmentRef = {
            0u,                                      // uint32_t attachment
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout
        };

        const VkSubpassDescription subpassDescription = {
            (VkSubpassDescriptionFlags)0,    // VkSubpassDescriptionFlags flags
            VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
            0u,                              // uint32_t inputAttachmentCount
            DE_NULL,                         // const VkAttachmentReference* pInputAttachments
            1u,                              // uint32_t colorAttachmentCount
            &attachmentRef,                  // const VkAttachmentReference* pColorAttachments
            DE_NULL,                         // const VkAttachmentReference* pResolveAttachments
            DE_NULL,                         // const VkAttachmentReference* pDepthStencilAttachment
            0u,                              // uint32_t preserveAttachmentCount
            DE_NULL                          // const uint32_t* pPreserveAttachments
        };

        const VkRenderPassCreateInfo renderPassInfo = {
            VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureTypei sType
            DE_NULL,                                   // const void* pNext
            (VkRenderPassCreateFlags)0,                // VkRenderPassCreateFlags flags
            1u,                                        // uint32_t attachmentCount
            &attachmentDescription,                    // const VkAttachmentDescription* pAttachments
            1u,                                        // uint32_t subpassCount
            &subpassDescription,                       // const VkSubpassDescription* pSubpasses
            0u,                                        // uint32_t dependencyCount
            DE_NULL                                    // const VkSubpassDependency* pDependencies
        };

        m_renderPasses.push_back(
            VkRenderPassSp(new RenderPassWrapper(m_params.pipelineConstructionType, vk, vkDevice, &renderPassInfo)));

        const VkImageView attachmentBindInfos[] = {*m_colorAttachmentView};

        const VkFramebufferCreateInfo framebufferParams = {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                   // const void* pNext;
            0u,                                        // VkFramebufferCreateFlags flags;
            **m_renderPasses[renderPassIdx],           // VkRenderPass renderPass;
            1u,                                        // uint32_t attachmentCount;
            attachmentBindInfos,                       // const VkImageView* pAttachments;
            (uint32_t)m_renderSize.x(),                // uint32_t width;
            (uint32_t)m_renderSize.y(),                // uint32_t height;
            1u                                         // uint32_t layers;
        };

        m_renderPasses[renderPassIdx]->createFramebuffer(vk, vkDevice, &framebufferParams, *m_colorImage);
    }

    // Create pipeline layout
    {
        // Create descriptor set layouts
        vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;

        for (uint32_t binding = 0; binding < numBindings; binding++)
        {
            // Dynamic bindings come first, nondynamic ones after them.
            const bool dynamicDesc = (binding < m_params.numDynamicBindings);
            const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
            // SINGLE_SET uses distinct binding numbers; MULTISET/ARRAYS put
            // each layout entry at binding 0 of its own set.
            const uint32_t bindingNumber = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
            // In ARRAYS mode one entry declares the whole descriptor array.
            const uint32_t descriptorCount =
                ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ?
                     (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) :
                     1u);
            const VkDescriptorSetLayoutBinding descriptorSetLayoutBinding = {
                bindingNumber,              // uint32_t binding;
                descriptorType,             // VkDescriptorType descriptorType;
                descriptorCount,            // uint32_t descriptorCount;
                VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlags stageFlags;
                DE_NULL                     // const VkSampler* pImmutableSamplers;
            };

            // Skip used descriptors in array mode.
            if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
                binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : numBindings);

            descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
        }

        vector<VkDescriptorSetLayoutCreateInfo> descriptorSetLayoutCreateInfos;

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            // All bindings in one layout.
            const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                             // const void* pNext;
                0u,                                                  // VkDescriptorSetLayoutCreateFlags flags;
                numBindings,                                         // uint32_t bindingCount;
                descriptorSetLayoutBindings.data() // const VkDescriptorSetLayoutBinding* pBindings;
            };

            m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
        }
        else
        {
            // One layout (and later one set) per collected binding entry.
            for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
            {
                const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
                    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
                    DE_NULL,                                             // const void* pNext;
                    0u,                                                  // VkDescriptorSetLayoutCreateFlags flags;
                    1u,                                                  // uint32_t bindingCount;
                    &descriptorSetLayoutBindings[i] // const VkDescriptorSetLayoutBinding* pBindings;
                };

                m_descriptorSetLayouts.push_back(
                    createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
            }
        }

        // Create pipeline layout
        descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
        for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
            descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();

        const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,           // VkStructureType sType;
            DE_NULL,                                                 // const void* pNext;
            0u,                                                      // VkPipelineLayoutCreateFlags flags;
            static_cast<uint32_t>(descriptorSetLayoutsPlain.size()), // uint32_t descriptorSetCount;
            descriptorSetLayoutsPlain.data(),                        // const VkDescriptorSetLayout* pSetLayouts;
            0u,                                                      // uint32_t pushConstantRangeCount;
            DE_NULL                                                  // const VkPushDescriptorRange* pPushDescriptorRanges;
        };

        m_pipelineLayout =
            PipelineLayoutWrapper(m_params.pipelineConstructionType, vk, vkDevice, &pipelineLayoutParams);
    }

    // Create buffer
    {
        // Write each test color at the start of its aligned block.
        vector<uint8_t> hostBuffer((size_t)bufferSize, 0);
        for (uint32_t colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
            deMemcpy(&hostBuffer[(uint32_t)colorBlockInputSize * colorIdx], &testColors[colorIdx], kColorSize);

        const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ?
                                                  VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT :
                                                  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

        const VkBufferCreateInfo bufferCreateInfo = {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                              // const void* pNext;
            0u,                                   // VkBufferCreateFlags flags
            bufferSize,                           // VkDeviceSize size;
            usageFlags,                           // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode;
            1u,                                   // uint32_t queueFamilyCount;
            &queueFamilyIndex                     // const uint32_t* pQueueFamilyIndices;
        };

        m_buffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
        m_bufferAlloc =
            m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
        VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));

        deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
        flushAlloc(vk, vkDevice, *m_bufferAlloc);
    }

    // Create descriptor pool
    {
        DescriptorPoolBuilder poolBuilder;
        poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
        poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
        m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
                                             static_cast<uint32_t>(m_descriptorSetLayouts.size()));
    }

    // Create descriptor sets
    {
        // One descriptor set per layout created above.
        for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
        {
            const VkDescriptorSetAllocateInfo allocInfo = {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
                DE_NULL,                                        // const void* pNext;
                *m_descriptorPool,                              // VkDescriptorPool descriptorPool;
                1u,                                             // uint32_t setLayoutCount;
                &(m_descriptorSetLayouts[i].get()),             // const VkDescriptorSetLayout* pSetLayouts;
            };
            m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
        }
    }

    descriptorSetsPlain.resize(m_descriptorSets.size());
    for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
        descriptorSetsPlain[i] = m_descriptorSets[i].get();

    // Update descriptor sets
    for (uint32_t binding = 0; binding < numBindings; ++binding)
    {
        const bool dynamicDesc = (binding < m_params.numDynamicBindings);
        const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
        // Each binding points at its own slice of the color buffer.
        const VkDescriptorBufferInfo descriptorBufferInfo = {
            *m_buffer,               // VkBuffer buffer;
            bindingOffset * binding, // VkDeviceSize offset;
            kColorSize               // VkDeviceSize range;
        };

        VkDescriptorSet bindingSet;
        uint32_t bindingNumber;
        uint32_t dstArrayElement;

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            bindingSet = m_descriptorSets[0].get();
            bindingNumber = binding;
            dstArrayElement = 0u;
        }
        else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
        {
            bindingSet = m_descriptorSets[binding].get();
            bindingNumber = 0u;
            dstArrayElement = 0u;
        }
        else // GroupingStrategy::ARRAYS
        {
            // Set 0 holds the dynamic array, set 1 the nondynamic array.
            bindingSet = (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
            bindingNumber = 0u;
            dstArrayElement = (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
        }

        const VkWriteDescriptorSet writeDescriptorSet = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
            DE_NULL,                                // const void* pNext;
            bindingSet,                             // VkDescriptorSet dstSet;
            bindingNumber,                          // uint32_t dstBinding;
            dstArrayElement,                        // uint32_t dstArrayElement;
            1u,                                     // uint32_t descriptorCount;
            descriptorType,                         // VkDescriptorType descriptorType;
            DE_NULL,                                // const VkDescriptorImageInfo* pImageInfo;
            &descriptorBufferInfo,                  // const VkDescriptorBufferInfo* pBufferInfo;
            DE_NULL                                 // const VkBufferView* pTexelBufferView;
        };

        vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
    }

    // Create shaders
    {
        m_vertexShaderModule = ShaderWrapper(vk, vkDevice, m_context.getBinaryCollection().get("vert"), 0u);
        m_fragmentShaderModule = ShaderWrapper(vk, vkDevice, m_context.getBinaryCollection().get("frag"), 0u);
    }

    // Create pipelines
    m_graphicsPipelines.reserve(m_params.numCmdBuffers);
    for (uint32_t pipelineIdx = 0; pipelineIdx < m_params.numCmdBuffers; pipelineIdx++)
    {
        const VkVertexInputBindingDescription vertexInputBindingDescription = {
            0u,                         // uint32_t binding;
            sizeof(Vertex4RGBA),        // uint32_t strideInBytes;
            VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate stepRate;
        };

        const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] = {
            {
                0u,                            // uint32_t location;
                0u,                            // uint32_t binding;
                VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
                0u                             // uint32_t offsetInBytes;
            },
            {
                1u,                            // uint32_t location;
                0u,                            // uint32_t binding;
                VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
                offsetof(Vertex4RGBA, color),  // uint32_t offset;
            }};

        const VkPipelineVertexInputStateCreateInfo vertexInputStateParams{
            VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                                   // const void* pNext;
            0u,                                                        // vkPipelineVertexInputStateCreateFlags flags;
            1u,                                                        // uint32_t bindingCount;
            &vertexInputBindingDescription,  // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
            2u,                              // uint32_t attributeCount;
            vertexInputAttributeDescriptions // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
        };

        const vector<VkViewport> viewports{makeViewport(m_renderSize)};
        const vector<VkRect2D> scissors{makeRect2D(m_renderSize)};

        // One pipeline per render pass; they differ only in the render pass used.
        m_graphicsPipelines.emplace_back(vki, vk, physicalDevice, vkDevice, m_context.getDeviceExtensions(),
                                         m_params.pipelineConstructionType);
        m_graphicsPipelines.back()
            .setMonolithicPipelineLayout(m_pipelineLayout)
            .setDefaultRasterizationState()
            .setDefaultDepthStencilState()
            .setDefaultColorBlendState()
            .setDefaultMultisampleState()
            .setupVertexInputState(&vertexInputStateParams)
            .setupPreRasterizationShaderState(viewports, scissors, m_pipelineLayout, **m_renderPasses[pipelineIdx], 0u,
                                              m_vertexShaderModule)
            .setupFragmentShaderState(m_pipelineLayout, **m_renderPasses[pipelineIdx], 0u, m_fragmentShaderModule)
            .setupFragmentOutputState(**m_renderPasses[pipelineIdx])
            .buildPipeline();
    }

    // Create vertex buffer
    {
        const VkBufferCreateInfo vertexBufferParams = {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                    // VkStructureType sType;
            DE_NULL,                                                 // const void* pNext;
            0u,                                                      // VkBufferCreateFlags flags;
            (VkDeviceSize)(sizeof(Vertex4RGBA) * m_vertices.size()), // VkDeviceSize size;
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,                       // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,                               // VkSharingMode sharingMode;
            1u,                                                      // uint32_t queueFamilyCount;
            &queueFamilyIndex                                        // const uint32_t* pQueueFamilyIndices;
        };

        m_vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams);
        m_vertexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_vertexBuffer),
                                                  MemoryRequirement::HostVisible);

        VK_CHECK(vk.bindBufferMemory(vkDevice, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(),
                                     m_vertexBufferAlloc->getOffset()));

        // Load vertices into vertex buffer
        deMemcpy(m_vertexBufferAlloc->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
        flushAlloc(vk, vkDevice, *m_vertexBufferAlloc);
    }

    // Create command pool
    m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);

    // Create command buffers
    for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
        m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(
            allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));

    for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
    {
        const VkClearValue attachmentClearValue = defaultClearValue(m_colorFormat);
        const VkDeviceSize vertexBufferOffset = 0;
        // With reverseOrder the command buffers are recorded in reverse index
        // order; the dynamic offsets and quad indices still advance in
        // recording order (submission in iterate() is always by index).
        const uint32_t idx = m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;

        beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
        m_renderPasses[idx]->begin(vk, **m_cmdBuffers[idx], makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()),
                                   attachmentClearValue);
        m_graphicsPipelines[idx].bind(**m_cmdBuffers[idx]);
        vk.cmdBindVertexBuffers(**m_cmdBuffers[idx], 0, 1, &m_vertexBuffer.get(), &vertexBufferOffset);

        for (uint32_t i = 0; i < m_params.numDescriptorSetBindings; i++)
        {
            // One dynamic offset per dynamic binding, spaced a color block apart.
            vector<uint32_t> offsets;
            for (uint32_t dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
                offsets.push_back(offset + (uint32_t)colorBlockInputSize * dynamicBindingIdx);

            vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u,
                                     static_cast<uint32_t>(descriptorSetsPlain.size()), descriptorSetsPlain.data(),
                                     m_params.numDynamicBindings, offsets.data());
            offset += (uint32_t)colorBlockInputSize;

            // Draw quad
            vk.cmdDraw(**m_cmdBuffers[idx], 6, 1, 6 * quadNdx, 0);
            quadNdx++;
        }

        m_renderPasses[idx]->end(vk, **m_cmdBuffers[idx]);
        endCommandBuffer(vk, **m_cmdBuffers[idx]);
    }
}
671
// All Vulkan objects are RAII-managed members; nothing to release explicitly.
DynamicOffsetGraphicsTestInstance::~DynamicOffsetGraphicsTestInstance(void)
{
}
675
iterate(void)676 tcu::TestStatus DynamicOffsetGraphicsTestInstance::iterate(void)
677 {
678 init();
679
680 for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
681 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(),
682 **m_cmdBuffers[cmdBufferIdx]);
683
684 return verifyImage();
685 }
686
// Renders the expected image on the CPU (by recomputing each quad's color
// from testColors the same way the shaders do via dynamic offsets) and
// compares it against the color attachment contents.
tcu::TestStatus DynamicOffsetGraphicsTestInstance::verifyImage(void)
{
    const tcu::TextureFormat tcuColorFormat = mapVkFormat(m_colorFormat);
    const tcu::TextureFormat tcuDepthFormat = tcu::TextureFormat(); // No depth attachment used.
    const ColorVertexShader vertexShader;
    const ColorFragmentShader fragmentShader(tcuColorFormat, tcuDepthFormat);
    const rr::Program program(&vertexShader, &fragmentShader);
    ReferenceRenderer refRenderer(m_renderSize.x(), m_renderSize.y(), 1, tcuColorFormat, tcuDepthFormat, &program);
    bool compareOk = false;

    // Render reference image
    {
        // bindingOffset mirrors the per-binding buffer slicing done in init(),
        // expressed in color entries rather than bytes.
        const uint32_t numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
        const uint32_t bindingOffset = kNumTestColors / numBindings;

        for (uint32_t quadIdx = 0; quadIdx < m_vertices.size() / 6; quadIdx++)
            for (uint32_t vertexIdx = 0; vertexIdx < 6; vertexIdx++)
            {
                tcu::Vec4 refColor(0.0f);

                // Dynamic bindings advance through their slice with the quad
                // index; nondynamic bindings always read their slice start.
                for (uint32_t binding = 0; binding < m_params.numDynamicBindings; binding++)
                    refColor += testColors[quadIdx + binding * bindingOffset + binding];
                for (uint32_t binding = 0; binding < m_params.numNonDynamicBindings; binding++)
                    refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
                refColor.w() = 1.0f;

                m_vertices[quadIdx * 6 + vertexIdx].color.xyzw() = refColor;
            }

        refRenderer.draw(rr::RenderState(refRenderer.getViewportState(),
                                         m_context.getDeviceProperties().limits.subPixelPrecisionBits),
                         rr::PRIMITIVETYPE_TRIANGLES, m_vertices);
    }

    // Compare result with reference image
    {
        de::MovePtr<tcu::TextureLevel> result = readColorAttachment(
            m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(),
            m_context.getUniversalQueueFamilyIndex(), m_memAlloc, *m_colorImage, m_colorFormat, m_renderSize);

        compareOk = tcu::intThresholdPositionDeviationCompare(
            m_context.getTestContext().getLog(), "IntImageCompare", "Image comparison", refRenderer.getAccess(),
            result->getAccess(), tcu::UVec4(2, 2, 2, 2), tcu::IVec3(1, 1, 0), true, tcu::COMPARE_LOG_RESULT);
    }

    if (compareOk)
        return tcu::TestStatus::pass("Result image matches reference");
    else
        return tcu::TestStatus::fail("Image mismatch");
}
737 #ifndef CTS_USES_VULKANSC
// Test case wrapper for the graphics dynamic offset tests: owns the
// parameters, generates the shaders and creates the instance.
class DynamicOffsetGraphicsTest : public vkt::TestCase
{
public:
    DynamicOffsetGraphicsTest(tcu::TestContext &testContext, const string &name, const TestParams &params);
    ~DynamicOffsetGraphicsTest(void);
    // Generates the "vert"/"frag" GLSL sources matching the grouping strategy.
    void initPrograms(SourceCollections &sourceCollections) const;
    TestInstance *createInstance(Context &context) const;
    // Verifies the requested pipeline construction type is supported.
    void checkSupport(Context &context) const;

protected:
    const TestParams m_params; // Test configuration forwarded to the instance.
};
750
// Stores the per-variant parameters alongside the base test case data.
DynamicOffsetGraphicsTest::DynamicOffsetGraphicsTest(tcu::TestContext &testContext, const string &name,
                                                     const TestParams &params)
    : vkt::TestCase(testContext, name)
    , m_params(params)
{
}
757
// No resources owned beyond the base class; nothing to clean up.
DynamicOffsetGraphicsTest::~DynamicOffsetGraphicsTest(void)
{
}
761
// Creates the per-execution instance; ownership passes to the framework.
TestInstance *DynamicOffsetGraphicsTest::createInstance(Context &context) const
{
    return new DynamicOffsetGraphicsTestInstance(context, m_params);
}
766
checkSupport(Context & context) const767 void DynamicOffsetGraphicsTest::checkSupport(Context &context) const
768 {
769 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
770 m_params.pipelineConstructionType);
771 }
772
// Generates the GLSL vertex and fragment shader sources.
//
// One input block is declared per binding (SINGLE_SET/MULTISET) or one array
// of blocks per descriptor kind (ARRAYS); the vertex shader accumulates the
// color of every input block into the vertex color output, which the
// fragment shader passes through unchanged.
void DynamicOffsetGraphicsTest::initPrograms(SourceCollections &sourceCollections) const
{
    const uint32_t numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
    // Storage buffers are only read in the graphics variant.
    const string bufferType =
        m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "readonly buffer";
    ostringstream inputBlocks; // Block declarations, one per binding/array.
    ostringstream inputSum;    // Accumulation statements, one per binding.
    string setAndBinding;
    string blockSuffix;
    string accessSuffix;
    bool dynArrayDecl = false;    // Dynamic descriptor block array declared?
    bool nonDynArrayDecl = false; // Nondynamic descriptor block array declared?

    for (uint32_t b = 0; b < numBindings; b++)
    {
        const bool dynBind = (b < m_params.numDynamicBindings);
        const string bStr = de::toString(b);

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            // All bindings live in set 0, numbered consecutively.
            setAndBinding = "set = 0, binding = " + bStr;
            blockSuffix = bStr;
            accessSuffix = bStr;
        }
        else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
        {
            // One descriptor set per binding, each using binding 0.
            setAndBinding = "set = " + bStr + ", binding = 0";
            blockSuffix = bStr;
            accessSuffix = bStr;
        }
        else // GroupingStrategy::ARRAYS
        {
            // In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
            // nondynamic descriptors.
            setAndBinding = "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
            blockSuffix =
                string(dynBind ? "Dyn" : "NonDyn") + "[" +
                (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) +
                "]";
            accessSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" +
                           (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
        }

        // In array mode, declare the input block only once per descriptor type.
        bool &arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
        if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
        {
            inputBlocks << "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
                        << "{\n"
                        << " vec4 color;\n"
                        << "} inputData" << blockSuffix << ";\n";
            arrayDeclFlag = true;
        }

        // But the sum always needs to be added once per descriptor.
        inputSum << " vtxColor.rgb += inputData" << accessSuffix << ".color.rgb;\n";
    }

    const string vertexSrc = "#version 450\n"
                             "layout(location = 0) in highp vec4 position;\n"
                             "layout(location = 1) in highp vec4 color;\n"
                             "layout(location = 0) out highp vec4 vtxColor;\n" +
                             inputBlocks.str() +
                             "\n"
                             "out gl_PerVertex { vec4 gl_Position; };\n"
                             "\n"
                             "void main()\n"
                             "{\n"
                             " gl_Position = position;\n"
                             " vtxColor = vec4(0, 0, 0, 1);\n" +
                             inputSum.str() + "}\n";

    // Plain pass-through fragment shader.
    const string fragmentSrc = "#version 450\n"
                               "layout(location = 0) in highp vec4 vtxColor;\n"
                               "layout(location = 0) out highp vec4 fragColor;\n"
                               "\n"
                               "void main (void)\n"
                               "{\n"
                               " fragColor = vtxColor;\n"
                               "}\n";

    sourceCollections.glslSources.add("vert") << glu::VertexSource(vertexSrc);
    sourceCollections.glslSources.add("frag") << glu::FragmentSource(fragmentSrc);
}
857 #endif // CTS_USES_VULKANSC
// Runtime instance for the compute variant: builds buffers, descriptor sets
// and a compute pipeline, then dispatches repeatedly with varying dynamic
// offsets and verifies the results written to the output buffer.
class DynamicOffsetComputeTestInstance : public DynamicOffsetTestInstance
{
public:
    DynamicOffsetComputeTestInstance(Context &context, const TestParams &params);
    virtual ~DynamicOffsetComputeTestInstance(void);
    // Creates all Vulkan resources and records the command buffers.
    void init(void);
    virtual tcu::TestStatus iterate(void);
    // Compares the output buffer contents against host-computed references.
    tcu::TestStatus verifyOutput(void);

private:
    const uint32_t m_numBindings;                // Total input bindings (dynamic + nondynamic).
    const uint32_t m_numOutputColors;            // One result color per dispatch (cmd buffers * bindings per set).
    const VkPhysicalDeviceLimits m_deviceLimits; // Cached for min*BufferOffsetAlignment.
    Move<VkBuffer> m_buffer;                     // Input buffer holding the test colors.
    de::MovePtr<Allocation> m_bufferAlloc;
    vector<Move<VkDescriptorSetLayout>> m_descriptorSetLayouts;
    Move<VkDescriptorPool> m_descriptorPool;
    vector<Move<VkDescriptorSet>> m_descriptorSets;
    PipelineLayoutWrapper m_pipelineLayout;
    ComputePipelineWrapper m_computePipeline;
    Move<VkBuffer> m_outputBuffer; // Storage buffer the shader writes into.
    de::MovePtr<Allocation> m_outputBufferAlloc;
    Move<VkCommandPool> m_cmdPool;
    vector<VkCommandBufferSp> m_cmdBuffers; // One per m_params.numCmdBuffers.
};
883
// Caches derived counts and the device limits; resource creation is
// deferred to init(), which runs at iterate() time.
DynamicOffsetComputeTestInstance::DynamicOffsetComputeTestInstance(Context &context, const TestParams &params)
    : DynamicOffsetTestInstance(context, params)
    , m_numBindings(params.numDynamicBindings + params.numNonDynamicBindings)
    , m_numOutputColors(params.numCmdBuffers * params.numDescriptorSetBindings)
    , m_deviceLimits(getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice()).limits)
{
}
891
// Builds every Vulkan object the test needs: padded input/output buffers,
// descriptor set layouts and sets matching the grouping strategy, the
// compute pipeline, and one command buffer per iteration, each binding the
// descriptor sets with different dynamic offsets before dispatching.
void DynamicOffsetComputeTestInstance::init(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice = m_context.getDevice();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    // Dynamic offsets must be multiples of the relevant minimum alignment,
    // so each color block is padded up to that alignment.
    const VkDeviceSize inputAlignment = ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ?
                                             m_deviceLimits.minUniformBufferOffsetAlignment :
                                             m_deviceLimits.minStorageBufferOffsetAlignment);
    const VkDeviceSize inputExtraBytes = kColorSize % inputAlignment;
    const VkDeviceSize colorBlockInputSize =
        ((inputExtraBytes == 0ull) ? kColorSize : (kColorSize + inputAlignment - inputExtraBytes));
    const uint32_t colorBlockInputSizeU32 = static_cast<uint32_t>(colorBlockInputSize);
    // The output buffer is always a dynamic storage buffer.
    const VkDeviceSize outputExtraBytes = kColorSize % m_deviceLimits.minStorageBufferOffsetAlignment;
    const VkDeviceSize colorBlockOutputSize =
        ((outputExtraBytes == 0ull) ? kColorSize :
                                      (kColorSize + m_deviceLimits.minStorageBufferOffsetAlignment - outputExtraBytes));
    const uint32_t colorBlockOutputSizeU32 = static_cast<uint32_t>(colorBlockOutputSize);
    const VkDeviceSize bufferSize = colorBlockInputSize * kNumTestColors;
    // Static byte offset between the buffer regions of consecutive bindings.
    const VkDeviceSize bindingOffset = bufferSize / m_numBindings;
    const VkDescriptorType nonDynamicDescriptorType =
        m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER :
                                                                               VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    const VkDeviceSize outputBufferSize = colorBlockOutputSize * m_numOutputColors;

    vector<VkDescriptorSetLayout> descriptorSetLayoutsPlain;
    vector<VkDescriptorSet> descriptorSetsPlain;

    // Create pipeline layout
    {
        // Create descriptor set layouts
        vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;

        for (uint32_t binding = 0; binding < m_numBindings; binding++)
        {
            const bool dynamicDesc = (binding < m_params.numDynamicBindings);
            const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
            // SINGLE_SET numbers bindings consecutively; the other modes put
            // every binding (or array) at binding 0 of its own set.
            const uint32_t bindingNumber = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
            // In ARRAYS mode one layout binding holds the whole array of
            // dynamic (or nondynamic) descriptors.
            const uint32_t descriptorCount =
                ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ?
                     (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) :
                     1u);
            const VkDescriptorSetLayoutBinding descriptorSetLayoutBinding = {
                bindingNumber,               // uint32_t binding;
                descriptorType,              // VkDescriptorType descriptorType;
                descriptorCount,             // uint32_t descriptorCount;
                VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags stageFlags;
                DE_NULL                      // const VkSampler* pImmutableSamplers;
            };

            // In array mode, jump the loop index past the descriptors that
            // the array binding created above already covers.
            if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
                binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : m_numBindings);

            descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
        }

        // Extra binding for the output storage buffer.
        const uint32_t bindingNumberOutput =
            (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? m_numBindings : 0u);
        const VkDescriptorSetLayoutBinding descriptorSetLayoutBindingOutput = {
            bindingNumberOutput,                       // uint32_t binding;
            VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, // VkDescriptorType descriptorType;
            1u,                                        // uint32_t descriptorCount;
            VK_SHADER_STAGE_COMPUTE_BIT,               // VkShaderStageFlags stageFlags;
            DE_NULL                                    // const VkSampler* pImmutableSamplers;
        };

        descriptorSetLayoutBindings.push_back(descriptorSetLayoutBindingOutput);

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            // All input bindings plus the output binding share one layout.
            const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                             // const void* pNext;
                0u,                                                  // VkDescriptorSetLayoutCreateFlags flags;
                m_numBindings + 1,                                   // uint32_t bindingCount;
                descriptorSetLayoutBindings.data()                   // const VkDescriptorSetLayoutBinding* pBindings;
            };

            m_descriptorSetLayouts.push_back(
                createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
        }
        else
        {
            // One set layout per binding (MULTISET) or per array (ARRAYS).
            for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
            {
                const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
                    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
                    DE_NULL,                                             // const void* pNext;
                    0u,                                                  // VkDescriptorSetLayoutCreateFlags flags;
                    1u,                                                  // uint32_t bindingCount;
                    &descriptorSetLayoutBindings[i] // const VkDescriptorSetLayoutBinding* pBindings;
                };

                m_descriptorSetLayouts.push_back(
                    createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
            }
        }

        // Create pipeline layout
        descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
        for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
            descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();

        const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,           // VkStructureType sType;
            DE_NULL,                                                 // const void* pNext;
            0u,                                                      // VkPipelineLayoutCreateFlags flags;
            static_cast<uint32_t>(descriptorSetLayoutsPlain.size()), // uint32_t setLayoutCount;
            descriptorSetLayoutsPlain.data(),                        // const VkDescriptorSetLayout* pSetLayouts;
            0u,                                                      // uint32_t pushConstantRangeCount;
            DE_NULL                                                  // const VkPushConstantRange* pPushConstantRanges;
        };

        m_pipelineLayout =
            PipelineLayoutWrapper(m_params.pipelineConstructionType, vk, vkDevice, &pipelineLayoutParams);
    }

    // Create buffer
    {
        // Host copy of the input data: one test color per padded block.
        vector<uint8_t> hostBuffer((uint32_t)bufferSize, 0);
        for (uint32_t colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
            deMemcpy(&hostBuffer[colorBlockInputSizeU32 * colorIdx], &testColors[colorIdx], kColorSize);

        const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ?
                                                  VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT :
                                                  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

        const VkBufferCreateInfo bufferCreateInfo = {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                              // const void* pNext;
            0u,                                   // VkBufferCreateFlags flags
            bufferSize,                           // VkDeviceSize size;
            usageFlags,                           // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode;
            1u,                                   // uint32_t queueFamilyIndexCount;
            &queueFamilyIndex                     // const uint32_t* pQueueFamilyIndices;
        };

        m_buffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
        m_bufferAlloc =
            m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
        VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));

        deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
        flushAlloc(vk, vkDevice, *m_bufferAlloc);
    }

    // Create output buffer
    {
        const VkBufferCreateInfo bufferCreateInfo = {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                              // const void* pNext;
            0u,                                   // VkBufferCreateFlags flags
            outputBufferSize,                     // VkDeviceSize size;
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,   // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode;
            1u,                                   // uint32_t queueFamilyIndexCount;
            &queueFamilyIndex                     // const uint32_t* pQueueFamilyIndices;
        };

        m_outputBuffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
        m_outputBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_outputBuffer),
                                                  MemoryRequirement::HostVisible);
        VK_CHECK(vk.bindBufferMemory(vkDevice, *m_outputBuffer, m_outputBufferAlloc->getMemory(),
                                     m_outputBufferAlloc->getOffset()));
    }

    // Create descriptor pool
    {
        DescriptorPoolBuilder poolBuilder;
        poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
        poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
        poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1u);
        m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
                                             static_cast<uint32_t>(m_descriptorSetLayouts.size()));
    }

    // Create descriptor sets
    {
        // One descriptor set per layout created above.
        for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
        {
            const VkDescriptorSetAllocateInfo allocInfo = {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
                DE_NULL,                                        // const void* pNext;
                *m_descriptorPool,                              // VkDescriptorPool descriptorPool;
                1u,                                             // uint32_t descriptorSetCount;
                &(m_descriptorSetLayouts[i].get()),             // const VkDescriptorSetLayout* pSetLayouts;
            };
            m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
        }
    }

    descriptorSetsPlain.resize(m_descriptorSets.size());
    for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
        descriptorSetsPlain[i] = m_descriptorSets[i].get();

    // Update input buffer descriptors
    for (uint32_t binding = 0; binding < m_numBindings; ++binding)
    {
        const bool dynamicDesc = (binding < m_params.numDynamicBindings);
        const VkDescriptorType descriptorType = dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType;
        // Each binding statically points at its own region of the input
        // buffer; dynamic offsets are added on top of this at bind time.
        const VkDescriptorBufferInfo descriptorBufferInfo = {
            *m_buffer,               // VkBuffer buffer;
            bindingOffset * binding, // VkDeviceSize offset;
            kColorSize               // VkDeviceSize range;
        };

        VkDescriptorSet bindingSet;
        uint32_t bindingNumber;
        uint32_t dstArrayElement;

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            bindingSet = m_descriptorSets[0].get();
            bindingNumber = binding;
            dstArrayElement = 0u;
        }
        else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
        {
            bindingSet = m_descriptorSets[binding].get();
            bindingNumber = 0u;
            dstArrayElement = 0u;
        }
        else // GroupingStrategy::ARRAYS
        {
            // Set 0 holds the dynamic array, set 1 the nondynamic array.
            bindingSet = (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
            bindingNumber = 0u;
            dstArrayElement = (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
        }

        const VkWriteDescriptorSet writeDescriptorSet = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
            DE_NULL,                                // const void* pNext;
            bindingSet,                             // VkDescriptorSet dstSet;
            bindingNumber,                          // uint32_t dstBinding;
            dstArrayElement,                        // uint32_t dstArrayElement;
            1u,                                     // uint32_t descriptorCount;
            descriptorType,                         // VkDescriptorType descriptorType;
            DE_NULL,                                // const VkDescriptorImageInfo* pImageInfo;
            &descriptorBufferInfo,                  // const VkDescriptorBufferInfo* pBufferInfo;
            DE_NULL                                 // const VkBufferView* pTexelBufferView;
        };

        vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
    }

    // Update output buffer descriptor
    {
        const VkDescriptorBufferInfo descriptorBufferInfo = {
            *m_outputBuffer, // VkBuffer buffer;
            0u,              // VkDeviceSize offset;
            kColorSize       // VkDeviceSize range;
        };

        VkDescriptorSet bindingSet;
        uint32_t bindingNumber;

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            // Output shares the single set, using the binding after the inputs.
            bindingSet = m_descriptorSets[0].get();
            bindingNumber = m_numBindings;
        }
        else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
        {
            bindingSet = m_descriptorSets.back().get();
            bindingNumber = 0u;
        }
        else // GroupingStrategy::ARRAYS
        {
            // The output buffer lives in its own set after the array sets.
            bindingSet = m_descriptorSets.back().get();
            bindingNumber = 0u;
        }

        const VkWriteDescriptorSet writeDescriptorSet = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,    // VkStructureType sType;
            DE_NULL,                                   // const void* pNext;
            bindingSet,                                // VkDescriptorSet dstSet;
            bindingNumber,                             // uint32_t dstBinding;
            0u,                                        // uint32_t dstArrayElement;
            1u,                                        // uint32_t descriptorCount;
            VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, // VkDescriptorType descriptorType;
            DE_NULL,                                   // const VkDescriptorImageInfo* pImageInfo;
            &descriptorBufferInfo,                     // const VkDescriptorBufferInfo* pBufferInfo;
            DE_NULL                                    // const VkBufferView* pTexelBufferView;
        };

        vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
    }

    // Create pipeline
    {

        m_computePipeline =
            ComputePipelineWrapper(vk, vkDevice, graphicsToComputeConstructionType(m_params.pipelineConstructionType),
                                   m_context.getBinaryCollection().get("compute"));
        m_computePipeline.setDescriptorSetLayouts(m_pipelineLayout.getSetLayoutCount(),
                                                  m_pipelineLayout.getSetLayouts());
        m_computePipeline.buildPipeline();
    }

    // Create command pool
    m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);

    // Create command buffers
    for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
        m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(
            allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));

    // Running dynamic offsets into the input/output buffers; they keep
    // increasing across dispatches and command buffers.
    uint32_t inputOffset = 0u;
    uint32_t outputOffset = 0u;

    for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
    {
        // When requested, record command buffers in reverse order while the
        // offsets are still computed in forward order.
        const uint32_t idx = m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;

        beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
        m_computePipeline.bind(**m_cmdBuffers[idx]);

        for (uint32_t i = 0; i < m_params.numDescriptorSetBindings; i++)
        {
            // Create pipeline barrier
            // (makes each dispatch's writes visible to later dispatches and the host)
            const vk::VkBufferMemoryBarrier bufferBarrier = {
                vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,                  // VkStructureType sType;
                DE_NULL,                                                      // const void* pNext;
                vk::VK_ACCESS_SHADER_WRITE_BIT,                               // VkAccessFlags srcAccessMask;
                vk::VK_ACCESS_SHADER_WRITE_BIT | vk::VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                                      // uint32_t srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                                      // uint32_t dstQueueFamilyIndex;
                *m_outputBuffer,                                              // VkBuffer buffer;
                outputOffset,                                                 // VkDeviceSize offset;
                VK_WHOLE_SIZE                                                 // VkDeviceSize size;
            };

            vector<uint32_t> offsets;

            // Offsets for input buffers
            for (uint32_t dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
                offsets.push_back(inputOffset + colorBlockInputSizeU32 * dynamicBindingIdx);
            inputOffset += colorBlockInputSizeU32;

            // Offset for output buffer
            offsets.push_back(outputOffset);
            outputOffset += colorBlockOutputSizeU32;

            vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u,
                                     static_cast<uint32_t>(descriptorSetsPlain.size()), descriptorSetsPlain.data(),
                                     (uint32_t)offsets.size(), offsets.data());

            // Dispatch
            vk.cmdDispatch(**m_cmdBuffers[idx], 1, 1, 1);

            vk.cmdPipelineBarrier(**m_cmdBuffers[idx], vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                  vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | vk::VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u,
                                  DE_NULL, 1u, &bufferBarrier, 0u, DE_NULL);
        }

        endCommandBuffer(vk, **m_cmdBuffers[idx]);
    }
}
1251
// All Vulkan objects are RAII-wrapped members; nothing to do explicitly.
DynamicOffsetComputeTestInstance::~DynamicOffsetComputeTestInstance(void)
{
}
1255
iterate(void)1256 tcu::TestStatus DynamicOffsetComputeTestInstance::iterate(void)
1257 {
1258 init();
1259
1260 for (uint32_t cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
1261 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(),
1262 **m_cmdBuffers[cmdBufferIdx]);
1263
1264 return verifyOutput();
1265 }
1266
// Reads back the output buffer and compares each result color against a
// host-computed reference: the sum of the input colors each dispatch was
// pointed at via its static binding offsets plus dynamic offsets.
tcu::TestStatus DynamicOffsetComputeTestInstance::verifyOutput(void)
{
    // Per-binding region size expressed in color units rather than bytes.
    const uint32_t bindingOffset = kNumTestColors / m_numBindings;
    // NOTE(review): max() matches the round-up used in init() provided
    // kColorSize and the alignment are both powers of two.
    const uint32_t colorBlockOutputSize =
        static_cast<uint32_t>(de::max(kColorSize, m_deviceLimits.minStorageBufferOffsetAlignment));
    vector<tcu::Vec4> refColors(m_numOutputColors);
    vector<tcu::Vec4> outColors(m_numOutputColors);

    for (uint32_t i = 0; i < m_numOutputColors; i++)
    {
        tcu::Vec4 refColor(0.0f);

        // Dynamic binding b at iteration i reads color (b * bindingOffset)
        // from its static region plus (i + b) from the dynamic offsets.
        for (uint32_t binding = 0; binding < m_params.numDynamicBindings; binding++)
            refColor += testColors[i + binding * bindingOffset + binding];
        // Nondynamic bindings always read the first color of their region.
        for (uint32_t binding = 0; binding < m_params.numNonDynamicBindings; binding++)
            refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
        // The compute shader initializes alpha to 1.
        refColor.w() = 1.0f;

        refColors[i] = refColor;
    }

    invalidateAlloc(m_context.getDeviceInterface(), m_context.getDevice(), *m_outputBufferAlloc);

    // Grab the output results using offset alignment
    for (uint32_t i = 0; i < m_numOutputColors; i++)
        outColors[i] = *(tcu::Vec4 *)((uint8_t *)m_outputBufferAlloc->getHostPtr() + colorBlockOutputSize * i);

    // Verify results
    for (uint32_t i = 0; i < m_numOutputColors; i++)
        if (outColors[i] != refColors[i])
            return tcu::TestStatus::fail("Output mismatch");

    return tcu::TestStatus::pass("Output matches expected values");
}
1301
// Test case for dynamic descriptor offsets used from a compute pipeline.
// Builds the compute shader matching the requested descriptor grouping
// strategy and creates a DynamicOffsetComputeTestInstance to run it.
class DynamicOffsetComputeTest : public vkt::TestCase
{
public:
    DynamicOffsetComputeTest(tcu::TestContext &testContext, const string &name, const TestParams &params);
    ~DynamicOffsetComputeTest(void);
    // Generates the compute shader with input blocks plus an output block.
    void initPrograms(SourceCollections &sourceCollections) const;
    TestInstance *createInstance(Context &context) const;
    // Rejects the test early when the pipeline construction type is unsupported.
    void checkSupport(Context &context) const;

protected:
    const TestParams m_params; // Descriptor layout / iteration configuration.
};
1314
// Stores the test parameters; all real work happens in the instance.
DynamicOffsetComputeTest::DynamicOffsetComputeTest(tcu::TestContext &testContext, const string &name,
                                                   const TestParams &params)
    : vkt::TestCase(testContext, name)
    , m_params(params)
{
}
1321
// No resources owned directly; members clean themselves up.
DynamicOffsetComputeTest::~DynamicOffsetComputeTest(void)
{
}
1325
createInstance(Context & context) const1326 TestInstance *DynamicOffsetComputeTest::createInstance(Context &context) const
1327 {
1328 return new DynamicOffsetComputeTestInstance(context, m_params);
1329 }
1330
checkSupport(Context & context) const1331 void DynamicOffsetComputeTest::checkSupport(Context &context) const
1332 {
1333 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
1334 m_params.pipelineConstructionType);
1335 }
1336
// Generates the GLSL compute shader source.
//
// One input block is declared per binding (SINGLE_SET/MULTISET) or one array
// of blocks per descriptor kind (ARRAYS), followed by a writable output
// block; the shader accumulates every input color into the output color.
void DynamicOffsetComputeTest::initPrograms(SourceCollections &sourceCollections) const
{
    const uint32_t numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
    // Unlike the graphics variant, storage buffers are not "readonly" here.
    const string bufferType =
        m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "buffer";
    ostringstream inputBlocks; // Block declarations, one per binding/array.
    ostringstream inputSum;    // Accumulation statements, one per binding.
    string setAndBinding;
    string blockSuffix;
    string accessSuffix;
    bool dynArrayDecl = false;    // Dynamic descriptor block array declared?
    bool nonDynArrayDecl = false; // Nondynamic descriptor block array declared?
    string bStr;

    for (uint32_t b = 0; b < numBindings; b++)
    {
        const bool dynBind = (b < m_params.numDynamicBindings);
        bStr = de::toString(b);

        if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
        {
            // All bindings live in set 0, numbered consecutively.
            setAndBinding = "set = 0, binding = " + bStr;
            blockSuffix = bStr;
            accessSuffix = bStr;
        }
        else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
        {
            // One descriptor set per binding, each using binding 0.
            setAndBinding = "set = " + bStr + ", binding = 0";
            blockSuffix = bStr;
            accessSuffix = bStr;
        }
        else // GroupingStrategy::ARRAYS
        {
            // In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
            // nondynamic descriptors.
            setAndBinding = "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
            blockSuffix =
                string(dynBind ? "Dyn" : "NonDyn") + "[" +
                (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) +
                "]";
            accessSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" +
                           (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
        }

        // In array mode, declare the input block only once per descriptor type.
        bool &arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
        if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
        {
            inputBlocks << "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
                        << "{\n"
                        << " vec4 color;\n"
                        << "} inputData" << blockSuffix << ";\n";
            arrayDeclFlag = true;
        }

        // But the sum always needs to be added once per descriptor.
        inputSum << " outData.color.rgb += inputData" << accessSuffix << ".color.rgb;\n";
    }

    // Pick the set/binding for the output block, after all input bindings.
    bStr = de::toString(numBindings);
    if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
    {
        setAndBinding = "set = 0, binding = " + bStr;
    }
    else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
    {
        setAndBinding = "set = " + bStr + ", binding = 0";
    }
    else // GroupingStrategy::ARRAYS
    {
        // The output buffer goes to a separate set.
        uint32_t usedSets = 0u;
        if (dynArrayDecl)
            ++usedSets;
        if (nonDynArrayDecl)
            ++usedSets;

        setAndBinding = "set = " + de::toString(usedSets) + ", binding = 0";
    }

    const string computeSrc = "#version 450\n" + inputBlocks.str() + "layout(" + setAndBinding +
                              ") writeonly buffer Output\n"
                              "{\n"
                              " vec4 color;\n"
                              "} outData;\n"
                              "\n"
                              "void main()\n"
                              "{\n"
                              " outData.color = vec4(0, 0, 0, 1);\n" +
                              inputSum.str() + "}\n";

    sourceCollections.glslSources.add("compute") << glu::ComputeSource(computeSrc);
}
1430
// Instance combining graphics and compute work that use dynamic-offset
// descriptors (vertex UBO, shared UBO, fragment UBO, read/write SSBOs, per
// the descriptor set layout built in iterate()).
class DynamicOffsetMixedTestInstance : public vkt::TestInstance
{
public:
    DynamicOffsetMixedTestInstance(Context &context, const PipelineConstructionType pipelineConstructionType,
                                   const tcu::IVec2 renderSize, const uint32_t numInstances, const bool testAllOffsets,
                                   const bool reverseOrder, const bool runComputeFirst, const uint32_t vertexOffset,
                                   const uint32_t sharedUboOffset, const uint32_t fragUboOffset,
                                   const uint32_t ssboReadOffset, const uint32_t ssboWriteOffset)
        : vkt::TestInstance(context)
        , m_pipelineConstructionType(pipelineConstructionType)
        , m_renderSize(renderSize)
        , m_numInstances(numInstances)
        , m_testAllOffsets(testAllOffsets)
        , m_reverseOrder(reverseOrder)
        , m_runComputeFirst(runComputeFirst)
        , m_vertexOffset(vertexOffset)
        , m_sharedUboOffset(sharedUboOffset)
        , m_fragUboOffset(fragUboOffset)
        , m_ssboReadOffset(ssboReadOffset)
        , m_ssboWriteOffset(ssboWriteOffset)
    {
    }

    ~DynamicOffsetMixedTestInstance();

    virtual tcu::TestStatus iterate(void);

private:
    // Interleaved vertex layout: position followed by color (matches the
    // two R32G32B32A32_SFLOAT attributes set up in iterate()).
    struct VertexInfo
    {
        tcu::Vec4 position;
        tcu::Vec4 color;
    };

    const VkFormat OUTPUT_COLOR_FORMAT = VK_FORMAT_R8G8B8A8_UNORM;

    const PipelineConstructionType m_pipelineConstructionType;
    const tcu::IVec2 m_renderSize;
    const uint32_t m_numInstances;
    const bool m_testAllOffsets;
    const bool m_reverseOrder; // Reverses the order descriptor bindings are declared in the set layout.
    const bool m_runComputeFirst; // Presumably dispatches compute before the draw — see iterate().
    // Fixed dynamic offsets used when not testing all offsets.
    const uint32_t m_vertexOffset;
    const uint32_t m_sharedUboOffset;
    const uint32_t m_fragUboOffset;
    const uint32_t m_ssboReadOffset;
    const uint32_t m_ssboWriteOffset;
};
1479
// All Vulkan objects are created locally in iterate(); nothing to release.
DynamicOffsetMixedTestInstance::~DynamicOffsetMixedTestInstance()
{
}
1483
iterate(void)1484 tcu::TestStatus DynamicOffsetMixedTestInstance::iterate(void)
1485 {
1486 tcu::TestLog &log = m_context.getTestContext().getLog();
1487 const InstanceInterface &vki = m_context.getInstanceInterface();
1488 const DeviceInterface &vk = m_context.getDeviceInterface();
1489 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
1490 const VkDevice device = m_context.getDevice();
1491 Allocator &allocator = m_context.getDefaultAllocator();
1492 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1493
1494 // Create shaders
1495 const ShaderWrapper vertexShaderModule = ShaderWrapper(vk, device, m_context.getBinaryCollection().get("vert"), 0u);
1496 const ShaderWrapper fragmentShaderModule =
1497 ShaderWrapper(vk, device, m_context.getBinaryCollection().get("frag"), 0u);
1498 const ShaderWrapper computeShaderModule =
1499 ShaderWrapper(vk, device, m_context.getBinaryCollection().get("comp"), 0u);
1500
1501 const uint32_t vertexBufferBindId = 0u;
1502
1503 // Vertex input state and binding
1504 VkVertexInputBindingDescription bindingDescription{
1505 vertexBufferBindId, // uint32_t binding;
1506 sizeof(VertexInfo), // uint32_t stride;
1507 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
1508 };
1509
1510 const std::array<VkVertexInputAttributeDescription, 2> vertexAttributeDescs{
1511 {VkVertexInputAttributeDescription{
1512 0u, // uint32_t location;
1513 vertexBufferBindId, // uint32_t binding;
1514 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1515 0u // uint32_t offset;
1516 },
1517
1518 VkVertexInputAttributeDescription{
1519 1u, // uint32_t location;
1520 vertexBufferBindId, // uint32_t binding;
1521 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1522 uint32_t(sizeof(float)) * 4u // uint32_t offset;
1523 }}};
1524
1525 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo{
1526 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
1527 DE_NULL, // const void* pNext;
1528 0u, // VkPipelineVertexInputStateCreateFlags flags;
1529 1u, // uint32_t vertexBindingDescriptionCount;
1530 &bindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
1531 static_cast<uint32_t>(vertexAttributeDescs.size()), // uint32_t vertexAttributeDescriptionCount;
1532 vertexAttributeDescs.data() // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
1533 };
1534
1535 // Descriptor pool and descriptor set
1536 DescriptorPoolBuilder poolBuilder;
1537
1538 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 3u);
1539 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 2u);
1540
1541 const Move<VkDescriptorPool> descriptorPool =
1542 poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1543
1544 DescriptorSetLayoutBuilder layoutBuilderAttachments;
1545 {
1546 if (!m_reverseOrder)
1547 {
1548 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1549 VK_SHADER_STAGE_VERTEX_BIT);
1550 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1551 VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT);
1552 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
1553 VK_SHADER_STAGE_COMPUTE_BIT);
1554 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1555 VK_SHADER_STAGE_FRAGMENT_BIT);
1556 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
1557 VK_SHADER_STAGE_COMPUTE_BIT);
1558 }
1559 else
1560 {
1561 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
1562 VK_SHADER_STAGE_COMPUTE_BIT);
1563 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1564 VK_SHADER_STAGE_FRAGMENT_BIT);
1565 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
1566 VK_SHADER_STAGE_COMPUTE_BIT);
1567 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1568 VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT);
1569 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
1570 VK_SHADER_STAGE_VERTEX_BIT);
1571 }
1572 }
1573
1574 const Move<VkDescriptorSetLayout> descriptorSetLayout = layoutBuilderAttachments.build(vk, device);
1575
1576 const Move<VkDescriptorSet> descriptorSet =
1577 makeDescriptorSet(vk, device, descriptorPool.get(), descriptorSetLayout.get());
1578
1579 Move<VkImage> colorImage =
1580 (makeImage(vk, device,
1581 makeImageCreateInfo(m_renderSize, VK_FORMAT_R8G8B8A8_UNORM,
1582 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT)));
1583
1584 // Allocate and bind color image memory
1585 const VkImageSubresourceRange colorSubresourceRange =
1586 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
1587 const UniquePtr<Allocation> colorImageAlloc(bindImage(vk, device, allocator, *colorImage, MemoryRequirement::Any));
1588 Move<VkImageView> colorImageView =
1589 (makeImageView(vk, device, *colorImage, VK_IMAGE_VIEW_TYPE_2D, OUTPUT_COLOR_FORMAT, colorSubresourceRange));
1590
1591 // Create renderpass
1592 const VkAttachmentDescription attachmentDescription = {
1593 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags
1594 OUTPUT_COLOR_FORMAT, // VkFormat format
1595 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
1596 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp
1597 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp
1598 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp
1599 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp
1600 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout
1601 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout
1602 };
1603
1604 const VkAttachmentReference attachmentReference = {
1605 0u, // uint32_t attachment
1606 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout
1607 };
1608
1609 const VkSubpassDescription subpassDescription = {
1610 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
1611 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
1612 0u, // uint32_t inputAttachmentCount
1613 DE_NULL, // const VkAttachmentReference* pInputAttachments
1614 1u, // uint32_t colorAttachmentCount
1615 &attachmentReference, // const VkAttachmentReference* pColorAttachments
1616 DE_NULL, // const VkAttachmentReference* pResolveAttachments
1617 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
1618 0u, // uint32_t preserveAttachmentCount
1619 DE_NULL // const uint32_t* pPreserveAttachments
1620 };
1621
1622 const VkRenderPassCreateInfo renderPassInfo = {
1623 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureTypei sType
1624 DE_NULL, // const void* pNext
1625 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
1626 1u, // uint32_t attachmentCount
1627 &attachmentDescription, // const VkAttachmentDescription* pAttachments
1628 1u, // uint32_t subpassCount
1629 &subpassDescription, // const VkSubpassDescription* pSubpasses
1630 0u, // uint32_t dependencyCount
1631 DE_NULL // const VkSubpassDependency* pDependencies
1632 };
1633
1634 RenderPassWrapper renderPass(m_pipelineConstructionType, vk, device, &renderPassInfo);
1635
1636 // Create framebuffer
1637 const VkImageView attachmentBindInfos[] = {*colorImageView};
1638
1639 const VkFramebufferCreateInfo framebufferCreateInfo = {
1640 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
1641 DE_NULL, // const void* pNext;
1642 VkFramebufferCreateFlags(0), // VkFramebufferCreateFlags flags;
1643 *renderPass, // VkRenderPass renderPass;
1644 1u, // uint32_t attachmentCount;
1645 attachmentBindInfos, // const VkImageView* pAttachments;
1646 (uint32_t)m_renderSize.x(), // uint32_t width;
1647 (uint32_t)m_renderSize.y(), // uint32_t height;
1648 1u // uint32_t layers;
1649 };
1650
1651 renderPass.createFramebuffer(vk, device, &framebufferCreateInfo, *colorImage);
1652
1653 // Create pipeline layout
1654 const VkPipelineLayoutCreateInfo pipelineLayoutInfo = {
1655 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
1656 DE_NULL, // const void* pNext;
1657 0u, // VkPipelineLayoutCreateFlags flags;
1658 1u, // uint32_t descriptorSetCount;
1659 &descriptorSetLayout.get(), // const VkDescriptorSetLayout* pSetLayouts;
1660 0u, // uint32_t pushConstantRangeCount;
1661 DE_NULL // const VkPushDescriptorRange* pPushDescriptorRanges;
1662 };
1663
1664 PipelineLayoutWrapper pipelineLayout(m_pipelineConstructionType, vk, device, &pipelineLayoutInfo);
1665
1666 // Create graphics pipeline
1667 const std::vector<VkViewport> viewports(1, makeViewport(m_renderSize));
1668 const std::vector<VkRect2D> scissors(1, makeRect2D(m_renderSize));
1669
1670 const VkPipelineRasterizationStateCreateInfo rasterizationState = {
1671 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType
1672 DE_NULL, // const void* pNext
1673 0u, // VkPipelineRasterizationStateCreateFlags flags
1674 VK_FALSE, // VkBool32 depthClampEnable
1675 VK_FALSE, // VkBool32 rasterizerDiscardEnable
1676 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode
1677 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode
1678 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace
1679 VK_FALSE, // VkBool32 depthBiasEnable
1680 0.0f, // float depthBiasConstantFactor
1681 0.0f, // float depthBiasClamp
1682 0.0f, // float depthBiasSlopeFactor
1683 1.0f // float lineWidth
1684 };
1685
1686 GraphicsPipelineWrapper graphicsPipeline(vki, vk, physicalDevice, device, m_context.getDeviceExtensions(),
1687 m_pipelineConstructionType);
1688
1689 graphicsPipeline.setDefaultMultisampleState()
1690 .setDefaultColorBlendState()
1691 .setDefaultTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
1692 .setupVertexInputState(&vertexInputStateCreateInfo)
1693 .setupPreRasterizationShaderState((viewports), scissors, pipelineLayout, *renderPass, 0u, vertexShaderModule,
1694 &rasterizationState)
1695 .setupFragmentShaderState(pipelineLayout, *renderPass, 0u, fragmentShaderModule)
1696 .setupFragmentOutputState(*renderPass, 0u)
1697 .setMonolithicPipelineLayout(pipelineLayout)
1698 .buildPipeline();
1699
1700 ComputePipelineWrapper computePipeline(vk, device, graphicsToComputeConstructionType(m_pipelineConstructionType),
1701 m_context.getBinaryCollection().get("comp"));
1702 computePipeline.setDescriptorSetLayout(descriptorSetLayout.get());
1703 computePipeline.buildPipeline();
1704
1705 const VkQueue queue = m_context.getUniversalQueue();
1706 const VkPhysicalDeviceLimits deviceLimits =
1707 getPhysicalDeviceProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()).limits;
1708
1709 // Create vertex buffer
1710 const uint32_t numVertices = 6;
1711 const VkDeviceSize vertexBufferSizeBytes = 256;
1712 const Unique<VkBuffer> vertexBuffer(
1713 makeBuffer(vk, device, vertexBufferSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1714 const de::UniquePtr<Allocation> vertexBufferAlloc(
1715 bindBuffer(vk, device, allocator, *vertexBuffer, MemoryRequirement::HostVisible));
1716
1717 const uint32_t instanceSize = (uint32_t)std::sqrt(m_numInstances);
1718 const float posIncrement = 1.0f / (float)m_numInstances * float(instanceSize);
1719
1720 // Result image has to be a square and multiple of 16.
1721 DE_ASSERT(instanceSize * instanceSize == m_numInstances && m_numInstances % 16u == 0);
1722
1723 {
1724 tcu::Vec4 vertexColor = tcu::Vec4(0.0f, 0.5f, 0.0f, 1.0f);
1725 VertexInfo *const pVertices = reinterpret_cast<VertexInfo *>(vertexBufferAlloc->getHostPtr());
1726
1727 pVertices[0] = {tcu::Vec4(posIncrement, -posIncrement, 0.0f, 1.0f), vertexColor};
1728 pVertices[1] = {tcu::Vec4(-posIncrement, -posIncrement, 0.0f, 1.0f), vertexColor};
1729 pVertices[2] = {tcu::Vec4(-posIncrement, posIncrement, 0.0f, 1.0f), vertexColor};
1730 pVertices[3] = {tcu::Vec4(-posIncrement, posIncrement, 1.0f, 1.0f), vertexColor};
1731 pVertices[4] = {tcu::Vec4(posIncrement, posIncrement, 1.0f, 1.0f), vertexColor};
1732 pVertices[5] = {tcu::Vec4(posIncrement, -posIncrement, 1.0f, 1.0f), vertexColor};
1733
1734 flushAlloc(vk, device, *vertexBufferAlloc);
1735 }
1736
1737 // Prepare buffers
1738 const vk::VkDeviceSize minUboAlignment = deviceLimits.minUniformBufferOffsetAlignment;
1739 const vk::VkDeviceSize minSsboAlignment = deviceLimits.minStorageBufferOffsetAlignment;
1740 const uint32_t bufferElementSizeVec4 = (uint32_t)sizeof(tcu::Vec4);
1741 const uint32_t bufferElementSizeMat4 = (uint32_t)sizeof(tcu::Mat4);
1742 uint32_t uboDynamicAlignmentVec4 = bufferElementSizeVec4;
1743 uint32_t uboDynamicAlignmentMat4 = bufferElementSizeMat4;
1744 uint32_t ssboDynamicAlignmentVec4 = bufferElementSizeVec4;
1745 uint32_t ssboDynamicAlignmentMat4 = bufferElementSizeMat4;
1746
1747 if (minUboAlignment > 0)
1748 {
1749 uboDynamicAlignmentVec4 =
1750 (uboDynamicAlignmentVec4 + (uint32_t)minUboAlignment - 1) & ~((uint32_t)minUboAlignment - 1);
1751 uboDynamicAlignmentMat4 =
1752 (uboDynamicAlignmentMat4 + (uint32_t)minUboAlignment - 1) & ~((uint32_t)minUboAlignment - 1);
1753 }
1754 if (minSsboAlignment > 0)
1755 {
1756 ssboDynamicAlignmentVec4 =
1757 (ssboDynamicAlignmentVec4 + (uint32_t)minSsboAlignment - 1) & ~((uint32_t)minSsboAlignment - 1);
1758 ssboDynamicAlignmentMat4 =
1759 (ssboDynamicAlignmentMat4 + (uint32_t)minSsboAlignment - 1) & ~((uint32_t)minSsboAlignment - 1);
1760 }
1761
1762 const uint32_t uboBufferSizeVec4 = m_numInstances * uboDynamicAlignmentVec4;
1763 const uint32_t uboBufferSizeMat4 = m_numInstances * uboDynamicAlignmentMat4;
1764 const uint32_t ssboBufferSizeVec4 = m_numInstances * ssboDynamicAlignmentVec4;
1765 const uint32_t ssboBufferSizeMat4 = m_numInstances * ssboDynamicAlignmentMat4;
1766
1767 const Unique<VkBuffer> uboBufferVertex(
1768 makeBuffer(vk, device, uboBufferSizeVec4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1769 const Unique<VkBuffer> uboBufferShared(
1770 makeBuffer(vk, device, uboBufferSizeVec4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1771 const Unique<VkBuffer> ssboBufferWrite(
1772 makeBuffer(vk, device, ssboBufferSizeVec4, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
1773 const Unique<VkBuffer> uboBufferFrag(makeBuffer(vk, device, uboBufferSizeMat4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1774 const Unique<VkBuffer> ssboBufferRead(
1775 makeBuffer(vk, device, ssboBufferSizeMat4, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
1776
1777 const UniquePtr<Allocation> uboBufferAllocVertex(
1778 bindBuffer(vk, device, allocator, *uboBufferVertex, MemoryRequirement::HostVisible));
1779 const UniquePtr<Allocation> uboBufferAllocShared(
1780 bindBuffer(vk, device, allocator, *uboBufferShared, MemoryRequirement::HostVisible));
1781 const UniquePtr<Allocation> ssboBufferAllocWrite(
1782 bindBuffer(vk, device, allocator, *ssboBufferWrite, MemoryRequirement::HostVisible));
1783 const UniquePtr<Allocation> uboBufferAllocFrag(
1784 bindBuffer(vk, device, allocator, *uboBufferFrag, MemoryRequirement::HostVisible));
1785 const UniquePtr<Allocation> ssboBufferAllocRead(
1786 bindBuffer(vk, device, allocator, *ssboBufferRead, MemoryRequirement::HostVisible));
1787
1788 const float colorIncrement = 1.0f / float(m_numInstances);
1789
1790 std::vector<tcu::Vec4> constVertexOffsets;
1791
1792 uint32_t columnCount = 0u;
1793 float columnOffset = posIncrement;
1794 float rowOffset = -1.0f + posIncrement;
1795
1796 for (uint32_t posId = 0; posId < m_numInstances; posId++)
1797 {
1798 constVertexOffsets.push_back(tcu::Vec4(-1.0f + columnOffset, rowOffset, 0.0f, 0.0f));
1799
1800 columnOffset += 2 * posIncrement;
1801 columnCount++;
1802
1803 if (columnCount >= instanceSize)
1804 {
1805 columnCount = 0;
1806 columnOffset = posIncrement;
1807 rowOffset += 2 * posIncrement;
1808 }
1809 }
1810
1811 // Fill buffers
1812 {
1813 char *pPosUboVertex = static_cast<char *>(uboBufferAllocVertex->getHostPtr());
1814 char *pPosUboShared = static_cast<char *>(uboBufferAllocShared->getHostPtr());
1815 char *pPosSsboWrite = static_cast<char *>(ssboBufferAllocWrite->getHostPtr());
1816 char *pPosUboFrag = static_cast<char *>(uboBufferAllocFrag->getHostPtr());
1817 char *pPosSsboRead = static_cast<char *>(ssboBufferAllocRead->getHostPtr());
1818
1819 if (m_testAllOffsets)
1820 {
1821 for (uint32_t posId = 0; posId < m_numInstances; posId++)
1822 {
1823 const float constFragMat[] = {
1824 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1825 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1826 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1827 colorIncrement, colorIncrement, colorIncrement, colorIncrement * float(posId + 1u)};
1828
1829 const float constReadMat[] = {
1830 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f - colorIncrement * float(posId + 1u),
1831 1.0f, 0.0f, 1.0f, 0.17f, 0.0f, 1.0f, 0.0f, 1.0f};
1832
1833 *((tcu::Vec4 *)pPosUboVertex) = constVertexOffsets[posId];
1834 *((tcu::Vec4 *)pPosUboShared) = tcu::Vec4(colorIncrement * float(posId + 1u), 0.0f, 0.0f, 1.0f);
1835 *((tcu::Vec4 *)pPosSsboWrite) = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1836 *((tcu::Mat4 *)pPosUboFrag) = tcu::Mat4(constFragMat);
1837 *((tcu::Mat4 *)pPosSsboRead) = tcu::Mat4(constReadMat);
1838 pPosUboVertex += uboDynamicAlignmentVec4;
1839 pPosUboShared += uboDynamicAlignmentVec4;
1840 pPosSsboWrite += ssboDynamicAlignmentVec4;
1841 pPosUboFrag += uboDynamicAlignmentMat4;
1842 pPosSsboRead += ssboDynamicAlignmentMat4;
1843 }
1844 }
1845 else
1846 {
1847 for (uint32_t posId = 0; posId < m_numInstances; posId++)
1848 {
1849 const float constFragMat[] = {
1850 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1851 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, m_fragUboOffset == posId ? 1.0f : 0.0f};
1852
1853 const float constReadMat[] = {0.0f, 0.0f, 0.0f, 0.0f,
1854 0.0f, 0.0f, 0.0f, m_ssboReadOffset == posId ? 0.25f : 0.0f,
1855 0.0f, 0.0f, 0.0f, m_ssboReadOffset == posId ? 0.17f : 0.0f,
1856 0.0f, 0.0f, 0.0f, 0.0f};
1857
1858 *((tcu::Vec4 *)pPosUboVertex) = constVertexOffsets[posId];
1859 *((tcu::Vec4 *)pPosUboShared) =
1860 m_sharedUboOffset == posId ? tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f) : tcu::Vec4(0);
1861 *((tcu::Vec4 *)pPosSsboWrite) = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1862 *((tcu::Mat4 *)pPosUboFrag) = tcu::Mat4(constFragMat);
1863 *((tcu::Mat4 *)pPosSsboRead) = tcu::Mat4(constReadMat);
1864 pPosUboVertex += uboDynamicAlignmentVec4;
1865 pPosUboShared += uboDynamicAlignmentVec4;
1866 pPosSsboWrite += ssboDynamicAlignmentVec4;
1867 pPosUboFrag += uboDynamicAlignmentMat4;
1868 pPosSsboRead += ssboDynamicAlignmentMat4;
1869 }
1870 }
1871
1872 flushAlloc(vk, device, *uboBufferAllocVertex);
1873 flushAlloc(vk, device, *uboBufferAllocShared);
1874 flushAlloc(vk, device, *ssboBufferAllocWrite);
1875 flushAlloc(vk, device, *uboBufferAllocFrag);
1876 flushAlloc(vk, device, *ssboBufferAllocRead);
1877 }
1878
1879 const vk::VkDescriptorBufferInfo uboInfoVertexVec =
1880 makeDescriptorBufferInfo(*uboBufferVertex, 0u, bufferElementSizeVec4);
1881 const vk::VkDescriptorBufferInfo uboInfoVec = makeDescriptorBufferInfo(*uboBufferShared, 0u, bufferElementSizeVec4);
1882 const vk::VkDescriptorBufferInfo ssboInfoVec =
1883 makeDescriptorBufferInfo(*ssboBufferWrite, 0u, bufferElementSizeVec4);
1884 const vk::VkDescriptorBufferInfo uboInfoMat = makeDescriptorBufferInfo(*uboBufferFrag, 0u, bufferElementSizeMat4);
1885 const vk::VkDescriptorBufferInfo ssboInfoMat = makeDescriptorBufferInfo(*ssboBufferRead, 0u, bufferElementSizeMat4);
1886
1887 // Update descriptors
1888 DescriptorSetUpdateBuilder builder;
1889
1890 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 4u : 0u),
1891 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoVertexVec);
1892 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 3u : 1u),
1893 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoVec);
1894 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u),
1895 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, &ssboInfoVec);
1896 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 1u : 3u),
1897 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoMat);
1898 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 0u : 4u),
1899 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, &ssboInfoMat);
1900 builder.update(vk, device);
1901
1902 // Command buffer
1903 const Unique<VkCommandPool> cmdPool(
1904 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1905 const Unique<VkCommandBuffer> cmdBuffer(
1906 allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1907
1908 const VkDeviceSize vertexBufferOffset = 0u;
1909
1910 // Render result buffer
1911 const VkDeviceSize colorBufferSizeBytes = tcu::getPixelSize(mapVkFormat(OUTPUT_COLOR_FORMAT)) *
1912 static_cast<VkDeviceSize>(m_renderSize.x()) *
1913 static_cast<VkDeviceSize>(m_renderSize.y());
1914 const Unique<VkBuffer> colorBuffer(makeBuffer(vk, device, colorBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1915 const UniquePtr<Allocation> colorBufferAlloc(
1916 bindBuffer(vk, device, allocator, *colorBuffer, MemoryRequirement::HostVisible));
1917
1918 const VkClearValue clearColorValue = defaultClearValue(OUTPUT_COLOR_FORMAT);
1919
1920 bool runGraphics = !m_runComputeFirst;
1921
1922 for (int i = 0; i < 2; i++)
1923 {
1924 beginCommandBuffer(vk, *cmdBuffer);
1925
1926 if (runGraphics)
1927 {
1928 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), clearColorValue);
1929 graphicsPipeline.bind(*cmdBuffer);
1930 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1931 }
1932 else
1933 {
1934 computePipeline.bind(*cmdBuffer);
1935 }
1936
1937 if (m_testAllOffsets)
1938 {
1939 for (uint32_t instance = 0; instance < m_numInstances; instance++)
1940 {
1941 uint32_t uboOffsetVec4 = uboDynamicAlignmentVec4 * instance;
1942 uint32_t uboOffsetMat4 = uboDynamicAlignmentMat4 * instance;
1943 uint32_t ssboOffsetVec4 = ssboDynamicAlignmentVec4 * instance;
1944 uint32_t ssboOffsetMat4 = ssboDynamicAlignmentMat4 * instance;
1945 std::vector<uint32_t> offsets;
1946
1947 offsets.push_back(m_reverseOrder ? ssboOffsetMat4 : uboOffsetVec4);
1948 offsets.push_back(m_reverseOrder ? uboOffsetMat4 : uboOffsetVec4);
1949 offsets.push_back(ssboOffsetVec4);
1950 offsets.push_back(m_reverseOrder ? uboOffsetVec4 : uboOffsetMat4);
1951 offsets.push_back(m_reverseOrder ? uboOffsetVec4 : ssboOffsetMat4);
1952
1953 if (runGraphics)
1954 {
1955 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
1956 &descriptorSet.get(), (uint32_t)offsets.size(), offsets.data());
1957 vk.cmdDraw(*cmdBuffer, numVertices, 1u, 0u, 0u);
1958 }
1959 else
1960 {
1961 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
1962 &descriptorSet.get(), (uint32_t)offsets.size(), offsets.data());
1963 vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
1964 }
1965 }
1966 }
1967 else
1968 {
1969 std::vector<uint32_t> offsets;
1970
1971 offsets.push_back(m_reverseOrder ? ssboDynamicAlignmentMat4 * m_ssboReadOffset :
1972 uboDynamicAlignmentVec4 * m_vertexOffset);
1973 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentMat4 * m_fragUboOffset :
1974 uboDynamicAlignmentVec4 * m_sharedUboOffset);
1975 offsets.push_back(ssboDynamicAlignmentVec4 * m_ssboWriteOffset);
1976 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentVec4 * m_sharedUboOffset :
1977 uboDynamicAlignmentMat4 * m_fragUboOffset);
1978 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentVec4 * m_vertexOffset :
1979 ssboDynamicAlignmentMat4 * m_ssboReadOffset);
1980
1981 if (runGraphics)
1982 {
1983 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
1984 &descriptorSet.get(), (uint32_t)offsets.size(), offsets.data());
1985 vk.cmdDraw(*cmdBuffer, numVertices, 1u, 0u, 0u);
1986 }
1987 else
1988 {
1989 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
1990 &descriptorSet.get(), (uint32_t)offsets.size(), offsets.data());
1991 vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
1992 }
1993 }
1994
1995 if (runGraphics)
1996 {
1997 renderPass.end(vk, *cmdBuffer);
1998 copyImageToBuffer(vk, *cmdBuffer, *colorImage, *colorBuffer, m_renderSize,
1999 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
2000 }
2001
2002 runGraphics = !runGraphics;
2003
2004 endCommandBuffer(vk, *cmdBuffer);
2005
2006 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
2007 m_context.resetCommandPoolForVKSC(device, *cmdPool);
2008 }
2009
2010 // Check result image
2011 {
2012 tcu::TextureLevel referenceTexture(mapVkFormat(OUTPUT_COLOR_FORMAT), m_renderSize.x(), m_renderSize.y());
2013 const tcu::PixelBufferAccess referenceAccess = referenceTexture.getAccess();
2014 const uint32_t segmentSize = m_renderSize.x() / instanceSize;
2015
2016 // Create reference image
2017 if (m_testAllOffsets)
2018 {
2019 for (int y = 0; y < m_renderSize.y(); ++y)
2020 {
2021 for (int x = 0; x < m_renderSize.x(); ++x)
2022 {
2023 // While running test for all offsets, we create a nice gradient-like color for the pixels.
2024 float colorValue = (float)(y / segmentSize * instanceSize + x / segmentSize + 1u) * colorIncrement;
2025
2026 referenceAccess.setPixel(tcu::Vec4(colorValue, 0.5f, colorValue, 1.0f), x, y);
2027 }
2028 }
2029 }
2030 else
2031 {
2032 // At first we have to find a correct location for the drawn square.
2033 const uint32_t segmentCountPerRow = (uint32_t)m_renderSize.x() / segmentSize;
2034 const uint32_t offsetY = m_vertexOffset > segmentCountPerRow ? m_vertexOffset / segmentCountPerRow : 0u;
2035 const uint32_t offsetX = offsetY > 0 ? m_vertexOffset - (segmentCountPerRow * offsetY) : m_vertexOffset;
2036 const uint32_t pixelOffsetY = segmentSize * offsetY;
2037 const uint32_t pixelOffsetX = segmentSize * offsetX;
2038
2039 for (int y = 0; y < m_renderSize.y(); ++y)
2040 {
2041 for (int x = 0; x < m_renderSize.x(); ++x)
2042 {
2043 float colorValueRed = clearColorValue.color.float32[0];
2044 float colorValueGreen = clearColorValue.color.float32[1];
2045 float colorValueBlue = clearColorValue.color.float32[2];
2046
2047 // Next, we fill the correct number of pixels with test color.
2048 if (x >= (int)pixelOffsetX && x < int(pixelOffsetX + segmentSize) && y >= (int)pixelOffsetY &&
2049 y < int(pixelOffsetY + segmentSize))
2050 {
2051 // While running test only for one offset, the result color for pixel is constant.
2052 colorValueRed = 1.0f;
2053 colorValueGreen = 0.5f;
2054 colorValueBlue = colorValueRed;
2055 }
2056
2057 referenceAccess.setPixel(tcu::Vec4(colorValueRed, colorValueGreen, colorValueBlue, 1.0f), x, y);
2058 }
2059 }
2060 }
2061
2062 invalidateAlloc(vk, device, *colorBufferAlloc);
2063
2064 const tcu::ConstPixelBufferAccess resultPixelAccess(mapVkFormat(OUTPUT_COLOR_FORMAT), m_renderSize.x(),
2065 m_renderSize.y(), 1, colorBufferAlloc->getHostPtr());
2066
2067 if (!tcu::floatThresholdCompare(log, "color", "Image compare", referenceAccess, resultPixelAccess,
2068 tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT))
2069 return tcu::TestStatus::fail("Rendered image is not correct");
2070 }
2071
2072 // Check result buffer values
2073 {
2074 invalidateAlloc(vk, device, *ssboBufferAllocWrite);
2075
2076 std::vector<tcu::Vec4> refColors;
2077 std::vector<tcu::Vec4> outColors;
2078
2079 for (uint32_t i = 0; i < m_numInstances; i++)
2080 {
2081 if (m_testAllOffsets)
2082 {
2083 refColors.push_back(
2084 tcu::Vec4(float(i + 1) * colorIncrement, 1.0f - float(i + 1) * colorIncrement, 0.17f, 1.0f));
2085 }
2086 else
2087 {
2088 refColors.push_back(m_ssboWriteOffset == i ? tcu::Vec4(1.0f, 0.25f, 0.17f, 1.0f) :
2089 tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
2090 }
2091
2092 outColors.push_back(
2093 *(tcu::Vec4 *)((uint8_t *)ssboBufferAllocWrite->getHostPtr() + ssboDynamicAlignmentVec4 * i));
2094
2095 if (!compareVectors(outColors[i], refColors[i], 0.01f))
2096 {
2097 log << tcu::TestLog::Message << "Reference: " << refColors[i].x() << ", " << refColors[i].y() << ", "
2098 << refColors[i].z() << ", " << refColors[i].w() << ", " << tcu::TestLog::EndMessage;
2099 log << tcu::TestLog::Message << "Result : " << outColors[i].x() << ", " << outColors[i].y() << ", "
2100 << outColors[i].z() << ", " << outColors[i].w() << ", " << tcu::TestLog::EndMessage;
2101
2102 return tcu::TestStatus::fail("Result value is not correct");
2103 }
2104 }
2105 }
2106
2107 return tcu::TestStatus::pass("Success");
2108 }
2109
// Test case exercising dynamic uniform- and storage-buffer descriptor offsets that are
// shared between a graphics pipeline and a compute pipeline through one descriptor set.
class DynamicOffsetMixedTest : public vkt::TestCase
{
public:
    // The *Offset parameters are element indices into the dynamically-offset buffers;
    // they are only used when testAllOffsets is false (single-offset mode).
    DynamicOffsetMixedTest(tcu::TestContext &testContext, const PipelineConstructionType pipelineConstructionType,
                           const std::string &name, const tcu::IVec2 renderSize, const uint32_t numInstances,
                           const bool testAllOffsets, const bool reverseOrder, const bool runComputeFirst = false,
                           const uint32_t vertexOffset = 0u, const uint32_t sharedUboOffset = 0u,
                           const uint32_t fragUboOffset = 0u, const uint32_t ssboReadOffset = 0u,
                           const uint32_t ssboWriteOffset = 0u)
        : vkt::TestCase(testContext, name)
        , m_pipelineConstructionType(pipelineConstructionType)
        , m_renderSize(renderSize)
        , m_numInstances(numInstances)
        , m_testAllOffsets(testAllOffsets)
        , m_reverseOrder(reverseOrder)
        , m_runComputeFirst(runComputeFirst)
        , m_vertexOffset(vertexOffset)
        , m_sharedUboOffset(sharedUboOffset)
        , m_fragUboOffset(fragUboOffset)
        , m_ssboReadOffset(ssboReadOffset)
        , m_ssboWriteOffset(ssboWriteOffset)
    {
    }

    ~DynamicOffsetMixedTest(void);

    void initPrograms(SourceCollections &sourceCollections) const;
    void checkSupport(vkt::Context &context) const;
    TestInstance *createInstance(Context &context) const;

private:
    const PipelineConstructionType m_pipelineConstructionType;
    const tcu::IVec2 m_renderSize;    // Render-target size in pixels.
    const uint32_t m_numInstances;    // Number of per-offset draw/dispatch iterations.
    const bool m_testAllOffsets;      // true: exercise every offset; false: only the fixed ones below.
    const bool m_reverseOrder;        // true: descriptor bindings are declared in reversed order (0<->4, 1<->3).
    const bool m_runComputeFirst;     // true: run the compute dispatch before the graphics pass.
    const uint32_t m_vertexOffset;    // Offset index for the vertex-stage UBO.
    const uint32_t m_sharedUboOffset; // Offset index for the UBO shared by fragment and compute.
    const uint32_t m_fragUboOffset;   // Offset index for the fragment-only UBO.
    const uint32_t m_ssboReadOffset;  // Offset index for the compute-read SSBO.
    const uint32_t m_ssboWriteOffset; // Offset index for the compute-write SSBO.
};
2153
~DynamicOffsetMixedTest(void)2154 DynamicOffsetMixedTest::~DynamicOffsetMixedTest(void)
2155 {
2156 }
2157
// Generate the GLSL sources for the vertex, fragment and compute shaders.
//
// Binding numbers are mirrored when m_reverseOrder is set (0 <-> 4, 1 <-> 3, binding 2
// unchanged), matching the reversed descriptor set layout built by the test instance.
// The string contents must stay in sync with the descriptor writes done at runtime.
void DynamicOffsetMixedTest::initPrograms(SourceCollections &sourceCollections) const
{
    // Vertex: translates each incoming vertex by a per-instance position taken from a
    // dynamic UBO (binding 0, or 4 in reverse order) and passes the color through.
    {
        std::ostringstream src;

        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << "\n"
            << "layout(set = 0, binding = " << (m_reverseOrder ? "4" : "0") << ") uniform uboVertexData\n"
            << "{\n"
            << "    vec4 position;\n"
            << "} inputPosData;\n"
            << "\n"
            << "layout(location = 0) in vec4 inPosition;\n"
            << "layout(location = 1) in vec4 inColor;\n"
            << "layout(location = 0) out vec4 outColor;\n"
            << "\n"
            << "out gl_PerVertex\n"
            << "{\n"
            << "    vec4 gl_Position;\n"
            << "};\n"
            << "\n"
            << "void main (void)\n"
            << "{\n"
            << "    gl_Position = inPosition + inputPosData.position;\n"
            << "    outColor = inColor;\n"
            << "}\n";

        sourceCollections.glslSources.add("vert") << glu::VertexSource(src.str());
    }

    // Fragment: adds the shared dynamic UBO color (binding 1/3) to the interpolated
    // vertex color, then overrides blue with element [3][3] of the fragment-only
    // dynamic UBO matrix (binding 3/1).
    {
        std::ostringstream src;

        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << "\n"
            << "layout(set = 0, binding = " << (m_reverseOrder ? "3" : "1") << ") uniform uboSharedData\n"
            << "{\n"
            << "    vec4 color;\n"
            << "} inputData0;\n"
            << "\n"
            << "layout(set = 0, binding = " << (m_reverseOrder ? "1" : "3") << ") uniform uboFragOnly\n"
            << "{\n"
            << "    mat4 color;\n"
            << "} inputData1;\n"
            << "\n"
            << "layout(location = 0) in vec4 inColor;\n"
            << "layout(location = 0) out vec4 outColor;\n"
            << "\n"
            << "void main (void)\n"
            << "{\n"
            << "    outColor = inColor + inputData0.color;\n"
            << "    outColor.b = inputData1.color[3][3];\n"
            << "}\n";

        sourceCollections.glslSources.add("frag") << glu::FragmentSource(src.str());
    }

    // Compute: copies the shared dynamic UBO color (binding 1/3, same buffer the
    // fragment shader reads) into the dynamic write SSBO (binding 2), replacing the
    // green and blue channels with values from the dynamic read SSBO (binding 4/0).
    {
        std::ostringstream src;

        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << "\n"
            << "layout(set = 0, binding = " << (m_reverseOrder ? "3" : "1") << ") uniform uboSharedData\n"
            << "{\n"
            << "    vec4 color;\n"
            << "} inputData;\n"
            << "\n"
            << "layout(set = 0, binding = 2) writeonly buffer ssboOutput\n"
            << "{\n"
            << "    vec4 color;\n"
            << "} outData;\n"
            << "\n"
            << "layout(set = 0, binding = " << (m_reverseOrder ? "0" : "4") << ") readonly buffer ssboInput\n"
            << "{\n"
            << "    mat4 color;\n"
            << "} readData;\n"
            << "\n"
            << "void main (void)\n"
            << "{\n"
            << "    outData.color = inputData.color;\n"
            << "    outData.color.g = readData.color[3][1];\n"
            << "    outData.color.b = readData.color[3][2];\n"
            << "}\n";

        sourceCollections.glslSources.add("comp") << glu::ComputeSource(src.str());
    }
}
2248
checkSupport(vkt::Context & context) const2249 void DynamicOffsetMixedTest::checkSupport(vkt::Context &context) const
2250 {
2251 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
2252 m_pipelineConstructionType);
2253 }
2254
// Create the runtime side of the test, forwarding every configuration parameter of the
// case verbatim to the instance.
TestInstance *DynamicOffsetMixedTest::createInstance(Context &context) const
{
    return new DynamicOffsetMixedTestInstance(context, m_pipelineConstructionType, m_renderSize, m_numInstances,
                                              m_testAllOffsets, m_reverseOrder, m_runComputeFirst, m_vertexOffset,
                                              m_sharedUboOffset, m_fragUboOffset, m_ssboReadOffset, m_ssboWriteOffset);
}
2261
2262 } // namespace
2263
// Builds the "dynamic_offset" test group: a cartesian product of pipeline type,
// descriptor grouping strategy, dynamic descriptor type, command buffer count,
// binding order, and binding counts, plus a separate "combined_descriptors"
// group that mixes graphics and compute work using the same descriptor sets.
tcu::TestCaseGroup *createDynamicOffsetTests(tcu::TestContext &testCtx,
                                             PipelineConstructionType pipelineConstructionType)
{
    const char *pipelineTypes[] = {"graphics", "compute"};

    // How descriptor bindings are distributed: one set, multiple sets, or arrayed bindings.
    struct
    {
        const char *name;
        const GroupingStrategy strategy;
    } const groupingTypes[] = {
        {"single_set", GroupingStrategy::SINGLE_SET},
        {"multiset", GroupingStrategy::MULTISET},
        {"arrays", GroupingStrategy::ARRAYS},
    };

    // Dynamic descriptor types under test.
    struct
    {
        const char *name;
        VkDescriptorType type;
    } const descriptorTypes[] = {{"uniform_buffer", VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC},
                                 {"storage_buffer", VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC}};

    // Number of command buffers the work is recorded into.
    struct
    {
        const char *name;
        uint32_t num;
    } const numCmdBuffers[] = {{"numcmdbuffers_1", 1u}, {"numcmdbuffers_2", 2u}};

    // Whether the second command buffer binds descriptors in reverse order.
    struct
    {
        const char *name;
        bool reverse;
    } const reverseOrders[] = {{"reverseorder", true}, {"sameorder", false}};

    // Number of vkCmdBindDescriptorSets calls (with differing offsets) per draw/dispatch.
    struct
    {
        const char *name;
        uint32_t num;
    } const numDescriptorSetBindings[] = {{"numdescriptorsetbindings_1", 1u}, {"numdescriptorsetbindings_2", 2u}};

    // Number of bindings using a dynamic descriptor type.
    struct
    {
        const char *name;
        uint32_t num;
    } const numDynamicBindings[] = {{"numdynamicbindings_1", 1u}, {"numdynamicbindings_2", 2u}};

    // Number of additional non-dynamic bindings mixed in alongside the dynamic ones.
    struct
    {
        const char *name;
        uint32_t num;
    } const numNonDynamicBindings[] = {{"numnondynamicbindings_0", 0u}, {"numnondynamicbindings_1", 1u}};

    de::MovePtr<tcu::TestCaseGroup> dynamicOffsetTests(new tcu::TestCaseGroup(testCtx, "dynamic_offset"));

    for (uint32_t pipelineTypeIdx = 0; pipelineTypeIdx < DE_LENGTH_OF_ARRAY(pipelineTypes); pipelineTypeIdx++)
    {
        // VK_EXT_graphics_pipeline_library can't be tested with compute pipeline
        if ((pipelineTypeIdx == 1) && (pipelineConstructionType != PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC))
            continue;

        de::MovePtr<tcu::TestCaseGroup> pipelineTypeGroup(
            new tcu::TestCaseGroup(testCtx, pipelineTypes[pipelineTypeIdx]));

        for (uint32_t groupingTypeIdx = 0; groupingTypeIdx < DE_LENGTH_OF_ARRAY(groupingTypes); ++groupingTypeIdx)
        {
            de::MovePtr<tcu::TestCaseGroup> groupingTypeGroup(
                new tcu::TestCaseGroup(testCtx, groupingTypes[groupingTypeIdx].name));

            for (uint32_t descriptorTypeIdx = 0; descriptorTypeIdx < DE_LENGTH_OF_ARRAY(descriptorTypes);
                 descriptorTypeIdx++)
            {
                de::MovePtr<tcu::TestCaseGroup> descriptorTypeGroup(
                    new tcu::TestCaseGroup(testCtx, descriptorTypes[descriptorTypeIdx].name));

                for (uint32_t numCmdBuffersIdx = 0; numCmdBuffersIdx < DE_LENGTH_OF_ARRAY(numCmdBuffers);
                     numCmdBuffersIdx++)
                {
                    de::MovePtr<tcu::TestCaseGroup> numCmdBuffersGroup(
                        new tcu::TestCaseGroup(testCtx, numCmdBuffers[numCmdBuffersIdx].name));

                    for (uint32_t reverseOrderIdx = 0; reverseOrderIdx < DE_LENGTH_OF_ARRAY(reverseOrders);
                         reverseOrderIdx++)
                    {
                        // Reverse binding order is only meaningful with more than one command buffer.
                        if (numCmdBuffers[numCmdBuffersIdx].num < 2 && reverseOrders[reverseOrderIdx].reverse)
                            continue;

                        de::MovePtr<tcu::TestCaseGroup> reverseOrderGroup(
                            new tcu::TestCaseGroup(testCtx, reverseOrders[reverseOrderIdx].name));

                        for (uint32_t numDescriptorSetBindingsIdx = 0;
                             numDescriptorSetBindingsIdx < DE_LENGTH_OF_ARRAY(numDescriptorSetBindings);
                             numDescriptorSetBindingsIdx++)
                        {
                            // Multiple descriptor set bindings are only tested with a single command buffer.
                            if (numCmdBuffers[numCmdBuffersIdx].num > 1 &&
                                numDescriptorSetBindings[numDescriptorSetBindingsIdx].num > 1)
                                continue;

                            de::MovePtr<tcu::TestCaseGroup> numDescriptorSetBindingsGroup(new tcu::TestCaseGroup(
                                testCtx, numDescriptorSetBindings[numDescriptorSetBindingsIdx].name));
                            for (uint32_t numDynamicBindingsIdx = 0;
                                 numDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numDynamicBindings);
                                 numDynamicBindingsIdx++)
                            {
                                de::MovePtr<tcu::TestCaseGroup> numDynamicBindingsGroup(
                                    new tcu::TestCaseGroup(testCtx, numDynamicBindings[numDynamicBindingsIdx].name));

                                for (uint32_t numNonDynamicBindingsIdx = 0;
                                     numNonDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numNonDynamicBindings);
                                     numNonDynamicBindingsIdx++)
                                {
                                    TestParams params{pipelineConstructionType,
                                                      descriptorTypes[descriptorTypeIdx].type,
                                                      numCmdBuffers[numCmdBuffersIdx].num,
                                                      reverseOrders[reverseOrderIdx].reverse,
                                                      numDescriptorSetBindings[numDescriptorSetBindingsIdx].num,
                                                      numDynamicBindings[numDynamicBindingsIdx].num,
                                                      numNonDynamicBindings[numNonDynamicBindingsIdx].num,
                                                      groupingTypes[groupingTypeIdx].strategy};
                                    // Under Vulkan SC the graphics variant is compiled out, so the compute
                                    // test is added regardless of the pipeline-type group.
#ifndef CTS_USES_VULKANSC
                                    if (strcmp(pipelineTypes[pipelineTypeIdx], "graphics") == 0)
                                        numDynamicBindingsGroup->addChild(new DynamicOffsetGraphicsTest(
                                            testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, params));
                                    else
#endif // CTS_USES_VULKANSC
                                        numDynamicBindingsGroup->addChild(new DynamicOffsetComputeTest(
                                            testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, params));
                                }

                                numDescriptorSetBindingsGroup->addChild(numDynamicBindingsGroup.release());
                            }

                            reverseOrderGroup->addChild(numDescriptorSetBindingsGroup.release());
                        }

                        numCmdBuffersGroup->addChild(reverseOrderGroup.release());
                    }

                    descriptorTypeGroup->addChild(numCmdBuffersGroup.release());
                }

                groupingTypeGroup->addChild(descriptorTypeGroup.release());
            }

            pipelineTypeGroup->addChild(groupingTypeGroup.release());
        }

        dynamicOffsetTests->addChild(pipelineTypeGroup.release());
    }

    // Dynamic descriptor offset test for combined descriptor sets.
    if (pipelineConstructionType == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC)
    {
        de::MovePtr<tcu::TestCaseGroup> combinedDescriptorsTests(
            new tcu::TestCaseGroup(testCtx, "combined_descriptors"));

        // Whether graphics and compute bind their descriptors in opposite orders.
        struct
        {
            const char *name;
            const bool reverseDescriptors;
        } const orders[] = {{"same_order", false}, {"reverse_order", true}};

        // Number of dynamic offsets, plus five fixed offsets used by the single-offset variant
        // (vertex ubo, shared ubo, fragment ubo, ssbo read, ssbo write).
        struct
        {
            const char *name;
            const uint32_t offsetCount;
            const uint32_t offsets[5];
        } const numOffsets[] = {{"16", 16u, {15u, 7u, 2u, 3u, 5u}},
                                {"64", 64u, {27u, 22u, 45u, 19u, 59u}},
                                {"256", 256u, {197u, 244u, 110u, 238u, 88u}}};

        // Whether the compute or the graphics pipeline executes first.
        struct
        {
            const char *name;
            const bool computeFirst;
        } const pipelineOrders[] = {{"graphics_first", false}, {"compute_first", true}};

        // Run tests for all offsets
        {
            de::MovePtr<tcu::TestCaseGroup> allOffsetsGroup(new tcu::TestCaseGroup(testCtx, "all_offsets"));
            de::MovePtr<tcu::TestCaseGroup> singleOffsetGroup(new tcu::TestCaseGroup(testCtx, "single_offset"));

            for (const auto &order : orders)
            {
                for (const auto &offsets : numOffsets)
                {
                    for (const auto &pipeline : pipelineOrders)
                    {
                        allOffsetsGroup->addChild(new DynamicOffsetMixedTest(
                            testCtx, pipelineConstructionType,
                            std::string(order.name) + "_" + std::string(offsets.name) + "_" + pipeline.name,
                            tcu::IVec2(32, 32), // Render size
                            offsets.offsetCount,
                            true, // All offsets
                            order.reverseDescriptors, pipeline.computeFirst));
                        singleOffsetGroup->addChild(new DynamicOffsetMixedTest(
                            testCtx, pipelineConstructionType,
                            std::string(order.name) + "_" + std::string(offsets.name) + "_" + pipeline.name,
                            tcu::IVec2(32, 32), // Render size
                            offsets.offsetCount,
                            false, // Single offset only
                            order.reverseDescriptors, pipeline.computeFirst,
                            offsets.offsets[0], // For vertex ubo
                            offsets.offsets[1], // For shared ubo (fragment & compute)
                            offsets.offsets[2], // For fragment ubo
                            offsets.offsets[3], // For ssbo read only
                            offsets.offsets[4])); // For ssbo write only
                    }
                }
            }
            combinedDescriptorsTests->addChild(allOffsetsGroup.release());
            combinedDescriptorsTests->addChild(singleOffsetGroup.release());
        }

        dynamicOffsetTests->addChild(combinedDescriptorsTests.release());
    }

    return dynamicOffsetTests.release();
}
2482
2483 } // namespace pipeline
2484 } // namespace vkt
2485