1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory storage buffer tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemStorageBufferTests.hpp"
26
27 #include "deRandom.hpp"
28 #include "deStringUtil.hpp"
29 #include "tcuTestLog.hpp"
30 #include "tcuVector.hpp"
31 #include "tcuStringTemplate.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vktTestCase.hpp"
35 #include "vktTestGroupUtil.hpp"
36 #include "vkTypeUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkCmdUtil.hpp"
39
40 #include "vktProtectedMemBufferValidator.hpp"
41 #include "vktProtectedMemUtils.hpp"
42 #include "vktProtectedMemContext.hpp"
43
44 namespace vkt
45 {
46 namespace ProtectedMem
47 {
48
49 namespace
50 {
51
// Dimensions (in pixels) of the offscreen color target used by the fragment shader path.
enum
{
    RENDER_WIDTH  = 128,
    RENDER_HEIGHT = 128,
};
57
// Number of randomized test cases generated per shader-type/flag combination.
enum
{
    RANDOM_TEST_COUNT = 10,
};
62
// Kind of SSBO access a test case exercises.
enum SSBOTestType
{
    SSBO_READ   = 0, //!< Shader copies the source SSBO into the result SSBO.
    SSBO_WRITE  = 1, //!< Shader writes the uniform input into the result SSBO.
    SSBO_ATOMIC = 2, //!< Shader applies an atomic operation to the result SSBO.
};
69
// Atomic operation variant used by SSBO_ATOMIC test cases.
enum SSBOAtomicType
{
    ATOMIC_ADD      = 0,
    ATOMIC_MIN      = 1,
    ATOMIC_MAX      = 2,
    ATOMIC_AND      = 3,
    ATOMIC_OR       = 4,
    ATOMIC_XOR      = 5,
    ATOMIC_EXCHANGE = 6,
    ATOMIC_COMPSWAP = 7,
};
81
getSSBOTypeString(SSBOTestType type)82 const char *getSSBOTypeString(SSBOTestType type)
83 {
84 switch (type)
85 {
86 case SSBO_READ:
87 return "read";
88 case SSBO_WRITE:
89 return "write";
90 case SSBO_ATOMIC:
91 return "atomic";
92 default:
93 DE_FATAL("Invalid SSBO test type");
94 return "";
95 }
96 }
97
getSSBOAtomicTypeString(SSBOAtomicType type)98 const char *getSSBOAtomicTypeString(SSBOAtomicType type)
99 {
100 switch (type)
101 {
102 case ATOMIC_ADD:
103 return "add";
104 case ATOMIC_MIN:
105 return "min";
106 case ATOMIC_MAX:
107 return "max";
108 case ATOMIC_AND:
109 return "and";
110 case ATOMIC_OR:
111 return "or";
112 case ATOMIC_XOR:
113 return "xor";
114 case ATOMIC_EXCHANGE:
115 return "exchange";
116 case ATOMIC_COMPSWAP:
117 return "compswap";
118 default:
119 DE_FATAL("Invalid SSBO atomic operation type");
120 return "";
121 }
122 }
123
addBufferCopyCmd(const vk::DeviceInterface & vk,vk::VkCommandBuffer cmdBuffer,uint32_t queueFamilyIndex,vk::VkBuffer srcBuffer,vk::VkBuffer dstBuffer,uint32_t copySize,bool dstFragment)124 void static addBufferCopyCmd(const vk::DeviceInterface &vk, vk::VkCommandBuffer cmdBuffer, uint32_t queueFamilyIndex,
125 vk::VkBuffer srcBuffer, vk::VkBuffer dstBuffer, uint32_t copySize, bool dstFragment)
126 {
127 const vk::VkBufferMemoryBarrier dstWriteStartBarrier = {
128 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
129 DE_NULL, // const void* pNext
130 vk::VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask
131 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags dstAccessMask
132 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
133 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
134 srcBuffer, // VkBuffer buffer
135 0u, // VkDeviceSize offset
136 VK_WHOLE_SIZE, // VkDeviceSize size
137 };
138
139 vk.cmdPipelineBarrier(cmdBuffer,
140 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
141 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // dstStageMask
142 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &dstWriteStartBarrier,
143 0, (const vk::VkImageMemoryBarrier *)DE_NULL);
144
145 const vk::VkBufferCopy copyRegion = {
146 0, // VkDeviceSize srcOffset
147 0, // VkDeviceSize dstOffset
148 copySize // VkDeviceSize size
149 };
150 vk.cmdCopyBuffer(cmdBuffer, srcBuffer, dstBuffer, 1, ©Region);
151
152 const vk::VkBufferMemoryBarrier dstWriteEndBarrier = {
153 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
154 DE_NULL, // const void* pNext
155 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags srcAccessMask
156 vk::VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask
157 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
158 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
159 dstBuffer, // VkBuffer buffer
160 0u, // VkDeviceSize offset
161 VK_WHOLE_SIZE, // VkDeviceSize size
162 };
163
164 vk.cmdPipelineBarrier(cmdBuffer,
165 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // srcStageMask
166 dstFragment ? vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT :
167 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, // dstStageMask
168 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &dstWriteEndBarrier, 0,
169 (const vk::VkImageMemoryBarrier *)DE_NULL);
170 }
171
getProtectionMode(const vk::VkPipelineCreateFlags flags)172 ProtectionMode getProtectionMode(const vk::VkPipelineCreateFlags flags)
173 {
174 #ifndef CTS_USES_VULKANSC
175 if ((flags & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0)
176 {
177 return ProtectionMode::PROTECTION_DISABLED;
178 }
179 #endif
180 DE_UNREF(flags);
181 return ProtectionMode::PROTECTION_ENABLED;
182 }
183
// Test instance executing one SSBO read/write/atomic case either via a fragment
// shader draw or a compute dispatch, then validating the result buffer contents.
template <typename T>
class StorageBufferTestInstance : public ProtectedTestInstance
{
public:
    StorageBufferTestInstance(Context &ctx, const SSBOTestType testType, const glu::ShaderType shaderType,
                              const tcu::UVec4 testInput, const BufferValidator<T> &validator,
                              const bool pipelineProtectedAccess, const vk::VkPipelineCreateFlags pipelineFlags);
    virtual tcu::TestStatus iterate(void);

private:
    // Draw-based path: renders a 4-point draw whose shaders perform the SSBO access.
    tcu::TestStatus executeFragmentTest(void);
    // Dispatch-based path: runs the SSBO access from a compute shader.
    tcu::TestStatus executeComputeTest(void);

    const SSBOTestType m_testType;
    const glu::ShaderType m_shaderType;
    const tcu::UVec4 m_testInput;          // Input values uploaded to the host-visible uniform buffer.
    const BufferValidator<T> &m_validator; // Owned by the test case; checks the result SSBO.
    const vk::VkFormat m_imageFormat;      // Color attachment format for the fragment path.
    const vk::VkPipelineCreateFlags m_pipelineFlags;
    const ProtectionMode m_protectionMode; // Derived from m_pipelineFlags via getProtectionMode().
};
205
// Test case node: stores the validation data, builds the shader sources and
// instantiates StorageBufferTestInstance.
template <typename T>
class StorageBufferTestCase : public TestCase
{
public:
    // extraShader carries the atomic-call snippet substituted into the atomic
    // shader template; it is unused for read/write tests.
    StorageBufferTestCase(tcu::TestContext &testctx, const SSBOTestType testType, const glu::ShaderType shaderType,
                          const char *name, const tcu::UVec4 testInput, ValidationDataStorage<T> validationData,
                          vk::VkFormat format, bool pipelineProtectedAccess, vk::VkPipelineCreateFlags pipelineFlags,
                          const std::string &extraShader = "")
        : TestCase(testctx, name)
        , m_testType(testType)
        , m_shaderType(shaderType)
        , m_testInput(testInput)
        , m_validator(validationData, format)
        , m_pipelineProtectedAccess(pipelineProtectedAccess)
        , m_pipelineFlags(pipelineFlags)
        , m_extraShader(extraShader)
        , m_protectionMode(getProtectionMode(m_pipelineFlags))
    {
    }
    virtual TestInstance *createInstance(Context &ctx) const
    {
        return new StorageBufferTestInstance<T>(ctx, m_testType, m_shaderType, m_testInput, m_validator,
                                                m_pipelineProtectedAccess, m_pipelineFlags);
    }
    virtual void initPrograms(vk::SourceCollections &programCollection) const;
    // Skips the test when the implementation has no protected queue.
    virtual void checkSupport(Context &context) const
    {
        checkProtectedQueueSupport(context);
    }

    virtual ~StorageBufferTestCase(void)
    {
    }

private:
    const SSBOTestType m_testType;
    const glu::ShaderType m_shaderType;
    const tcu::UVec4 m_testInput;
    const BufferValidator<T> m_validator;
    const bool m_pipelineProtectedAccess;
    const vk::VkPipelineCreateFlags m_pipelineFlags;
    const std::string m_extraShader; // Atomic-call snippet for SSBO_ATOMIC tests.
    const ProtectionMode m_protectionMode;
};
250
// Requests the VK_EXT_pipeline_protected_access device extension only when the
// test exercises that path; otherwise no extra extensions are required.
template <typename T>
StorageBufferTestInstance<T>::StorageBufferTestInstance(Context &ctx, const SSBOTestType testType,
                                                        const glu::ShaderType shaderType, const tcu::UVec4 testInput,
                                                        const BufferValidator<T> &validator,
                                                        const bool pipelineProtectedAccess,
                                                        const vk::VkPipelineCreateFlags pipelineFlags)
    : ProtectedTestInstance(ctx, pipelineProtectedAccess ?
                                     std::vector<std::string>({"VK_EXT_pipeline_protected_access"}) :
                                     std::vector<std::string>())
    , m_testType(testType)
    , m_shaderType(shaderType)
    , m_testInput(testInput)
    , m_validator(validator)
    , m_imageFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)
    , m_pipelineFlags(pipelineFlags)
    , m_protectionMode(getProtectionMode(m_pipelineFlags))
{
}
269
// Builds the GLSL sources: a fixed vertex shader (fragment path only) plus one
// test shader chosen by m_testType, specialized via template parameters for the
// fragment or compute stage. Also registers the validator's programs.
template <typename T>
void StorageBufferTestCase<T>::initPrograms(vk::SourceCollections &programCollection) const
{
    // Emits four points and forwards gl_VertexIndex so the fragment shader can
    // use it as an invocation index.
    const char *vertexShader =
        "#version 450\n"
        "layout(location=0) out vec4 vIndex;\n"
        "void main() {\n"
        " vec2 pos[4] = vec2[4]( vec2(-0.7, 0.7), vec2(0.7, 0.7), vec2(0.0, -0.7), vec2(-0.7, -0.7) );\n"
        " vIndex = vec4(gl_VertexIndex);\n"
        " gl_PointSize = 1.0;\n"
        " gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n"
        "}";

    // set = 0, binding = 0 -> buffer ProtectedTestBuffer (uvec4)
    // set = 0, binding = 2 -> buffer ProtectedTestBufferSource (uvec4)
    const char *readShaderTemplateStr = "#version 450\n"
                                        "${INPUT_DECLARATION}\n"
                                        "\n"
                                        "layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
                                        "{\n"
                                        " highp uvec4 protectedTestResultBuffer;\n"
                                        "};\n"
                                        "\n"
                                        "layout(set=0, binding=2, std140) buffer ProtectedTestBufferSource\n"
                                        "{\n"
                                        " highp uvec4 protectedTestBufferSource;\n"
                                        "};\n"
                                        "\n"
                                        "void main (void)\n"
                                        "{\n"
                                        " protectedTestResultBuffer = protectedTestBufferSource;\n"
                                        " ${FRAGMENT_OUTPUT}\n"
                                        "}\n";

    // set = 0, binding = 0 -> buffer ProtectedTestBuffer (uvec4)
    // set = 0, binding = 1 -> uniform Data (uvec4)
    const char *writeShaderTemplateStr = "#version 450\n"
                                         "${INPUT_DECLARATION}\n"
                                         "\n"
                                         "layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
                                         "{\n"
                                         " highp uvec4 protectedTestResultBuffer;\n"
                                         "};\n"
                                         "\n"
                                         "layout(set=0, binding=1, std140) uniform Data\n"
                                         "{\n"
                                         " highp uvec4 testInput;\n"
                                         "};\n"
                                         "\n"
                                         "void main (void)\n"
                                         "{\n"
                                         " protectedTestResultBuffer = testInput;\n"
                                         " ${FRAGMENT_OUTPUT}\n"
                                         "}\n";

    // set = 0, binding = 0 -> buffer ProtectedTestBuffer (uint [4])
    // ${ATOMIC_FUNCTION_CALL} is supplied by m_extraShader; ${INVOCATION_ID}
    // indexes the element each invocation operates on.
    const char *atomicTestShaderTemplateStr = "#version 450\n"
                                              "${INPUT_DECLARATION}\n"
                                              "\n"
                                              "layout(set=0, binding=0, std430) buffer ProtectedTestBuffer\n"
                                              "{\n"
                                              " highp uint protectedTestResultBuffer[4];\n"
                                              "};\n"
                                              "\n"
                                              "void main (void)\n"
                                              "{\n"
                                              " uint i = uint(${INVOCATION_ID});\n"
                                              " ${ATOMIC_FUNCTION_CALL}\n"
                                              " ${FRAGMENT_OUTPUT}\n"
                                              "}\n";

    // Pick the template matching the test type.
    const char *shaderTemplateStr;
    std::map<std::string, std::string> shaderParam;
    switch (m_testType)
    {
    case SSBO_READ:
        shaderTemplateStr = readShaderTemplateStr;
        break;
    case SSBO_WRITE:
        shaderTemplateStr = writeShaderTemplateStr;
        break;
    case SSBO_ATOMIC:
    {
        shaderTemplateStr                    = atomicTestShaderTemplateStr;
        shaderParam["ATOMIC_FUNCTION_CALL"] = m_extraShader;
        break;
    }
    default:
        DE_FATAL("Incorrect SSBO test type");
        return;
    }

    if (m_shaderType == glu::SHADERTYPE_FRAGMENT)
    {
        // Fragment path: invocation id comes from the interpolated vertex index.
        shaderParam["INPUT_DECLARATION"] = "layout(location=0) out mediump vec4 o_color;\n"
                                           "layout(location=0) in vec4 vIndex;\n";
        shaderParam["FRAGMENT_OUTPUT"]   = "o_color = vec4( 0.0, 0.4, 1.0, 1.0 );\n";
        shaderParam["INVOCATION_ID"]     = "vIndex.x";

        programCollection.glslSources.add("vert") << glu::VertexSource(vertexShader);
        programCollection.glslSources.add("TestShader")
            << glu::FragmentSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
    }
    else if (m_shaderType == glu::SHADERTYPE_COMPUTE)
    {
        // Compute path: 1x1x1 workgroups, invocation id from gl_GlobalInvocationID.
        shaderParam["INPUT_DECLARATION"] = "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
        shaderParam["FRAGMENT_OUTPUT"]   = "";
        shaderParam["INVOCATION_ID"]     = "gl_GlobalInvocationID.x";
        programCollection.glslSources.add("TestShader")
            << glu::ComputeSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
    }
    else
        DE_FATAL("Incorrect shader type");

    m_validator.initPrograms(programCollection);
}
386
// Draw-based execution path: uploads the input to a host-visible uniform buffer,
// optionally copies it into the (possibly protected) SSBO(s), renders a 4-point
// draw whose fragment shader performs the SSBO access, and validates the result.
template <typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeFragmentTest(void)
{
    ProtectedContext &ctx(m_protectedContext);
    const vk::DeviceInterface &vk   = ctx.getDeviceInterface();
    const vk::VkDevice device       = ctx.getDevice();
    const vk::VkQueue queue         = ctx.getQueue();
    const uint32_t queueFamilyIndex = ctx.getQueueFamilyIndex();

    // The input uniform is deliberately unprotected so the host can write it.
    const uint32_t testUniformSize = sizeof(m_testInput);
    de::UniquePtr<vk::BufferWithMemory> testUniform(
        makeBuffer(ctx, PROTECTION_DISABLED, queueFamilyIndex, testUniformSize,
                   vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                   vk::MemoryRequirement::HostVisible));

    // Set the test input uniform data
    {
        deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
        vk::flushAlloc(vk, device, testUniform->getAllocation());
    }

    // Protected runs place the SSBOs in protected memory.
    const vk::MemoryRequirement *memoryRequirement = &vk::MemoryRequirement::Any;
    if (m_protectionMode == PROTECTION_ENABLED)
    {
        memoryRequirement = &vk::MemoryRequirement::Protected;
    }

    const uint32_t testBufferSize = sizeof(ValidationDataStorage<T>);
    de::MovePtr<vk::BufferWithMemory> testBuffer(
        makeBuffer(ctx, m_protectionMode, queueFamilyIndex, testBufferSize,
                   vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, *memoryRequirement));
    de::MovePtr<vk::BufferWithMemory> testBufferSource(
        makeBuffer(ctx, m_protectionMode, queueFamilyIndex, testBufferSize,
                   vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, *memoryRequirement));

    vk::Move<vk::VkShaderModule> vertexShader(
        vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0));
    vk::Unique<vk::VkShaderModule> testShader(
        vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

    // Create descriptors: binding 0 = result SSBO, binding 1 = input UBO, binding 2 = source SSBO.
    vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(
        vk::DescriptorSetLayoutBuilder()
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL)
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
            .build(vk, device));
    vk::Unique<vk::VkDescriptorPool> descriptorPool(
        vk::DescriptorPoolBuilder()
            .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
            .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
            .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
            .build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
    vk::Unique<vk::VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

    // Update descriptor set information
    {
        vk::VkDescriptorBufferInfo descTestBuffer  = makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
        vk::VkDescriptorBufferInfo descTestUniform = makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
        vk::VkDescriptorBufferInfo descTestBufferSource =
            makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

        vk::DescriptorSetUpdateBuilder()
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                         vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u),
                         vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u),
                         vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
            .update(vk, device);
    }

    // Create output image
    de::MovePtr<vk::ImageWithMemory> colorImage(
        createImage2D(ctx, m_protectionMode, queueFamilyIndex, RENDER_WIDTH, RENDER_HEIGHT, m_imageFormat,
                      vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_SAMPLED_BIT));
    vk::Unique<vk::VkImageView> colorImageView(createImageView(ctx, **colorImage, m_imageFormat));
    vk::Unique<vk::VkRenderPass> renderPass(createRenderPass(ctx, m_imageFormat));
    vk::Unique<vk::VkFramebuffer> framebuffer(
        createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));

    // Build pipeline
    vk::Unique<vk::VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
    vk::Unique<vk::VkCommandPool> cmdPool(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
    vk::Unique<vk::VkCommandBuffer> cmdBuffer(
        vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Create pipeline
    vk::Unique<vk::VkPipeline> graphicsPipeline(makeGraphicsPipeline(
        vk, device, *pipelineLayout, *renderPass, *vertexShader, *testShader,
        std::vector<vk::VkVertexInputBindingDescription>(), std::vector<vk::VkVertexInputAttributeDescription>(),
        tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, m_pipelineFlags));

    beginCommandBuffer(vk, *cmdBuffer);

    // Read tests need the source SSBO pre-filled; atomic tests need the result
    // SSBO pre-filled with the input values.
    if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
    {
        vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
        addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, true);
    }

    // Start image barrier: UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL before rendering.
    {
        const vk::VkImageMemoryBarrier startImgBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
                                                          DE_NULL,                                    // pNext
                                                          0,                                          // srcAccessMask
                                                          vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,   // dstAccessMask
                                                          vk::VK_IMAGE_LAYOUT_UNDEFINED,              // oldLayout
                                                          vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
                                                          queueFamilyIndex, // srcQueueFamilyIndex
                                                          queueFamilyIndex, // dstQueueFamilyIndex
                                                          **colorImage,     // image
                                                          {
                                                              vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
                                                              0u,                            // baseMipLevel
                                                              1u,                            // levelCount
                                                              0u,                            // baseArrayLayer
                                                              1u,                            // layerCount
                                                          }};

        vk.cmdPipelineBarrier(*cmdBuffer,
                              vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,             // srcStageMask
                              vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
                              (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                              (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &startImgBarrier);
    }

    beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT),
                    tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f));
    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
    vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet,
                             0u, DE_NULL);

    // One point per SSBO element; the fragment shader performs the actual access.
    vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
    endRenderPass(vk, *cmdBuffer);

    // End image barrier: make the attachment readable by later validation.
    {
        const vk::VkImageMemoryBarrier endImgBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
                                                        DE_NULL,                                    // pNext
                                                        vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,   // srcAccessMask
                                                        vk::VK_ACCESS_SHADER_READ_BIT,              // dstAccessMask
                                                        vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
                                                        vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // newLayout
                                                        queueFamilyIndex, // srcQueueFamilyIndex
                                                        queueFamilyIndex, // dstQueueFamilyIndex
                                                        **colorImage,     // image
                                                        {
                                                            vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
                                                            0u,                            // baseMipLevel
                                                            1u,                            // levelCount
                                                            0u,                            // baseArrayLayer
                                                            1u,                            // layerCount
                                                        }};
        vk.cmdPipelineBarrier(*cmdBuffer,
                              vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
                              vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,            // dstStageMask
                              (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                              (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &endImgBarrier);
    }

    endCommandBuffer(vk, *cmdBuffer);

    // Execute Draw
    {
        // NOTE(review): fences are created unsignaled, so this reset looks redundant -- harmless.
        const vk::Unique<vk::VkFence> fence(vk::createFence(vk, device));
        VK_CHECK(vk.resetFences(device, 1, &fence.get()));
        VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
    }

    // Log inputs
    ctx.getTestContext().getLog() << tcu::TestLog::Message << "Input values: \n"
                                  << "1: " << m_testInput << "\n"
                                  << tcu::TestLog::EndMessage;

    // Validate buffer
    if (m_validator.validateBuffer(ctx, **testBuffer))
        return tcu::TestStatus::pass("Everything went OK");
    else
        return tcu::TestStatus::fail("Something went really wrong");
}
567
// Dispatch-based execution path: same buffer setup as the fragment path, but the
// SSBO access runs in a compute shader (one invocation per dispatched workgroup).
template <typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeComputeTest(void)
{
    ProtectedContext &ctx(m_protectedContext);
    const vk::DeviceInterface &vk   = ctx.getDeviceInterface();
    const vk::VkDevice device       = ctx.getDevice();
    const vk::VkQueue queue         = ctx.getQueue();
    const uint32_t queueFamilyIndex = ctx.getQueueFamilyIndex();

    // The input uniform is deliberately unprotected so the host can write it.
    const uint32_t testUniformSize = sizeof(m_testInput);
    de::UniquePtr<vk::BufferWithMemory> testUniform(
        makeBuffer(ctx, PROTECTION_DISABLED, queueFamilyIndex, testUniformSize,
                   vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                   vk::MemoryRequirement::HostVisible));

    // Set the test input uniform data
    {
        deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
        vk::flushAlloc(vk, device, testUniform->getAllocation());
    }

    // Protected runs place the SSBOs in protected memory.
    const vk::MemoryRequirement *memoryRequirement = &vk::MemoryRequirement::Any;
    if (m_protectionMode == PROTECTION_ENABLED)
    {
        memoryRequirement = &vk::MemoryRequirement::Protected;
    }

    const uint32_t testBufferSize = sizeof(ValidationDataStorage<T>);
    de::MovePtr<vk::BufferWithMemory> testBuffer(
        makeBuffer(ctx, m_protectionMode, queueFamilyIndex, testBufferSize,
                   vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, *memoryRequirement));
    de::MovePtr<vk::BufferWithMemory> testBufferSource(
        makeBuffer(ctx, m_protectionMode, queueFamilyIndex, testBufferSize,
                   vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, *memoryRequirement));

    vk::Unique<vk::VkShaderModule> testShader(
        vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

    // Create descriptors: binding 0 = result SSBO, binding 1 = input UBO, binding 2 = source SSBO.
    vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(
        vk::DescriptorSetLayoutBuilder()
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
            .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
            .build(vk, device));
    vk::Unique<vk::VkDescriptorPool> descriptorPool(
        vk::DescriptorPoolBuilder()
            .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
            .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
            .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
            .build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
    vk::Unique<vk::VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

    // Update descriptor set information
    {
        vk::VkDescriptorBufferInfo descTestBuffer  = makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
        vk::VkDescriptorBufferInfo descTestUniform = makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
        vk::VkDescriptorBufferInfo descTestBufferSource =
            makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

        vk::DescriptorSetUpdateBuilder()
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                         vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u),
                         vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
            .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u),
                         vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
            .update(vk, device);
    }

    // Build and execute test
    {
        const vk::Unique<vk::VkFence> fence(vk::createFence(vk, device));
        vk::Unique<vk::VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
        vk::Unique<vk::VkPipeline> SSBOPipeline(makeComputePipeline(vk, device, *pipelineLayout, m_pipelineFlags,
                                                                    nullptr, *testShader,
                                                                    (vk::VkPipelineShaderStageCreateFlags)0u));
        vk::Unique<vk::VkCommandPool> cmdPool(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
        vk::Unique<vk::VkCommandBuffer> cmdBuffer(
            vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        // Atomic tests dispatch one workgroup per element of the uint[4] result buffer.
        uint32_t dispatchCount = (m_testType == SSBO_ATOMIC) ? 4u : 1u;

        beginCommandBuffer(vk, *cmdBuffer);

        // Read tests need the source SSBO pre-filled; atomic tests need the result
        // SSBO pre-filled with the input values.
        if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
        {
            vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
            addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, false);
        }

        vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *SSBOPipeline);
        vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
                                 &*descriptorSet, 0u, DE_NULL);

        vk.cmdDispatch(*cmdBuffer, dispatchCount, 1u, 1u);

        endCommandBuffer(vk, *cmdBuffer);
        VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
    }

    // Log inputs
    ctx.getTestContext().getLog() << tcu::TestLog::Message << "Input values: \n"
                                  << "1: " << m_testInput << "\n"
                                  << tcu::TestLog::EndMessage;

    // Validate buffer
    if (m_validator.validateBuffer(ctx, **testBuffer))
        return tcu::TestStatus::pass("Everything went OK");
    else
        return tcu::TestStatus::fail("Something went really wrong");
}
678
679 template <typename T>
iterate(void)680 tcu::TestStatus StorageBufferTestInstance<T>::iterate(void)
681 {
682 switch (m_shaderType)
683 {
684 case glu::SHADERTYPE_FRAGMENT:
685 return executeFragmentTest();
686 case glu::SHADERTYPE_COMPUTE:
687 return executeComputeTest();
688 default:
689 DE_FATAL("Incorrect shader type");
690 return tcu::TestStatus::fail("");
691 }
692 }
693
createSpecifiedStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,SSBOTestType testType,const glu::ShaderType shaderType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)694 tcu::TestCaseGroup *createSpecifiedStorageBufferTests(tcu::TestContext &testCtx, const std::string groupName,
695 SSBOTestType testType, const glu::ShaderType shaderType,
696 const ValidationDataStorage<tcu::UVec4> testData[],
697 size_t testCount, bool pipelineProtectedAccess,
698 vk::VkPipelineCreateFlags pipelineFlags)
699 {
700 const std::string testTypeStr = getSSBOTypeString(testType);
701 const std::string description = "Storage buffer " + testTypeStr + " tests";
702 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
703
704 for (size_t ndx = 0; ndx < testCount; ++ndx)
705 {
706 const std::string name = testTypeStr + "_" + de::toString(ndx + 1);
707 testGroup->addChild(new StorageBufferTestCase<tcu::UVec4>(
708 testCtx, testType, shaderType, name.c_str(), testData[ndx].values, testData[ndx],
709 vk::VK_FORMAT_R32G32B32A32_UINT, pipelineProtectedAccess, pipelineFlags));
710 }
711
712 return testGroup.release();
713 }
714
createRandomizedBufferTests(tcu::TestContext & testCtx,SSBOTestType testType,const glu::ShaderType shaderType,size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)715 tcu::TestCaseGroup *createRandomizedBufferTests(tcu::TestContext &testCtx, SSBOTestType testType,
716 const glu::ShaderType shaderType, size_t testCount,
717 bool pipelineProtectedAccess, vk::VkPipelineCreateFlags pipelineFlags)
718 {
719 de::Random rnd(testCtx.getCommandLine().getBaseSeed());
720 std::vector<ValidationDataStorage<tcu::UVec4>> testData;
721 testData.resize(testCount);
722
723 for (size_t ndx = 0; ndx < testCount; ++ndx)
724 for (uint32_t compIdx = 0; compIdx < 4; ++compIdx)
725 testData[ndx].values[compIdx] = rnd.getUint32();
726
727 return createSpecifiedStorageBufferTests(testCtx, "random", testType, shaderType, testData.data(), testData.size(),
728 pipelineProtectedAccess, pipelineFlags);
729 }
730
// Sub-group axis: whether VK_EXT_pipeline_protected_access is exercised.
// The extension variant is excluded from Vulkan SC builds.
struct
{
    bool pipelineProtectedAccess;
    const char *name;
} protectedAccess[] = {
    {false, "default"},
#ifndef CTS_USES_VULKANSC
    {true, "protected_access"},
#endif
};
// Sub-group axis: pipeline create flags to test. The EXT flags only exist
// outside Vulkan SC builds.
struct
{
    vk::VkPipelineCreateFlags pipelineFlags;
    const char *name;
} flags[] = {
    {(vk::VkPipelineCreateFlagBits)0u, "none"},
#ifndef CTS_USES_VULKANSC
    {vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
    {vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, "no_protected_access"},
#endif
};
752
createRWStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,SSBOTestType testType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount)753 tcu::TestCaseGroup *createRWStorageBufferTests(tcu::TestContext &testCtx, const std::string groupName,
754 SSBOTestType testType,
755 const ValidationDataStorage<tcu::UVec4> testData[], size_t testCount)
756 {
757 de::MovePtr<tcu::TestCaseGroup> ssboRWTestGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
758
759 glu::ShaderType shaderTypes[] = {glu::SHADERTYPE_FRAGMENT, glu::SHADERTYPE_COMPUTE};
760
761 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx)
762 {
763 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(
764 new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
765 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx)
766 {
767 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
768 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u)
769 continue;
770
771 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
772 {
773 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
774 const std::string shaderName = glu::getShaderTypeName(shaderType);
775 const std::string shaderGroupDesc = "Storage buffer tests for shader type: " + shaderName;
776 de::MovePtr<tcu::TestCaseGroup> testShaderGroup(new tcu::TestCaseGroup(testCtx, shaderName.c_str()));
777
778 testShaderGroup->addChild(createSpecifiedStorageBufferTests(
779 testCtx, "static", testType, shaderType, testData, testCount,
780 protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
781 testShaderGroup->addChild(createRandomizedBufferTests(
782 testCtx, testType, shaderType, RANDOM_TEST_COUNT,
783 protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
784 flagsGroup->addChild(testShaderGroup.release());
785 }
786 protectedAccessGroup->addChild(flagsGroup.release());
787 }
788 ssboRWTestGroup->addChild(protectedAccessGroup.release());
789 }
790
791 return ssboRWTestGroup.release();
792 }
793
calculateAtomicOpData(SSBOAtomicType type,const tcu::UVec4 & inputValue,const uint32_t atomicArg,std::string & atomicCall,tcu::UVec4 & refValue,const uint32_t swapNdx=0)794 void calculateAtomicOpData(SSBOAtomicType type, const tcu::UVec4 &inputValue, const uint32_t atomicArg,
795 std::string &atomicCall, tcu::UVec4 &refValue, const uint32_t swapNdx = 0)
796 {
797 switch (type)
798 {
799 case ATOMIC_ADD:
800 {
801 refValue = inputValue + tcu::UVec4(atomicArg);
802 atomicCall = "atomicAdd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
803 break;
804 }
805 case ATOMIC_MIN:
806 {
807 refValue = tcu::UVec4(std::min(inputValue.x(), atomicArg), std::min(inputValue.y(), atomicArg),
808 std::min(inputValue.z(), atomicArg), std::min(inputValue.w(), atomicArg));
809 atomicCall = "atomicMin(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
810 break;
811 }
812 case ATOMIC_MAX:
813 {
814 refValue = tcu::UVec4(std::max(inputValue.x(), atomicArg), std::max(inputValue.y(), atomicArg),
815 std::max(inputValue.z(), atomicArg), std::max(inputValue.w(), atomicArg));
816 atomicCall = "atomicMax(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
817 break;
818 }
819 case ATOMIC_AND:
820 {
821 refValue = tcu::UVec4(inputValue.x() & atomicArg, inputValue.y() & atomicArg, inputValue.z() & atomicArg,
822 inputValue.w() & atomicArg);
823 atomicCall = "atomicAnd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
824 break;
825 }
826 case ATOMIC_OR:
827 {
828 refValue = tcu::UVec4(inputValue.x() | atomicArg, inputValue.y() | atomicArg, inputValue.z() | atomicArg,
829 inputValue.w() | atomicArg);
830 atomicCall = "atomicOr(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
831 break;
832 }
833 case ATOMIC_XOR:
834 {
835 refValue = tcu::UVec4(inputValue.x() ^ atomicArg, inputValue.y() ^ atomicArg, inputValue.z() ^ atomicArg,
836 inputValue.w() ^ atomicArg);
837 atomicCall = "atomicXor(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
838 break;
839 }
840 case ATOMIC_EXCHANGE:
841 {
842 refValue = tcu::UVec4(atomicArg);
843 atomicCall = "atomicExchange(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
844 break;
845 }
846 case ATOMIC_COMPSWAP:
847 {
848 int selectedNdx = swapNdx % 4;
849 uint32_t selectedChange = inputValue[selectedNdx];
850
851 refValue = inputValue;
852 refValue[selectedNdx] = atomicArg;
853 atomicCall = "atomicCompSwap(protectedTestResultBuffer[i], " + de::toString(selectedChange) + "u, " +
854 de::toString(atomicArg) + "u);";
855 break;
856 }
857 default:
858 DE_FATAL("Incorrect atomic function type");
859 break;
860 }
861 }
862
863 } // namespace
864
createReadStorageBufferTests(tcu::TestContext & testCtx)865 tcu::TestCaseGroup *createReadStorageBufferTests(tcu::TestContext &testCtx)
866 {
867 const ValidationDataStorage<tcu::UVec4> testData[] = {{tcu::UVec4(0u, 0u, 0u, 0u)}, {tcu::UVec4(1u, 0u, 0u, 0u)},
868 {tcu::UVec4(0u, 1u, 0u, 0u)}, {tcu::UVec4(0u, 0u, 1u, 0u)},
869 {tcu::UVec4(0u, 0u, 0u, 1u)}, {tcu::UVec4(1u, 1u, 1u, 1u)}};
870
871 // Storage Buffer Read Tests
872 return createRWStorageBufferTests(testCtx, "ssbo_read", SSBO_READ, testData, DE_LENGTH_OF_ARRAY(testData));
873 }
874
createWriteStorageBufferTests(tcu::TestContext & testCtx)875 tcu::TestCaseGroup *createWriteStorageBufferTests(tcu::TestContext &testCtx)
876 {
877 const ValidationDataStorage<tcu::UVec4> testData[] = {{tcu::UVec4(0u, 0u, 0u, 0u)}, {tcu::UVec4(1u, 0u, 0u, 0u)},
878 {tcu::UVec4(0u, 1u, 0u, 0u)}, {tcu::UVec4(0u, 0u, 1u, 0u)},
879 {tcu::UVec4(0u, 0u, 0u, 1u)}, {tcu::UVec4(1u, 1u, 1u, 1u)}};
880
881 // Storage Buffer Write Tests
882 return createRWStorageBufferTests(testCtx, "ssbo_write", SSBO_WRITE, testData, DE_LENGTH_OF_ARRAY(testData));
883 }
884
createAtomicStorageBufferTests(tcu::TestContext & testCtx)885 tcu::TestCaseGroup *createAtomicStorageBufferTests(tcu::TestContext &testCtx)
886 {
887 struct
888 {
889 const tcu::UVec4 input;
890 const uint32_t atomicArg;
891 const uint32_t swapNdx;
892 } testData[] = {
893 {tcu::UVec4(0u, 1u, 2u, 3u), 10u, 0u},
894 {tcu::UVec4(10u, 20u, 30u, 40u), 3u, 2u},
895 {tcu::UVec4(800u, 400u, 230u, 999u), 50u, 3u},
896 {tcu::UVec4(100800u, 233400u, 22230u, 77999u), 800u, 1u},
897 };
898
899 SSBOAtomicType testTypes[] = {ATOMIC_ADD, ATOMIC_MIN, ATOMIC_MAX, ATOMIC_AND,
900 ATOMIC_OR, ATOMIC_XOR, ATOMIC_EXCHANGE, ATOMIC_COMPSWAP};
901
902 glu::ShaderType shaderTypes[] = {glu::SHADERTYPE_FRAGMENT, glu::SHADERTYPE_COMPUTE};
903
904 de::Random rnd(testCtx.getCommandLine().getBaseSeed());
905 // Storage Buffer Atomic Tests
906 de::MovePtr<tcu::TestCaseGroup> ssboAtomicTests(new tcu::TestCaseGroup(testCtx, "ssbo_atomic"));
907
908 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
909 {
910 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
911 const std::string shaderName = glu::getShaderTypeName(shaderType);
912 const std::string shaderDesc = "Storage Buffer Atomic Tests for shader type: " + shaderName;
913 de::MovePtr<tcu::TestCaseGroup> atomicShaderGroup(new tcu::TestCaseGroup(testCtx, shaderName.c_str()));
914
915 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx)
916 {
917 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(
918 new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
919 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx)
920 {
921 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
922 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u)
923 continue;
924
925 for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(testTypes); ++typeNdx)
926 {
927 SSBOAtomicType atomicType = testTypes[typeNdx];
928 const std::string atomicTypeStr = getSSBOAtomicTypeString(atomicType);
929 const std::string atomicDesc = "Storage Buffer Atomic Tests: " + atomicTypeStr;
930
931 de::MovePtr<tcu::TestCaseGroup> staticTests(new tcu::TestCaseGroup(testCtx, "static"));
932 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(testData); ++ndx)
933 {
934 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
935 const tcu::UVec4 &inputValue = testData[ndx].input;
936 const uint32_t &atomicArg = testData[ndx].atomicArg;
937 std::string atomicCall;
938 tcu::UVec4 refValue;
939
940 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue,
941 testData[ndx].swapNdx);
942
943 ValidationDataStorage<tcu::UVec4> validationData = {refValue};
944 staticTests->addChild(new StorageBufferTestCase<tcu::UVec4>(
945 testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData,
946 vk::VK_FORMAT_R32G32B32A32_UINT,
947 protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags,
948 atomicCall));
949 }
950
951 de::MovePtr<tcu::TestCaseGroup> randomTests(new tcu::TestCaseGroup(testCtx, "random"));
952 for (int ndx = 0; ndx < RANDOM_TEST_COUNT; ndx++)
953 {
954 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
955 uint32_t atomicArg = rnd.getUint16();
956 tcu::UVec4 inputValue;
957 tcu::UVec4 refValue;
958 std::string atomicCall;
959
960 for (int i = 0; i < 4; i++)
961 inputValue[i] = rnd.getUint16();
962
963 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, ndx);
964
965 ValidationDataStorage<tcu::UVec4> validationData = {refValue};
966 randomTests->addChild(new StorageBufferTestCase<tcu::UVec4>(
967 testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData,
968 vk::VK_FORMAT_R32G32B32A32_UINT,
969 protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags,
970 atomicCall));
971 }
972
973 de::MovePtr<tcu::TestCaseGroup> atomicTests(new tcu::TestCaseGroup(testCtx, atomicTypeStr.c_str()));
974 atomicTests->addChild(staticTests.release());
975 atomicTests->addChild(randomTests.release());
976 flagsGroup->addChild(atomicTests.release());
977 }
978 protectedAccessGroup->addChild(flagsGroup.release());
979 }
980 atomicShaderGroup->addChild(protectedAccessGroup.release());
981 }
982 ssboAtomicTests->addChild(atomicShaderGroup.release());
983 }
984
985 return ssboAtomicTests.release();
986 }
987
988 } // namespace ProtectedMem
989 } // namespace vkt
990