1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2018 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Robust buffer access tests for storage buffers and
22 * storage texel buffers with variable pointers.
23 *
24 * \note These tests are checking if accessing a memory through a variable
25 * pointer that points outside of accessible buffer memory is robust.
26 * To do this the tests are creating proper SPIRV code that creates
27 * variable pointers. Those pointers are either pointing into a
28 * memory allocated for a buffer but "not accessible" - meaning
29 * DescriptorBufferInfo has smaller size than a memory we access in
30 * shader or entirely outside of allocated memory (i.e. buffer is
31 * 256 bytes big but we are trying to access under offset of 1k from
32 * buffer start). There is a set of valid behaviours defined when
33 * robust buffer access extension is enabled described in chapter 32
34 * section 1 of Vulkan spec.
35 *
36 *//*--------------------------------------------------------------------*/
37
38 #include "vktRobustBufferAccessWithVariablePointersTests.hpp"
39 #include "vktRobustnessUtil.hpp"
40 #include "vktTestCaseUtil.hpp"
41 #include "vkBuilderUtil.hpp"
42 #include "vkImageUtil.hpp"
43 #include "vkPrograms.hpp"
44 #include "vkQueryUtil.hpp"
45 #include "vkDeviceUtil.hpp"
46 #include "vkRef.hpp"
47 #include "vkRefUtil.hpp"
48 #include "vkTypeUtil.hpp"
49 #include "tcuTestLog.hpp"
50 #include "vkDefs.hpp"
51 #include "deRandom.hpp"
52
53 #include <limits>
54 #include <sstream>
55
56 namespace vkt
57 {
58 namespace robustness
59 {
60
61 using namespace vk;
62
63 // keep local things local
64 namespace
65 {
66
67 // Creates a custom device with robust buffer access and variable pointer features.
createRobustBufferAccessVariablePointersDevice(Context & context,const vkt::CustomInstance & customInstance)68 Move<VkDevice> createRobustBufferAccessVariablePointersDevice(Context &context
69 #ifdef CTS_USES_VULKANSC
70 ,
71 const vkt::CustomInstance &customInstance
72 #endif // CTS_USES_VULKANSC
73 )
74 {
75 auto pointerFeatures = context.getVariablePointersFeatures();
76
77 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
78 features2.features = context.getDeviceFeatures();
79 features2.features.robustBufferAccess = VK_TRUE;
80 features2.pNext = &pointerFeatures;
81
82 return createRobustBufferAccessDevice(context,
83 #ifdef CTS_USES_VULKANSC
84 customInstance,
85 #endif // CTS_USES_VULKANSC
86 &features2);
87 }
88
89 // A supplementary structures that can hold information about buffer size
90 struct AccessRangesData
91 {
92 VkDeviceSize allocSize;
93 VkDeviceSize accessRange;
94 VkDeviceSize maxAccessRange;
95 };
96
97 // Pointer to function that can be used to fill a buffer with some data - it is passed as an parameter to buffer creation utility function
98 typedef void (*FillBufferProcPtr)(void *, vk::VkDeviceSize, const void *const);
99
100 // An utility function for creating a buffer
101 // This function not only allocates memory for the buffer but also fills buffer up with a data
createTestBuffer(Context & context,const vk::DeviceInterface & deviceInterface,const VkDevice & device,VkDeviceSize accessRange,VkBufferUsageFlags usage,SimpleAllocator & allocator,Move<VkBuffer> & buffer,de::MovePtr<Allocation> & bufferAlloc,AccessRangesData & data,FillBufferProcPtr fillBufferProc,const void * const blob)102 void createTestBuffer(Context &context, const vk::DeviceInterface &deviceInterface, const VkDevice &device,
103 VkDeviceSize accessRange, VkBufferUsageFlags usage, SimpleAllocator &allocator,
104 Move<VkBuffer> &buffer, de::MovePtr<Allocation> &bufferAlloc, AccessRangesData &data,
105 FillBufferProcPtr fillBufferProc, const void *const blob)
106 {
107 const VkBufferCreateInfo bufferParams = {
108 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
109 DE_NULL, // const void* pNext;
110 0u, // VkBufferCreateFlags flags;
111 accessRange, // VkDeviceSize size;
112 usage, // VkBufferUsageFlags usage;
113 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
114 VK_QUEUE_FAMILY_IGNORED, // uint32_t queueFamilyIndexCount;
115 DE_NULL // const uint32_t* pQueueFamilyIndices;
116 };
117
118 buffer = createBuffer(deviceInterface, device, &bufferParams);
119
120 VkMemoryRequirements bufferMemoryReqs = getBufferMemoryRequirements(deviceInterface, device, *buffer);
121 bufferAlloc = allocator.allocate(bufferMemoryReqs, MemoryRequirement::HostVisible);
122
123 data.allocSize = bufferMemoryReqs.size;
124 data.accessRange = accessRange;
125 data.maxAccessRange = deMinu64(data.allocSize, deMinu64(bufferParams.size, accessRange));
126
127 VK_CHECK(deviceInterface.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
128 #ifdef CTS_USES_VULKANSC
129 if (context.getTestContext().getCommandLine().isSubProcess())
130 fillBufferProc(bufferAlloc->getHostPtr(), bufferMemoryReqs.size, blob);
131 #else
132 fillBufferProc(bufferAlloc->getHostPtr(), bufferMemoryReqs.size, blob);
133 DE_UNREF(context);
134 #endif // CTS_USES_VULKANCSC
135 flushMappedMemoryRange(deviceInterface, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
136 }
137
138 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with "randomly" generated test data matching desired format.
populateBufferWithValues(void * buffer,VkDeviceSize size,const void * const blob)139 void populateBufferWithValues(void *buffer, VkDeviceSize size, const void *const blob)
140 {
141 populateBufferWithTestValues(buffer, size, *static_cast<const vk::VkFormat *>(blob));
142 }
143
144 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with 0xBABABABABABA... pattern. Used to fill up output buffers.
145 // Since this pattern cannot show up in generated test data it should not show up in the valid output.
populateBufferWithFiller(void * buffer,VkDeviceSize size,const void * const blob)146 void populateBufferWithFiller(void *buffer, VkDeviceSize size, const void *const blob)
147 {
148 DE_UNREF(blob);
149 deMemset(buffer, 0xBA, static_cast<size_t>(size));
150 }
151
152 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with a copy of memory contents pointed to by blob.
populateBufferWithCopy(void * buffer,VkDeviceSize size,const void * const blob)153 void populateBufferWithCopy(void *buffer, VkDeviceSize size, const void *const blob)
154 {
155 deMemcpy(buffer, blob, static_cast<size_t>(size));
156 }
157
// The composite types exercised by the tests.
// Composites can be made of unsigned ints, signed ints or floats (matrices are float-only).
enum ShaderType
{
    SHADER_TYPE_MATRIX_COPY = 0,
    SHADER_TYPE_VECTOR_COPY,
    SHADER_TYPE_SCALAR_COPY,

    SHADER_TYPE_COUNT
};

// Whether the variable pointer under test is read from or written through.
// NOTE(review): in read tests the shader still writes its results to an output buffer,
// but those writes stay in range - confirm against AccessInstance verification logic.
enum BufferAccessType
{
    BUFFER_ACCESS_TYPE_READ_FROM_STORAGE = 0,
    BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
};
176
177 // Test case for checking robust buffer access with variable pointers
178 class RobustAccessWithPointersTest : public vkt::TestCase
179 {
180 public:
181 static const uint32_t s_testArraySize;
182 static const uint32_t s_numberOfBytesAccessed;
183
184 RobustAccessWithPointersTest(tcu::TestContext &testContext, const std::string &name, VkShaderStageFlags shaderStage,
185 ShaderType shaderType, VkFormat bufferFormat);
186
~RobustAccessWithPointersTest(void)187 virtual ~RobustAccessWithPointersTest(void)
188 {
189 }
190
191 void checkSupport(Context &context) const override;
192
193 protected:
194 const VkShaderStageFlags m_shaderStage;
195 const ShaderType m_shaderType;
196 const VkFormat m_bufferFormat;
197 };
198
199 const uint32_t RobustAccessWithPointersTest::s_testArraySize = 1024u;
200 const uint32_t RobustAccessWithPointersTest::s_numberOfBytesAccessed = static_cast<uint32_t>(16ull * sizeof(float));
201
RobustAccessWithPointersTest(tcu::TestContext & testContext,const std::string & name,VkShaderStageFlags shaderStage,ShaderType shaderType,VkFormat bufferFormat)202 RobustAccessWithPointersTest::RobustAccessWithPointersTest(tcu::TestContext &testContext, const std::string &name,
203 VkShaderStageFlags shaderStage, ShaderType shaderType,
204 VkFormat bufferFormat)
205 : vkt::TestCase(testContext, name)
206 , m_shaderStage(shaderStage)
207 , m_shaderType(shaderType)
208 , m_bufferFormat(bufferFormat)
209 {
210 DE_ASSERT(m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT || m_shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT ||
211 m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT);
212 }
213
checkSupport(Context & context) const214 void RobustAccessWithPointersTest::checkSupport(Context &context) const
215 {
216 const auto &pointerFeatures = context.getVariablePointersFeatures();
217 if (!pointerFeatures.variablePointersStorageBuffer)
218 TCU_THROW(NotSupportedError, "VariablePointersStorageBuffer SPIR-V capability not supported");
219
220 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
221 !context.getDeviceFeatures().robustBufferAccess)
222 TCU_THROW(NotSupportedError,
223 "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
224 }
225
226 // A subclass for testing reading with variable pointers
227 class RobustReadTest : public RobustAccessWithPointersTest
228 {
229 public:
230 RobustReadTest(tcu::TestContext &testContext, const std::string &name, VkShaderStageFlags shaderStage,
231 ShaderType shaderType, VkFormat bufferFormat, VkDeviceSize readAccessRange,
232 bool accessOutOfBackingMemory);
233
~RobustReadTest(void)234 virtual ~RobustReadTest(void)
235 {
236 }
237 virtual TestInstance *createInstance(Context &context) const;
238
239 private:
240 virtual void initPrograms(SourceCollections &programCollection) const;
241 const VkDeviceSize m_readAccessRange;
242 const bool m_accessOutOfBackingMemory;
243 };
244
245 // A subclass for testing writing with variable pointers
246 class RobustWriteTest : public RobustAccessWithPointersTest
247 {
248 public:
249 RobustWriteTest(tcu::TestContext &testContext, const std::string &name, VkShaderStageFlags shaderStage,
250 ShaderType shaderType, VkFormat bufferFormat, VkDeviceSize writeAccessRange,
251 bool accessOutOfBackingMemory);
252
~RobustWriteTest(void)253 virtual ~RobustWriteTest(void)
254 {
255 }
256 virtual TestInstance *createInstance(Context &context) const;
257
258 private:
259 virtual void initPrograms(SourceCollections &programCollection) const;
260 const VkDeviceSize m_writeAccessRange;
261 const bool m_accessOutOfBackingMemory;
262 };
263
264 // In case I detect that some prerequisites are not fullfilled I am creating this lightweight empty test instance instead of AccessInstance. Should be bit faster that way.
265 class NotSupportedInstance : public vkt::TestInstance
266 {
267 public:
NotSupportedInstance(Context & context,const std::string & message)268 NotSupportedInstance(Context &context, const std::string &message)
269 : TestInstance(context)
270 , m_notSupportedMessage(message)
271 {
272 }
273
~NotSupportedInstance(void)274 virtual ~NotSupportedInstance(void)
275 {
276 }
277
iterate(void)278 virtual tcu::TestStatus iterate(void)
279 {
280 TCU_THROW(NotSupportedError, m_notSupportedMessage.c_str());
281 }
282
283 private:
284 std::string m_notSupportedMessage;
285 };
286
287 // A superclass for instances testing reading and writing
288 // holds all necessary object members
289 class AccessInstance : public vkt::TestInstance
290 {
291 public:
292 AccessInstance(Context &context, Move<VkDevice> device,
293 #ifndef CTS_USES_VULKANSC
294 de::MovePtr<vk::DeviceDriver> deviceDriver,
295 #else
296 de::MovePtr<CustomInstance> customInstance,
297 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
298 #endif // CTS_USES_VULKANSC
299 ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
300 BufferAccessType bufferAccessType, VkDeviceSize inBufferAccessRange,
301 VkDeviceSize outBufferAccessRange, bool accessOutOfBackingMemory);
302
303 virtual ~AccessInstance(void);
304
305 virtual tcu::TestStatus iterate(void);
306
307 virtual bool verifyResult(bool splitAccess = false);
308
309 private:
310 bool isExpectedValueFromInBuffer(VkDeviceSize offsetInBytes, const void *valuePtr, VkDeviceSize valueSize);
311 bool isOutBufferValueUnchanged(VkDeviceSize offsetInBytes, VkDeviceSize valueSize);
312
313 protected:
314 #ifndef CTS_USES_VULKANSC
315 Move<VkDevice> m_device;
316 de::MovePtr<vk::DeviceDriver> m_deviceDriver;
317 #else
318 // Construction needs to happen in this exact order to ensure proper resource destruction
319 de::MovePtr<CustomInstance> m_customInstance;
320 Move<VkDevice> m_device;
321 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
322 #endif // CTS_USES_VULKANSC
323 de::MovePtr<TestEnvironment> m_testEnvironment;
324
325 const ShaderType m_shaderType;
326 const VkShaderStageFlags m_shaderStage;
327
328 const VkFormat m_bufferFormat;
329 const BufferAccessType m_bufferAccessType;
330
331 AccessRangesData m_inBufferAccess;
332 Move<VkBuffer> m_inBuffer;
333 de::MovePtr<Allocation> m_inBufferAlloc;
334
335 AccessRangesData m_outBufferAccess;
336 Move<VkBuffer> m_outBuffer;
337 de::MovePtr<Allocation> m_outBufferAlloc;
338
339 Move<VkBuffer> m_indicesBuffer;
340 de::MovePtr<Allocation> m_indicesBufferAlloc;
341
342 Move<VkDescriptorPool> m_descriptorPool;
343 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
344 Move<VkDescriptorSet> m_descriptorSet;
345
346 Move<VkFence> m_fence;
347 VkQueue m_queue;
348
349 // Used when m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT
350 Move<VkBuffer> m_vertexBuffer;
351 de::MovePtr<Allocation> m_vertexBufferAlloc;
352
353 const bool m_accessOutOfBackingMemory;
354 };
355
356 // A subclass for read tests
357 class ReadInstance : public AccessInstance
358 {
359 public:
360 ReadInstance(Context &context, Move<VkDevice> device,
361 #ifndef CTS_USES_VULKANSC
362 de::MovePtr<vk::DeviceDriver> deviceDriver,
363 #else
364 de::MovePtr<CustomInstance> customInstance,
365 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
366 #endif // CTS_USES_VULKANSC
367 ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
368 VkDeviceSize inBufferAccessRange, bool accessOutOfBackingMemory);
369
~ReadInstance(void)370 virtual ~ReadInstance(void)
371 {
372 }
373 };
374
375 // A subclass for write tests
376 class WriteInstance : public AccessInstance
377 {
378 public:
379 WriteInstance(Context &context, Move<VkDevice> device,
380 #ifndef CTS_USES_VULKANSC
381 de::MovePtr<vk::DeviceDriver> deviceDriver,
382 #else
383 de::MovePtr<CustomInstance> customInstance,
384 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
385 #endif // CTS_USES_VULKANSC
386 ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
387 VkDeviceSize writeBufferAccessRange, bool accessOutOfBackingMemory);
388
~WriteInstance(void)389 virtual ~WriteInstance(void)
390 {
391 }
392 };
393
// Monotonically increasing counter used to hand out unique SPIR-V ids.
// Each call to incrementAndGetValue() bumps the counter and returns the new value,
// so the first returned value is 1 (0 is never handed out).
class Autocounter
{
public:
    Autocounter() : value(0u)
    {
    }
    uint32_t incrementAndGetValue()
    {
        value += 1u;
        return value;
    }

private:
    uint32_t value;
};
410
411 // A class representing SPIRV variable.
412 // This class internally has an unique identificator.
413 // When such variable is used in shader composition routine it is mapped on a in-SPIRV-code variable name.
414 class Variable
415 {
416 friend bool operator<(const Variable &a, const Variable &b);
417
418 public:
Variable(Autocounter & autoincrement)419 Variable(Autocounter &autoincrement) : value(autoincrement.incrementAndGetValue())
420 {
421 }
422
423 private:
424 uint32_t value;
425 };
426
operator <(const Variable & a,const Variable & b)427 bool operator<(const Variable &a, const Variable &b)
428 {
429 return a.value < b.value;
430 }
431
// Represents a SPIR-V operation mnemonic.
// Instances are non-copyable, so identity comparison by address is sufficient.
class Operation
{
    friend bool operator==(const Operation &a, const Operation &b);

public:
    Operation(const char *text) : value(text)
    {
    }
    const std::string &getValue() const
    {
        return value;
    }

private:
    Operation(const Operation &other); // copying intentionally disabled
    const std::string value;
};

// Fast identity comparison - valid because copies are disabled above.
bool operator==(const Operation &a, const Operation &b)
{
    return &a == &b;
}
456
457 // A namespace containing all SPIRV operations used in those tests.
458 namespace op
459 {
460 #define OP(name) const Operation name("Op" #name)
461 OP(Capability);
462 OP(Extension);
463 OP(ExtInstImport);
464 OP(EntryPoint);
465 OP(MemoryModel);
466 OP(ExecutionMode);
467
468 OP(Decorate);
469 OP(MemberDecorate);
470 OP(Name);
471 OP(MemberName);
472
473 OP(TypeVoid);
474 OP(TypeBool);
475 OP(TypeInt);
476 OP(TypeFloat);
477 OP(TypeVector);
478 OP(TypeMatrix);
479 OP(TypeArray);
480 OP(TypeStruct);
481 OP(TypeFunction);
482 OP(TypePointer);
483 OP(TypeImage);
484 OP(TypeSampledImage);
485
486 OP(Constant);
487 OP(ConstantComposite);
488 OP(Variable);
489
490 OP(Function);
491 OP(FunctionEnd);
492 OP(Label);
493 OP(Return);
494
495 OP(LogicalEqual);
496 OP(IEqual);
497 OP(Select);
498
499 OP(AccessChain);
500 OP(Load);
501 OP(Store);
502 #undef OP
503 } // namespace op
504
505 // A class that allows to easily compose SPIRV code.
506 // This class automatically keeps correct order of most of operations
507 // i.e. capabilities to the top,
508 class ShaderStream
509 {
510 public:
ShaderStream()511 ShaderStream()
512 {
513 }
514 // composes shader string out of shader substreams.
str() const515 std::string str() const
516 {
517 std::stringstream stream;
518 stream << capabilities.str() << "; ----------------- PREAMBLE -----------------\n"
519 << preamble.str() << "; ----------------- DEBUG --------------------\n"
520 << names.str() << "; ----------------- DECORATIONS --------------\n"
521 << decorations.str() << "; ----------------- TYPES --------------------\n"
522 << basictypes.str() << "; ----------------- CONSTANTS ----------------\n"
523 << constants.str() << "; ----------------- ADVANCED TYPES -----------\n"
524 << compositetypes.str()
525 << ((compositeconstants.str().length() > 0) ? "; ----------------- CONSTANTS ----------------\n" : "")
526 << compositeconstants.str() << "; ----------------- VARIABLES & FUNCTIONS ----\n"
527 << shaderstream.str();
528 return stream.str();
529 }
530 // Functions below are used to push Operations, Variables and other strings, numbers and characters to the shader.
531 // Each function uses selectStream and map subroutines.
532 // selectStream is used to choose a proper substream of shader.
533 // E.g. if an operation is OpConstant it should be put into constants definitions stream - so selectStream will return that stream.
534 // map on the other hand is used to replace Variables and Operations to their in-SPIRV-code representations.
535 // for types like ints or floats map simply calls << operator to produce its string representation
536 // for Operations a proper operation string is returned
537 // for Variables there is a special mapping between in-C++ variable and in-SPIRV-code variable name.
538 // following sequence of functions could be squashed to just two using variadic templates once we move to C++11 or higher
539 // each method returns *this to allow chaining calls to these methods.
540 template <typename T>
operator ()(const T & a)541 ShaderStream &operator()(const T &a)
542 {
543 selectStream(a, 0) << map(a) << '\n';
544 return *this;
545 }
546 template <typename T1, typename T2>
operator ()(const T1 & a,const T2 & b)547 ShaderStream &operator()(const T1 &a, const T2 &b)
548 {
549 selectStream(a, 0) << map(a) << '\t' << map(b) << '\n';
550 return *this;
551 }
552 template <typename T1, typename T2, typename T3>
operator ()(const T1 & a,const T2 & b,const T3 & c)553 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c)
554 {
555 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\n';
556 return *this;
557 }
558 template <typename T1, typename T2, typename T3, typename T4>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d)559 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d)
560 {
561 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\n';
562 return *this;
563 }
564 template <typename T1, typename T2, typename T3, typename T4, typename T5>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e)565 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e)
566 {
567 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\n';
568 return *this;
569 }
570 template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e,const T6 & f)571 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e, const T6 &f)
572 {
573 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t'
574 << map(f) << '\n';
575 return *this;
576 }
577 template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e,const T6 & f,const T7 & g)578 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e, const T6 &f, const T7 &g)
579 {
580 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t'
581 << map(f) << '\t' << map(g) << '\n';
582 return *this;
583 }
584 template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e,const T6 & f,const T7 & g,const T8 & h)585 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e, const T6 &f, const T7 &g,
586 const T8 &h)
587 {
588 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t'
589 << map(f) << '\t' << map(g) << '\t' << map(h) << '\n';
590 return *this;
591 }
592 template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8,
593 typename T9>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e,const T6 & f,const T7 & g,const T8 & h,const T9 & i)594 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e, const T6 &f, const T7 &g,
595 const T8 &h, const T9 &i)
596 {
597 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t'
598 << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\n';
599 return *this;
600 }
601 template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8,
602 typename T9, typename T10>
operator ()(const T1 & a,const T2 & b,const T3 & c,const T4 & d,const T5 & e,const T6 & f,const T7 & g,const T8 & h,const T9 & i,const T10 & k)603 ShaderStream &operator()(const T1 &a, const T2 &b, const T3 &c, const T4 &d, const T5 &e, const T6 &f, const T7 &g,
604 const T8 &h, const T9 &i, const T10 &k)
605 {
606 selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t'
607 << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\t' << map(k) << '\n';
608 return *this;
609 }
610
611 // returns true if two variables has the same in-SPIRV-code names
areSame(const Variable a,const Variable b)612 bool areSame(const Variable a, const Variable b)
613 {
614 VariableIt varA = vars.find(a);
615 VariableIt varB = vars.find(b);
616 return varA != vars.end() && varB != vars.end() && varA->second == varB->second;
617 }
618
619 // makes variable 'a' in-SPIRV-code name to be the same as variable 'b' in-SPIRV-code name
makeSame(const Variable a,const Variable b)620 void makeSame(const Variable a, const Variable b)
621 {
622 VariableIt varB = vars.find(b);
623 if (varB != vars.end())
624 {
625 std::pair<VariableIt, bool> inserted = vars.insert(std::make_pair(a, varB->second));
626 if (!inserted.second)
627 inserted.first->second = varB->second;
628 }
629 }
630
631 private:
632 // generic version of map (tries to push whatever came to stringstream to get its string representation)
633 template <typename T>
map(const T & a)634 std::string map(const T &a)
635 {
636 std::stringstream temp;
637 temp << a;
638 return temp.str();
639 }
640
641 // looks for mapping of c++ Variable object onto in-SPIRV-code name.
642 // if there was not yet such mapping generated a new mapping is created based on incremented local counter.
map(const Variable & a)643 std::string map(const Variable &a)
644 {
645 VariableIt var = vars.find(a);
646 if (var != vars.end())
647 return var->second;
648 std::stringstream temp;
649 temp << '%';
650 temp.width(4);
651 temp.fill('0');
652 temp << std::hex << varCounter.incrementAndGetValue();
653 vars.insert(std::make_pair(a, temp.str()));
654 return temp.str();
655 }
656
657 // a simple specification for Operation
map(const Operation & a)658 std::string map(const Operation &a)
659 {
660 return a.getValue();
661 }
662
663 // a specification for char* - faster than going through stringstream << operator
map(const char * & a)664 std::string map(const char *&a)
665 {
666 return std::string(a);
667 }
668
669 // a specification for char - faster than going through stringstream << operator
map(const char & a)670 std::string map(const char &a)
671 {
672 return std::string(1, a);
673 }
674
675 // a generic version of selectStream - used when neither 1st nor 3rd SPIRV line token is Operation.
676 // In general should never happen.
677 // All SPIRV lines are constructed in a one of two forms:
678 // Variable = Operation operands...
679 // or
680 // Operation operands...
681 // So operation is either 1st or 3rd token.
682 template <typename T0, typename T1>
selectStream(const T0 & op0,const T1 & op1)683 std::stringstream &selectStream(const T0 &op0, const T1 &op1)
684 {
685 DE_UNREF(op0);
686 DE_UNREF(op1);
687 return shaderstream;
688 }
689
690 // Specialisation for Operation being 1st parameter
691 // Certain operations make the SPIRV code line to be pushed to different substreams.
692 template <typename T1>
selectStream(const Operation & op,const T1 & op1)693 std::stringstream &selectStream(const Operation &op, const T1 &op1)
694 {
695 DE_UNREF(op1);
696 if (op == op::Decorate || op == op::MemberDecorate)
697 return decorations;
698 if (op == op::Name || op == op::MemberName)
699 return names;
700 if (op == op::Capability || op == op::Extension)
701 return capabilities;
702 if (op == op::MemoryModel || op == op::ExecutionMode || op == op::EntryPoint)
703 return preamble;
704 return shaderstream;
705 }
706
707 // Specialisation for Operation being 3rd parameter
708 // Certain operations make the SPIRV code line to be pushed to different substreams.
709 // If we would like to use this way of generating SPIRV we could use this method as SPIRV line validation point
710 // e.g. here instead of heving partial specialisation I could specialise for T0 being Variable since this has to match Variable = Operation operands...
711 template <typename T0>
selectStream(const T0 & op0,const Operation & op)712 std::stringstream &selectStream(const T0 &op0, const Operation &op)
713 {
714 DE_UNREF(op0);
715 if (op == op::ExtInstImport)
716 return preamble;
717 if (op == op::TypeVoid || op == op::TypeBool || op == op::TypeInt || op == op::TypeFloat ||
718 op == op::TypeVector || op == op::TypeMatrix)
719 return basictypes;
720 if (op == op::TypeArray || op == op::TypeStruct || op == op::TypeFunction || op == op::TypePointer ||
721 op == op::TypeImage || op == op::TypeSampledImage)
722 return compositetypes;
723 if (op == op::Constant)
724 return constants;
725 if (op == op::ConstantComposite)
726 return compositeconstants;
727 return shaderstream;
728 }
729
730 typedef std::map<Variable, std::string> VariablesPack;
731 typedef VariablesPack::iterator VariableIt;
732
733 // local mappings between c++ Variable objects and in-SPIRV-code names
734 VariablesPack vars;
735
736 // shader substreams
737 std::stringstream capabilities;
738 std::stringstream preamble;
739 std::stringstream names;
740 std::stringstream decorations;
741 std::stringstream basictypes;
742 std::stringstream constants;
743 std::stringstream compositetypes;
744 std::stringstream compositeconstants;
745 std::stringstream shaderstream;
746
747 // local incremented counter
748 Autocounter varCounter;
749 };
750
751 // A suppliementary class to group frequently used Variables together
// Pre-allocated SPIR-V result ids for every named object used by the generated shaders.
// Each member consumes one value from the shared Autocounter, so all ids are unique within
// a single shader module.
//
// NOTE: members are initialized in declaration order (not initializer-list order). The
// 'constants' vector is declared between 'voidFuncVoid' and 'copy_type' but is filled in
// the constructor *body*, so its 32 ids are assigned only after all the named members
// below have received theirs. Only uniqueness matters for SPIR-V, but keep this in mind
// before reordering anything here.
class Variables
{
public:
    Variables(Autocounter &autoincrement)
        : version(autoincrement)
        , mainFunc(autoincrement)
        , mainFuncLabel(autoincrement)
        , voidFuncVoid(autoincrement)
        , copy_type(autoincrement)
        , copy_type_vec(autoincrement)
        , buffer_type_vec(autoincrement)
        , copy_type_ptr(autoincrement)
        , buffer_type(autoincrement)
        , voidId(autoincrement)
        , v4f32(autoincrement)
        , v4s32(autoincrement)
        , v4u32(autoincrement)
        , v4s64(autoincrement)
        , v4u64(autoincrement)
        , s32(autoincrement)
        , f32(autoincrement)
        , u32(autoincrement)
        , s64(autoincrement)
        , u64(autoincrement)
        , boolean(autoincrement)
        , array_content_type(autoincrement)
        , s32_type_ptr(autoincrement)
        , dataSelectorStructPtrType(autoincrement)
        , dataSelectorStructPtr(autoincrement)
        , dataArrayType(autoincrement)
        , dataInput(autoincrement)
        , dataInputPtrType(autoincrement)
        , dataInputType(autoincrement)
        , dataInputSampledType(autoincrement)
        , dataOutput(autoincrement)
        , dataOutputPtrType(autoincrement)
        , dataOutputType(autoincrement)
        , dataSelectorStructType(autoincrement)
        , input(autoincrement)
        , inputPtr(autoincrement)
        , output(autoincrement)
        , outputPtr(autoincrement)
    {
        // Reserve ids for a pool of general-purpose constants (indices 0-5 are integer
        // constants, 6-7 are reused for the fragment shader's output colour).
        for (uint32_t i = 0; i < 32; ++i)
            constants.push_back(Variable(autoincrement));
    }
    const Variable version;
    const Variable mainFunc;
    const Variable mainFuncLabel;
    const Variable voidFuncVoid;
    std::vector<Variable> constants;
    const Variable copy_type;
    const Variable copy_type_vec;
    const Variable buffer_type_vec;
    const Variable copy_type_ptr;
    const Variable buffer_type;
    const Variable voidId;
    const Variable v4f32;
    const Variable v4s32;
    const Variable v4u32;
    const Variable v4s64;
    const Variable v4u64;
    const Variable s32;
    const Variable f32;
    const Variable u32;
    const Variable s64;
    const Variable u64;
    const Variable boolean;
    const Variable array_content_type;
    const Variable s32_type_ptr;
    const Variable dataSelectorStructPtrType;
    const Variable dataSelectorStructPtr;
    const Variable dataArrayType;
    const Variable dataInput;
    const Variable dataInputPtrType;
    const Variable dataInputType;
    const Variable dataInputSampledType;
    const Variable dataOutput;
    const Variable dataOutputPtrType;
    const Variable dataOutputType;
    const Variable dataSelectorStructType;
    const Variable input;
    const Variable inputPtr;
    const Variable output;
    const Variable outputPtr;
};
838
// A routine generating SPIR-V assembly code for all test cases in this group.
//
// shaderStage  - pipeline stage the generated shader targets (compute, vertex or fragment)
// shaderType   - granularity of a single copy operation (scalar, vector or matrix)
// bufferFormat - format of the tested buffers (R32/R64 SINT/UINT or R32 SFLOAT)
// reads        - true: variable pointers are used on the load side of the copy;
//                false: variable pointers are used on the store side
// unused       - true: emit a minimal pass-through shader for the non-tested pipeline stage
//                instead of the actual test body
std::string MakeShader(VkShaderStageFlags shaderStage, ShaderType shaderType, VkFormat bufferFormat, bool reads,
                       bool unused)
{
    // 64-bit formats need the Int64 capability and 64-bit scalar/vector types, and double
    // the array strides used for the 32-bit formats.
    const bool isR64 = (bufferFormat == VK_FORMAT_R64_UINT || bufferFormat == VK_FORMAT_R64_SINT);
    // faster to write
    const char is = '=';

    // variables require such counter to generate their unique ids. Since there is possibility that in the future this code will
    // run parallel this counter is made local to this function body to be safe.
    Autocounter localcounter;

    // A frequently used Variables (gathered into this single object for readability)
    Variables var(localcounter);

    // A SPIR-V code builder
    ShaderStream shaderSource;

    // A basic preamble of SPIR-V shader. Turns on required capabilities and extensions.
    shaderSource(op::Capability, "Shader")(op::Capability, "VariablePointersStorageBuffer");

    if (isR64)
    {
        shaderSource(op::Capability, "Int64");
    }

    shaderSource(op::Extension, "\"SPV_KHR_storage_buffer_storage_class\"")(
        op::Extension, "\"SPV_KHR_variable_pointers\"")(var.version, is, op::ExtInstImport,
                                                        "\"GLSL.std.450\"")(op::MemoryModel, "Logical", "GLSL450");

    // Use correct entry point definition depending on shader stage
    if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
    {
        shaderSource(op::EntryPoint, "GLCompute", var.mainFunc, "\"main\"")(op::ExecutionMode, var.mainFunc,
                                                                            "LocalSize", 1, 1, 1);
    }
    else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
    {
        shaderSource(op::EntryPoint, "Vertex", var.mainFunc, "\"main\"", var.input, var.output)(
            op::Decorate, var.output, "BuiltIn", "Position")(op::Decorate, var.input, "Location", 0);
    }
    else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
    {
        shaderSource(op::EntryPoint, "Fragment", var.mainFunc, "\"main\"", var.output)(
            op::ExecutionMode, var.mainFunc, "OriginUpperLeft")(op::Decorate, var.output, "Location", 0);
    }

    // If we are testing vertex shader or fragment shader we need to provide the other one for the pipeline too.
    // So the not tested one is 'unused'. It is then a minimal/simplest possible pass-through shader.
    // If we are testing compute shader we don't need unused shader at all.
    if (unused)
    {
        if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
        {
            // Pass-through fragment shader: writes a constant opaque colour (1,1,1,1).
            shaderSource(var.voidId, is, op::TypeVoid)(var.voidFuncVoid, is, op::TypeFunction, var.voidId)(
                var.f32, is, op::TypeFloat, 32)(var.v4f32, is, op::TypeVector, var.f32,
                                                4)(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)(
                var.output, is, op::Variable, var.outputPtr, "Output")(var.constants[6], is, op::Constant, var.f32, 1)(
                var.constants[7], is, op::ConstantComposite, var.v4f32, var.constants[6], var.constants[6],
                var.constants[6], var.constants[6])(var.mainFunc, is, op::Function, var.voidId, "None",
                                                    var.voidFuncVoid)(var.mainFuncLabel, is, op::Label);
        }
        else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
        {
            // Pass-through vertex shader: copies the input position to the output position.
            shaderSource(var.voidId, is, op::TypeVoid)(var.voidFuncVoid, is, op::TypeFunction, var.voidId)(
                var.f32, is, op::TypeFloat, 32)(var.v4f32, is, op::TypeVector, var.f32,
                                                4)(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)(
                var.output, is, op::Variable, var.outputPtr, "Output")(var.inputPtr, is, op::TypePointer, "Input",
                                                                       var.v4f32)(
                var.input, is, op::Variable, var.inputPtr, "Input")(var.mainFunc, is, op::Function, var.voidId, "None",
                                                                    var.voidFuncVoid)(var.mainFuncLabel, is, op::Label);
        }
    }
    else // this is a start of actual shader that tests variable pointers
    {
        // Input storage buffer at set 0 / binding 0, output storage buffer at set 0 / binding 1.
        shaderSource(op::Decorate, var.dataInput, "DescriptorSet", 0)(op::Decorate, var.dataInput, "Binding", 0)

            (op::Decorate, var.dataOutput, "DescriptorSet", 0)(op::Decorate, var.dataOutput, "Binding", 1);

        // for scalar types and vector types we use 1024 element array of 4 elements arrays of 4-component vectors
        // so the stride of internal array is size of 4-component vector
        if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
        {
            if (isR64)
            {
                // 4 x 64-bit components = 32 bytes per inner element
                shaderSource(op::Decorate, var.array_content_type, "ArrayStride", 32);
            }
            else
            {
                // 4 x 32-bit components = 16 bytes per inner element
                shaderSource(op::Decorate, var.array_content_type, "ArrayStride", 16);
            }
        }

        if (isR64)
        {
            shaderSource(op::Decorate, var.dataArrayType, "ArrayStride", 128);
        }
        else
        {
            // for matrices we use array of 4x4-component matrices
            // stride of outer array is then 64 in every case
            shaderSource(op::Decorate, var.dataArrayType, "ArrayStride", 64);
        }

        // an output block
        shaderSource(op::MemberDecorate, var.dataOutputType, 0, "Offset", 0)(op::Decorate, var.dataOutputType, "Block")

            // an input block. Marked readonly.
            (op::MemberDecorate, var.dataInputType, 0, "NonWritable")(
                op::MemberDecorate, var.dataInputType, 0, "Offset", 0)(op::Decorate, var.dataInputType, "Block")

            // a special structure matching data in one of our buffers.
            // member at 0 is an index to read position
            // member at 1 is an index to write position
            // member at 2 is always zero. It is used to perform OpSelect. I used value coming from buffer to avoid incidental optimisations that could prune OpSelect if the value was compile time known.
            (op::MemberDecorate, var.dataSelectorStructType, 0, "Offset",
             0)(op::MemberDecorate, var.dataSelectorStructType, 1, "Offset",
                4)(op::MemberDecorate, var.dataSelectorStructType, 2, "Offset", 8)(op::Decorate,
                                                                                   var.dataSelectorStructType, "Block")

            // binding to matching buffer (set 0 / binding 2, uniform)
            (op::Decorate, var.dataSelectorStructPtr, "DescriptorSet", 0)(op::Decorate, var.dataSelectorStructPtr,
                                                                          "Binding", 2)

            // making composite types used in shader
            (var.voidId, is, op::TypeVoid)(var.voidFuncVoid, is, op::TypeFunction, var.voidId)

            (var.boolean, is, op::TypeBool)

            (var.f32, is, op::TypeFloat, 32)(var.s32, is, op::TypeInt, 32, 1)(var.u32, is, op::TypeInt, 32, 0);

        if (isR64)
        {
            shaderSource(var.s64, is, op::TypeInt, 64, 1)(var.u64, is, op::TypeInt, 64, 0);
        }

        shaderSource(var.v4f32, is, op::TypeVector, var.f32, 4)(var.v4s32, is, op::TypeVector, var.s32,
                                                                4)(var.v4u32, is, op::TypeVector, var.u32, 4);

        if (isR64)
        {
            shaderSource(var.v4s64, is, op::TypeVector, var.s64, 4)(var.v4u64, is, op::TypeVector, var.u64, 4);
        }

        // since the shader tests scalars, vectors, matrices of ints, uints and floats I am generating alternative names for some of the types so I can use those and not need to use "if" everywhere.
        // A Variable mappings will make sure the proper variable name is used
        // below is a first part of aliasing types based on int, uint, float
        switch (bufferFormat)
        {
        case vk::VK_FORMAT_R32_SINT:
            shaderSource.makeSame(var.buffer_type, var.s32);
            shaderSource.makeSame(var.buffer_type_vec, var.v4s32);
            break;
        case vk::VK_FORMAT_R32_UINT:
            shaderSource.makeSame(var.buffer_type, var.u32);
            shaderSource.makeSame(var.buffer_type_vec, var.v4u32);
            break;
        case vk::VK_FORMAT_R32_SFLOAT:
            shaderSource.makeSame(var.buffer_type, var.f32);
            shaderSource.makeSame(var.buffer_type_vec, var.v4f32);
            break;
        case vk::VK_FORMAT_R64_SINT:
            shaderSource.makeSame(var.buffer_type, var.s64);
            shaderSource.makeSame(var.buffer_type_vec, var.v4s64);
            break;
        case vk::VK_FORMAT_R64_UINT:
            shaderSource.makeSame(var.buffer_type, var.u64);
            shaderSource.makeSame(var.buffer_type_vec, var.v4u64);
            break;
        default:
            // to prevent compiler from complaining not all cases are handled (but we should not get here).
            deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
            break;
        }

        // below is a second part that aliases based on scalar, vector, matrix
        switch (shaderType)
        {
        case SHADER_TYPE_SCALAR_COPY:
            shaderSource.makeSame(var.copy_type, var.buffer_type);
            break;
        case SHADER_TYPE_VECTOR_COPY:
            shaderSource.makeSame(var.copy_type, var.buffer_type_vec);
            break;
        case SHADER_TYPE_MATRIX_COPY:
            if (bufferFormat != VK_FORMAT_R32_SFLOAT)
                TCU_THROW(NotSupportedError, "Matrices can be used only with floating point types.");
            shaderSource(var.copy_type, is, op::TypeMatrix, var.buffer_type_vec, 4);
            break;
        default:
            // to prevent compiler from complaining not all cases are handled (but we should not get here).
            deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
            break;
        }

        // I will need some constants so lets add them to shader source
        // constants[0..3] are signed ints 0..3 (used as member/element indices),
        // constants[4] is uint 4 (inner array length), constants[5] is uint 1024 (outer array length).
        shaderSource(var.constants[0], is, op::Constant, var.s32, 0)(var.constants[1], is, op::Constant, var.s32, 1)(
            var.constants[2], is, op::Constant, var.s32, 2)(var.constants[3], is, op::Constant, var.s32, 3)(
            var.constants[4], is, op::Constant, var.u32, 4)(var.constants[5], is, op::Constant, var.u32, 1024);

        // for fragment shaders I need additionally a constant vector (output "colour") so lets make it
        if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
        {
            shaderSource(var.constants[6], is, op::Constant, var.f32, 1)(var.constants[7], is, op::ConstantComposite,
                                                                         var.v4f32, var.constants[6], var.constants[6],
                                                                         var.constants[6], var.constants[6]);
        }

        // additional alias for the type of content of this 1024-element outer array.
        if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
        {
            shaderSource(var.array_content_type, is, op::TypeArray, var.buffer_type_vec, var.constants[4]);
        }
        else
        {
            shaderSource.makeSame(var.array_content_type, var.copy_type);
        }

        // Lets create pointer types to the input data type, output data type and a struct
        // This must be distinct types due to different type decorations
        // Lets make also actual pointers to the data
        shaderSource(var.dataArrayType, is, op::TypeArray, var.array_content_type,
                     var.constants[5])(var.dataInputType, is, op::TypeStruct,
                                       var.dataArrayType)(var.dataOutputType, is, op::TypeStruct, var.dataArrayType)(
            var.dataInputPtrType, is, op::TypePointer, "StorageBuffer",
            var.dataInputType)(var.dataOutputPtrType, is, op::TypePointer, "StorageBuffer", var.dataOutputType)(
            var.dataInput, is, op::Variable, var.dataInputPtrType,
            "StorageBuffer")(var.dataOutput, is, op::Variable, var.dataOutputPtrType, "StorageBuffer")(
            var.dataSelectorStructType, is, op::TypeStruct, var.s32, var.s32,
            var.s32)(var.dataSelectorStructPtrType, is, op::TypePointer, "Uniform", var.dataSelectorStructType)(
            var.dataSelectorStructPtr, is, op::Variable, var.dataSelectorStructPtrType, "Uniform");

        // we need also additional pointers to fulfil stage requirements on shaders inputs and outputs
        if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
        {
            shaderSource(var.inputPtr, is, op::TypePointer, "Input",
                         var.v4f32)(var.input, is, op::Variable, var.inputPtr,
                                    "Input")(var.outputPtr, is, op::TypePointer, "Output",
                                             var.v4f32)(var.output, is, op::Variable, var.outputPtr, "Output");
        }
        else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
        {
            shaderSource(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)(var.output, is, op::Variable,
                                                                                  var.outputPtr, "Output");
        }

        shaderSource(var.copy_type_ptr, is, op::TypePointer, "StorageBuffer",
                     var.copy_type)(var.s32_type_ptr, is, op::TypePointer, "Uniform", var.s32);

        // Make a shader main function
        shaderSource(var.mainFunc, is, op::Function, var.voidId, "None", var.voidFuncVoid)(var.mainFuncLabel, is,
                                                                                           op::Label);

        Variable copyFromPtr(localcounter), copyToPtr(localcounter), zeroPtr(localcounter);
        Variable copyFrom(localcounter), copyTo(localcounter), zero(localcounter);

        // Lets load data from our auxiliary buffer with reading index, writing index and zero.
        shaderSource(copyToPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr,
                     var.constants[1])(copyTo, is, op::Load, var.s32, copyToPtr)(
            copyFromPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr,
            var.constants[0])(copyFrom, is, op::Load, var.s32,
                              copyFromPtr)(zeroPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr,
                                           var.constants[2])(zero, is, op::Load, var.s32, zeroPtr);

        // lets start copying data using variable pointers
        switch (shaderType)
        {
        case SHADER_TYPE_SCALAR_COPY:
            // 4x4 scalar copies per invocation; each one goes through its own OpSelect-created
            // variable pointer on the tested side (load or store).
            for (int i = 0; i < 4; ++i)
            {
                for (int j = 0; j < 4; ++j)
                {
                    Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
                    Variable selection(localcounter);
                    Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

                    // 'zero' comes from the buffer, so the compiler cannot fold the OpSelect away.
                    shaderSource(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

                    if (reads)
                    {
                        // if we check reads we use variable pointers only for reading part
                        shaderSource(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0],
                                     copyFrom, var.constants[i],
                                     var.constants[j])(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput,
                                                       var.constants[0], copyFrom, var.constants[i], var.constants[j])
                            // actualLoadChain will be a variable pointer as it was created through OpSelect
                            (actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
                            // actualStoreChain will be a regular pointer
                            (actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0],
                             copyTo, var.constants[i], var.constants[j]);
                    }
                    else
                    {
                        // if we check writes we use variable pointers for the writing part only
                        shaderSource
                            // actualLoadChain will be a regular pointer
                            (actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0],
                             copyFrom, var.constants[i], var.constants[j])(scA, is, op::AccessChain, var.copy_type_ptr,
                                                                           var.dataOutput, var.constants[0], copyTo,
                                                                           var.constants[i], var.constants[j])(
                                scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo,
                                var.constants[i], var.constants[j])
                            // actualStoreChain will be a variable pointer as it was created through OpSelect
                            (actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
                    }
                    // do actual copying
                    shaderSource(loadResult, is, op::Load, var.copy_type, actualLoadChain)(op::Store, actualStoreChain,
                                                                                           loadResult);
                }
            }
            break;
        // cases below have the same logic as the one above - just we are copying bigger chunks of data with every load/store pair
        case SHADER_TYPE_VECTOR_COPY:
            for (int i = 0; i < 4; ++i)
            {
                Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
                Variable selection(localcounter);
                Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

                shaderSource(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

                if (reads)
                {
                    shaderSource(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom,
                                 var.constants[i])(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput,
                                                   var.constants[0], copyFrom, var.constants[i])(
                        actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA,
                        lcB)(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0],
                             copyTo, var.constants[i]);
                }
                else
                {
                    shaderSource(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput,
                                 var.constants[0], copyFrom,
                                 var.constants[i])(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput,
                                                   var.constants[0], copyTo, var.constants[i])(
                        scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo,
                        var.constants[i])(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
                }

                shaderSource(loadResult, is, op::Load, var.copy_type, actualLoadChain)(op::Store, actualStoreChain,
                                                                                       loadResult);
            }
            break;
        case SHADER_TYPE_MATRIX_COPY:
        {
            Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
            Variable selection(localcounter);
            Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

            shaderSource(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

            if (reads)
            {
                shaderSource(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)(
                    lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0],
                    copyFrom)(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)(
                    actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo);
            }
            else
            {
                shaderSource(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0],
                             copyFrom)(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0],
                                       copyTo)(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput,
                                               var.constants[0], copyTo)(actualStoreChain, is, op::Select,
                                                                         var.copy_type_ptr, selection, scA, scB);
            }

            shaderSource(loadResult, is, op::Load, var.copy_type, actualLoadChain)(op::Store, actualStoreChain,
                                                                                   loadResult);
        }
        break;
        default:
            // to prevent compiler from complaining not all cases are handled (but we should not get here).
            deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
            break;
        }
    }

    // This is common for test shaders and unused ones
    // We need to fill stage output from shader properly
    // output vertices positions in vertex shader
    if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
    {
        Variable inputValue(localcounter), outputLocation(localcounter);
        shaderSource(inputValue, is, op::Load, var.v4f32, var.input)(outputLocation, is, op::AccessChain, var.outputPtr,
                                                                     var.output)(op::Store, outputLocation, inputValue);
    }
    // output colour in fragment shader
    else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
    {
        shaderSource(op::Store, var.output, var.constants[7]);
    }

    // We are done. Lets close main function body
    shaderSource(op::Return)(op::FunctionEnd);

    return shaderSource.str();
}
1238
// Test case checking robustness of reads performed through variable pointers.
// readAccessRange limits the input buffer's descriptor range; accessOutOfBackingMemory
// makes the shader index past the end of the backing allocation.
RobustReadTest::RobustReadTest(tcu::TestContext &testContext, const std::string &name, VkShaderStageFlags shaderStage,
                               ShaderType shaderType, VkFormat bufferFormat, VkDeviceSize readAccessRange,
                               bool accessOutOfBackingMemory)
    : RobustAccessWithPointersTest(testContext, name, shaderStage, shaderType, bufferFormat)
    , m_readAccessRange(readAccessRange)
    , m_accessOutOfBackingMemory(accessOutOfBackingMemory)
{
}
1247
// Creates the test instance together with a custom device that has robust buffer access
// and variable pointers enabled. The Vulkan and Vulkan SC paths differ: SC needs a custom
// instance and a DeviceDriverSC torn down through DeinitDeviceDeleter.
TestInstance *RobustReadTest::createInstance(Context &context) const
{
#ifndef CTS_USES_VULKANSC
    auto device = createRobustBufferAccessVariablePointersDevice(context);
    de::MovePtr<vk::DeviceDriver> deviceDriver = de::MovePtr<DeviceDriver>(
        new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *device, context.getUsedApiVersion(),
                         context.getTestContext().getCommandLine()));
#else
    de::MovePtr<CustomInstance> customInstance =
        de::MovePtr<CustomInstance>(new CustomInstance(createCustomInstanceFromContext(context)));
    auto device = createRobustBufferAccessVariablePointersDevice(context, *customInstance);
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver =
        de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
            new DeviceDriverSC(context.getPlatformInterface(), *customInstance, *device,
                               context.getTestContext().getCommandLine(), context.getResourceInterface(),
                               context.getDeviceVulkanSC10Properties(), context.getDeviceProperties(),
                               context.getUsedApiVersion()),
            vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *device));
#endif // CTS_USES_VULKANSC

    // Ownership of the device (and, on SC, the custom instance) moves into the instance.
    return new ReadInstance(context, device,
#ifdef CTS_USES_VULKANSC
                            customInstance,
#endif // CTS_USES_VULKANSC
                            deviceDriver, m_shaderType, m_shaderStage, m_bufferFormat, m_readAccessRange,
                            m_accessOutOfBackingMemory);
}
1275
initPrograms(SourceCollections & programCollection) const1276 void RobustReadTest::initPrograms(SourceCollections &programCollection) const
1277 {
1278 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1279 {
1280 programCollection.spirvAsmSources.add("compute")
1281 << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, true, false);
1282 }
1283 else
1284 {
1285 programCollection.spirvAsmSources.add("vertex")
1286 << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, true,
1287 m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1288 programCollection.spirvAsmSources.add("fragment")
1289 << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, true,
1290 m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1291 }
1292 }
1293
// Test case checking robustness of writes performed through variable pointers.
// writeAccessRange limits the output buffer's descriptor range; accessOutOfBackingMemory
// makes the shader index past the end of the backing allocation.
RobustWriteTest::RobustWriteTest(tcu::TestContext &testContext, const std::string &name, VkShaderStageFlags shaderStage,
                                 ShaderType shaderType, VkFormat bufferFormat, VkDeviceSize writeAccessRange,
                                 bool accessOutOfBackingMemory)

    : RobustAccessWithPointersTest(testContext, name, shaderStage, shaderType, bufferFormat)
    , m_writeAccessRange(writeAccessRange)
    , m_accessOutOfBackingMemory(accessOutOfBackingMemory)
{
}
1303
// Creates the test instance together with a custom device that has robust buffer access
// and variable pointers enabled. Mirrors RobustReadTest::createInstance; the Vulkan SC
// path needs a custom instance and a DeviceDriverSC torn down through DeinitDeviceDeleter.
TestInstance *RobustWriteTest::createInstance(Context &context) const
{
#ifndef CTS_USES_VULKANSC
    auto device = createRobustBufferAccessVariablePointersDevice(context);
    de::MovePtr<vk::DeviceDriver> deviceDriver = de::MovePtr<DeviceDriver>(
        new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *device, context.getUsedApiVersion(),
                         context.getTestContext().getCommandLine()));
#else
    de::MovePtr<CustomInstance> customInstance =
        de::MovePtr<CustomInstance>(new CustomInstance(createCustomInstanceFromContext(context)));
    auto device = createRobustBufferAccessVariablePointersDevice(context, *customInstance);
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver =
        de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
            new DeviceDriverSC(context.getPlatformInterface(), *customInstance, *device,
                               context.getTestContext().getCommandLine(), context.getResourceInterface(),
                               context.getDeviceVulkanSC10Properties(), context.getDeviceProperties(),
                               context.getUsedApiVersion()),
            vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *device));
#endif // CTS_USES_VULKANSC

    // Ownership of the device (and, on SC, the custom instance) moves into the instance.
    return new WriteInstance(context, device,
#ifdef CTS_USES_VULKANSC
                             customInstance,
#endif
                             deviceDriver, m_shaderType, m_shaderStage, m_bufferFormat, m_writeAccessRange,
                             m_accessOutOfBackingMemory);
}
1331
initPrograms(SourceCollections & programCollection) const1332 void RobustWriteTest::initPrograms(SourceCollections &programCollection) const
1333 {
1334 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1335 {
1336 programCollection.spirvAsmSources.add("compute")
1337 << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, false, false);
1338 }
1339 else
1340 {
1341 programCollection.spirvAsmSources.add("vertex")
1342 << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, false,
1343 m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1344 programCollection.spirvAsmSources.add("fragment")
1345 << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, false,
1346 m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1347 }
1348 }
1349
AccessInstance(Context & context,Move<VkDevice> device,de::MovePtr<vk::DeviceDriver> deviceDriver,ShaderType shaderType,VkShaderStageFlags shaderStage,VkFormat bufferFormat,BufferAccessType bufferAccessType,VkDeviceSize inBufferAccessRange,VkDeviceSize outBufferAccessRange,bool accessOutOfBackingMemory)1350 AccessInstance::AccessInstance(Context &context, Move<VkDevice> device,
1351 #ifndef CTS_USES_VULKANSC
1352 de::MovePtr<vk::DeviceDriver> deviceDriver,
1353 #else
1354 de::MovePtr<CustomInstance> customInstance,
1355 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
1356 #endif // CTS_USES_VULKANSC
1357
1358 ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
1359 BufferAccessType bufferAccessType, VkDeviceSize inBufferAccessRange,
1360 VkDeviceSize outBufferAccessRange, bool accessOutOfBackingMemory)
1361 : vkt::TestInstance(context)
1362 #ifdef CTS_USES_VULKANSC
1363 , m_customInstance(customInstance)
1364 #endif
1365 , m_device(device)
1366 , m_deviceDriver(deviceDriver)
1367 , m_shaderType(shaderType)
1368 , m_shaderStage(shaderStage)
1369 , m_bufferFormat(bufferFormat)
1370 , m_bufferAccessType(bufferAccessType)
1371 , m_accessOutOfBackingMemory(accessOutOfBackingMemory)
1372 {
1373 tcu::TestLog &log = context.getTestContext().getLog();
1374 const DeviceInterface &vk = *m_deviceDriver;
1375 const auto &vki = context.getInstanceInterface();
1376 const auto instance = context.getInstance();
1377 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1378 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());
1379 SimpleAllocator memAlloc(vk, *m_device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1380
1381 DE_ASSERT(RobustAccessWithPointersTest::s_numberOfBytesAccessed % sizeof(uint32_t) == 0);
1382 DE_ASSERT(inBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1383 DE_ASSERT(outBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1384
1385 if (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT)
1386 {
1387 if (!context.getDeviceFeatures().shaderInt64)
1388 {
1389 TCU_THROW(NotSupportedError, "64-bit integers not supported");
1390 }
1391 }
1392
1393 // Check storage support
1394 if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
1395 {
1396 if (!context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
1397 {
1398 TCU_THROW(NotSupportedError, "Stores not supported in vertex stage");
1399 }
1400 }
1401 else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1402 {
1403 if (!context.getDeviceFeatures().fragmentStoresAndAtomics)
1404 {
1405 TCU_THROW(NotSupportedError, "Stores not supported in fragment stage");
1406 }
1407 }
1408
1409 createTestBuffer(context, vk, *m_device, inBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc,
1410 m_inBuffer, m_inBufferAlloc, m_inBufferAccess, &populateBufferWithValues, &m_bufferFormat);
1411 createTestBuffer(context, vk, *m_device, outBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc,
1412 m_outBuffer, m_outBufferAlloc, m_outBufferAccess, &populateBufferWithFiller, DE_NULL);
1413
1414 int32_t indices[] = {(m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE)) ?
1415 static_cast<int32_t>(RobustAccessWithPointersTest::s_testArraySize) - 1 :
1416 0,
1417 (m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE)) ?
1418 static_cast<int32_t>(RobustAccessWithPointersTest::s_testArraySize) - 1 :
1419 0,
1420 0};
1421 AccessRangesData indicesAccess;
1422 createTestBuffer(context, vk, *m_device, 3 * sizeof(int32_t), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, memAlloc,
1423 m_indicesBuffer, m_indicesBufferAlloc, indicesAccess, &populateBufferWithCopy, &indices);
1424
1425 log << tcu::TestLog::Message << "input buffer - alloc size: " << m_inBufferAccess.allocSize
1426 << tcu::TestLog::EndMessage;
1427 log << tcu::TestLog::Message << "input buffer - max access range: " << m_inBufferAccess.maxAccessRange
1428 << tcu::TestLog::EndMessage;
1429 log << tcu::TestLog::Message << "output buffer - alloc size: " << m_outBufferAccess.allocSize
1430 << tcu::TestLog::EndMessage;
1431 log << tcu::TestLog::Message << "output buffer - max access range: " << m_outBufferAccess.maxAccessRange
1432 << tcu::TestLog::EndMessage;
1433 log << tcu::TestLog::Message << "indices - input offset: " << indices[0] << tcu::TestLog::EndMessage;
1434 log << tcu::TestLog::Message << "indices - output offset: " << indices[1] << tcu::TestLog::EndMessage;
1435 log << tcu::TestLog::Message << "indices - additional: " << indices[2] << tcu::TestLog::EndMessage;
1436
1437 // Create descriptor data
1438 {
1439 DescriptorPoolBuilder descriptorPoolBuilder;
1440 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1441 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1442 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u);
1443 m_descriptorPool =
1444 descriptorPoolBuilder.build(vk, *m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1445
1446 DescriptorSetLayoutBuilder setLayoutBuilder;
1447 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1448 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1449 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_ALL);
1450 m_descriptorSetLayout = setLayoutBuilder.build(vk, *m_device);
1451
1452 const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo = {
1453 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
1454 DE_NULL, // const void* pNext;
1455 *m_descriptorPool, // VkDescriptorPool descriptorPool;
1456 1u, // uint32_t setLayoutCount;
1457 &m_descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
1458 };
1459
1460 m_descriptorSet = allocateDescriptorSet(vk, *m_device, &descriptorSetAllocateInfo);
1461
1462 const VkDescriptorBufferInfo inBufferDescriptorInfo =
1463 makeDescriptorBufferInfo(*m_inBuffer, 0ull, m_inBufferAccess.accessRange);
1464 const VkDescriptorBufferInfo outBufferDescriptorInfo =
1465 makeDescriptorBufferInfo(*m_outBuffer, 0ull, m_outBufferAccess.accessRange);
1466 const VkDescriptorBufferInfo indicesBufferDescriptorInfo =
1467 makeDescriptorBufferInfo(*m_indicesBuffer, 0ull, 12ull);
1468
1469 DescriptorSetUpdateBuilder setUpdateBuilder;
1470 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0),
1471 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inBufferDescriptorInfo);
1472 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1),
1473 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outBufferDescriptorInfo);
1474 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2),
1475 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &indicesBufferDescriptorInfo);
1476 setUpdateBuilder.update(vk, *m_device);
1477 }
1478
1479 // Create fence
1480 {
1481 const VkFenceCreateInfo fenceParams = {
1482 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1483 DE_NULL, // const void* pNext;
1484 0u // VkFenceCreateFlags flags;
1485 };
1486
1487 m_fence = createFence(vk, *m_device, &fenceParams);
1488 }
1489
1490 // Get queue
1491 vk.getDeviceQueue(*m_device, queueFamilyIndex, 0, &m_queue);
1492
1493 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1494 {
1495 m_testEnvironment = de::MovePtr<TestEnvironment>(
1496 new ComputeEnvironment(m_context, *m_deviceDriver, *m_device, *m_descriptorSetLayout, *m_descriptorSet));
1497 }
1498 else
1499 {
1500 using tcu::Vec4;
1501
1502 const VkVertexInputBindingDescription vertexInputBindingDescription = {
1503 0u, // uint32_t binding;
1504 sizeof(tcu::Vec4), // uint32_t strideInBytes;
1505 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1506 };
1507
1508 const VkVertexInputAttributeDescription vertexInputAttributeDescription = {
1509 0u, // uint32_t location;
1510 0u, // uint32_t binding;
1511 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1512 0u // uint32_t offset;
1513 };
1514
1515 AccessRangesData vertexAccess;
1516 const Vec4 vertices[] = {
1517 Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1518 Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
1519 Vec4(1.0f, -1.0f, 0.0f, 1.0f),
1520 };
1521 const VkDeviceSize vertexBufferSize = static_cast<VkDeviceSize>(sizeof(vertices));
1522 createTestBuffer(context, vk, *m_device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, memAlloc,
1523 m_vertexBuffer, m_vertexBufferAlloc, vertexAccess, &populateBufferWithCopy, &vertices);
1524
1525 const GraphicsEnvironment::DrawConfig drawWithOneVertexBuffer = {
1526 std::vector<VkBuffer>(1, *m_vertexBuffer), // std::vector<VkBuffer> vertexBuffers;
1527 DE_LENGTH_OF_ARRAY(vertices), // uint32_t vertexCount;
1528 1, // uint32_t instanceCount;
1529 DE_NULL, // VkBuffer indexBuffer;
1530 0u, // uint32_t indexCount;
1531 };
1532
1533 m_testEnvironment = de::MovePtr<TestEnvironment>(new GraphicsEnvironment(
1534 m_context, *m_deviceDriver, *m_device, *m_descriptorSetLayout, *m_descriptorSet,
1535 GraphicsEnvironment::VertexBindings(1, vertexInputBindingDescription),
1536 GraphicsEnvironment::VertexAttributes(1, vertexInputAttributeDescription), drawWithOneVertexBuffer));
1537 }
1538 }
1539
~AccessInstance()1540 AccessInstance::~AccessInstance()
1541 {
1542 }
1543
1544 // Verifies if the buffer has the value initialized by BufferAccessInstance::populateReadBuffer at a given offset.
isExpectedValueFromInBuffer(VkDeviceSize offsetInBytes,const void * valuePtr,VkDeviceSize valueSize)1545 bool AccessInstance::isExpectedValueFromInBuffer(VkDeviceSize offsetInBytes, const void *valuePtr,
1546 VkDeviceSize valueSize)
1547 {
1548 DE_ASSERT(offsetInBytes % 4 == 0);
1549 DE_ASSERT(offsetInBytes < m_inBufferAccess.allocSize);
1550 DE_ASSERT(valueSize == 4ull || valueSize == 8ull);
1551
1552 const uint32_t valueIndex = uint32_t(offsetInBytes / 4) + 2;
1553
1554 if (isUintFormat(m_bufferFormat))
1555 {
1556 const uint32_t expectedValues[2] = {valueIndex, valueIndex + 1u};
1557 return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1558 }
1559 else if (isIntFormat(m_bufferFormat))
1560 {
1561 const int32_t value = -int32_t(valueIndex);
1562 const int32_t expectedValues[2] = {value, value - 1};
1563 return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1564 }
1565 else if (isFloatFormat(m_bufferFormat))
1566 {
1567 DE_ASSERT(valueSize == 4ull);
1568 const float value = float(valueIndex);
1569 return !deMemCmp(valuePtr, &value, (size_t)valueSize);
1570 }
1571 else
1572 {
1573 DE_ASSERT(false);
1574 return false;
1575 }
1576 }
1577
isOutBufferValueUnchanged(VkDeviceSize offsetInBytes,VkDeviceSize valueSize)1578 bool AccessInstance::isOutBufferValueUnchanged(VkDeviceSize offsetInBytes, VkDeviceSize valueSize)
1579 {
1580 DE_ASSERT(valueSize <= 8);
1581 const uint8_t *const outValuePtr = (uint8_t *)m_outBufferAlloc->getHostPtr() + offsetInBytes;
1582 const uint64_t defaultValue = 0xBABABABABABABABAull;
1583
1584 return !deMemCmp(outValuePtr, &defaultValue, (size_t)valueSize);
1585 }
1586
// Submits the prerecorded command buffer, waits for it to finish, makes the result buffer
// visible to the host, and returns pass/fail based on verifyResult().
tcu::TestStatus AccessInstance::iterate(void)
{
    const DeviceInterface &vk           = *m_deviceDriver;
    const vk::VkCommandBuffer cmdBuffer = m_testEnvironment->getCommandBuffer();

    // Submit command buffer
    {
        const VkSubmitInfo submitInfo = {
            VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
            DE_NULL,                       // const void* pNext;
            0u,                            // uint32_t waitSemaphoreCount;
            DE_NULL,                       // const VkSemaphore* pWaitSemaphores;
            DE_NULL,                       // const VkPipelineStageFlags* pWaitDstStageMask;
            1u,                            // uint32_t commandBufferCount;
            &cmdBuffer,                    // const VkCommandBuffer* pCommandBuffers;
            0u,                            // uint32_t signalSemaphoreCount;
            DE_NULL                        // const VkSemaphore* pSignalSemaphores;
        };

        VK_CHECK(vk.resetFences(*m_device, 1, &m_fence.get()));
        VK_CHECK(vk.queueSubmit(m_queue, 1, &submitInfo, *m_fence));
        // Wait without timeout: the workload is tiny, so an infinite wait is acceptable here.
        VK_CHECK(vk.waitForFences(*m_device, 1, &m_fence.get(), true, ~(0ull) /* infinity */));
    }

    // Prepare result buffer for read. The output allocation may be non-coherent, so the
    // mapped range must be invalidated before the host inspects the written values.
    {
        const VkMappedMemoryRange outBufferRange = {
            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // VkStructureType sType;
            DE_NULL,                               // const void* pNext;
            m_outBufferAlloc->getMemory(),         // VkDeviceMemory mem;
            0ull,                                  // VkDeviceSize offset;
            m_outBufferAccess.allocSize,           // VkDeviceSize size;
        };

        VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
    }

    if (verifyResult())
        return tcu::TestStatus::pass("All values OK");
    else
        return tcu::TestStatus::fail("Invalid value(s) found");
}
1629
// Checks the output buffer against the set of behaviors the spec allows for robust buffer access.
//
// With splitAccess == false, values are validated at the full element width (8 bytes for R64
// formats). If that fails for a 64-bit format, the function re-runs itself with
// splitAccess == true, since implementations may decompose multi-word accesses into
// individually bounds-checked 32-bit accesses (see the quote near the end of the function).
bool AccessInstance::verifyResult(bool splitAccess)
{
    std::ostringstream logMsg;
    tcu::TestLog &log       = m_context.getTestContext().getLog();
    const bool isReadAccess = (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE);
    const void *inDataPtr   = m_inBufferAlloc->getHostPtr();
    const void *outDataPtr  = m_outBufferAlloc->getHostPtr();
    bool allOk              = true;
    uint32_t valueNdx       = 0;
    // Out-of-bounds detection is relative to the range of whichever buffer the shader accesses.
    const VkDeviceSize maxAccessRange =
        isReadAccess ? m_inBufferAccess.maxAccessRange : m_outBufferAccess.maxAccessRange;
    const bool isR64 = (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT);
    // unsplitElementSize is always the full element width; elementSize shrinks to 4 when
    // re-verifying a 64-bit format as two bounds-checked 32-bit halves.
    const uint32_t unsplitElementSize = (isR64 ? 8u : 4u);
    const uint32_t elementSize        = ((isR64 && !splitAccess) ? 8u : 4u);

    for (VkDeviceSize offsetInBytes = 0; offsetInBytes < m_outBufferAccess.allocSize; offsetInBytes += elementSize)
    {
        const uint8_t *outValuePtr = static_cast<const uint8_t *>(outDataPtr) + offsetInBytes;
        // Clamp the compared size so the final element never reads past the allocation.
        const size_t outValueSize =
            static_cast<size_t>(deMinu64(elementSize, (m_outBufferAccess.allocSize - offsetInBytes)));

        if (offsetInBytes >= RobustAccessWithPointersTest::s_numberOfBytesAccessed)
        {
            // The shader will only write 16 values into the result buffer. The rest of the values
            // should remain unchanged or may be modified if we are writing out of bounds.
            if (!isOutBufferValueUnchanged(offsetInBytes, outValueSize) &&
                (isReadAccess || !isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, 4)))
            {
                logMsg << "\nValue " << valueNdx++ << " has been modified with an unknown value: "
                       << *(static_cast<const uint32_t *>(static_cast<const void *>(outValuePtr)));
                allOk = false;
            }
        }
        else
        {
            // Positive: this many bytes remain before the access range ends; zero/negative: fully out of bounds.
            const int32_t distanceToOutOfBounds =
                static_cast<int32_t>(maxAccessRange) - static_cast<int32_t>(offsetInBytes);
            bool isOutOfBoundsAccess = false;

            logMsg << "\n" << valueNdx++ << ": ";

            logValue(logMsg, outValuePtr, m_bufferFormat, outValueSize);

            // In this mode every access is out of the backing memory by construction.
            if (m_accessOutOfBackingMemory)
                isOutOfBoundsAccess = true;

            // Check if the shader operation accessed an operand located less than 16 bytes away
            // from the out of bounds address. Less than 32 bytes away for 64 bit accesses.
            if (!isOutOfBoundsAccess && distanceToOutOfBounds < (isR64 ? 32 : 16))
            {
                uint32_t operandSize = 0;

                switch (m_shaderType)
                {
                case SHADER_TYPE_SCALAR_COPY:
                    operandSize = unsplitElementSize; // Size of scalar
                    break;

                case SHADER_TYPE_VECTOR_COPY:
                    operandSize = unsplitElementSize * 4; // Size of vec4
                    break;

                case SHADER_TYPE_MATRIX_COPY:
                    operandSize = unsplitElementSize * 16; // Size of mat4
                    break;

                default:
                    DE_ASSERT(false);
                }

                // If the whole operand containing this element extends past the access range,
                // the entire operand access may be treated as out of bounds.
                isOutOfBoundsAccess = (((offsetInBytes / operandSize) + 1) * operandSize > maxAccessRange);
            }

            if (isOutOfBoundsAccess)
            {
                logMsg << " (out of bounds " << (isReadAccess ? "read" : "write") << ")";

                const bool isValuePartiallyOutOfBounds =
                    ((distanceToOutOfBounds > 0) && ((uint32_t)distanceToOutOfBounds < elementSize));
                bool isValidValue = false;

                if (isValuePartiallyOutOfBounds && !m_accessOutOfBackingMemory)
                {
                    // The value is partially out of bounds

                    bool isOutOfBoundsPartOk  = true;
                    bool isWithinBoundsPartOk = true;

                    uint32_t inBoundPartSize = distanceToOutOfBounds;

                    // For cases that partial element is out of bound, the part within the buffer allocated memory can be buffer content per spec.
                    // We need to check it as a whole part.
                    if (offsetInBytes + elementSize > m_inBufferAccess.allocSize)
                    {
                        inBoundPartSize =
                            static_cast<int32_t>(m_inBufferAccess.allocSize) - static_cast<int32_t>(offsetInBytes);
                    }

                    if (isReadAccess)
                    {
                        // In-bounds part must come from the buffer (or be zero); out-of-bounds part likewise
                        // may be any value from the bound memory or zero.
                        isWithinBoundsPartOk = isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize,
                                                                         outValuePtr, inBoundPartSize);
                        isOutOfBoundsPartOk = isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize,
                                                                        (uint8_t *)outValuePtr + inBoundPartSize,
                                                                        outValueSize - inBoundPartSize);
                    }
                    else
                    {
                        // For writes, each part is valid if it matches buffer content/zero or was simply left untouched.
                        isWithinBoundsPartOk = isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize,
                                                                         outValuePtr, inBoundPartSize) ||
                                               isOutBufferValueUnchanged(offsetInBytes, inBoundPartSize);

                        isOutOfBoundsPartOk =
                            isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize,
                                                      (uint8_t *)outValuePtr + inBoundPartSize,
                                                      outValueSize - inBoundPartSize) ||
                            isOutBufferValueUnchanged(offsetInBytes + inBoundPartSize, outValueSize - inBoundPartSize);
                    }

                    logMsg << ", first " << distanceToOutOfBounds << " byte(s) "
                           << (isWithinBoundsPartOk ? "OK" : "wrong");
                    logMsg << ", last " << outValueSize - distanceToOutOfBounds << " byte(s) "
                           << (isOutOfBoundsPartOk ? "OK" : "wrong");

                    isValidValue = isWithinBoundsPartOk && isOutOfBoundsPartOk;
                }
                else
                {
                    if (isReadAccess)
                    {
                        isValidValue =
                            isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, outValueSize);
                    }
                    else
                    {
                        isValidValue = isOutBufferValueUnchanged(offsetInBytes, outValueSize);

                        if (!isValidValue)
                        {
                            // Out of bounds writes may modify values within the memory ranges bound to the buffer
                            isValidValue = isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr,
                                                                     outValueSize);

                            if (isValidValue)
                                logMsg << ", OK, written within the memory range bound to the buffer";
                        }
                    }
                }

                if (!isValidValue && !splitAccess)
                {
                    // Check if we are satisfying the [0, 0, 0, x] pattern, where x may be either 0 or 1,
                    // or the maximum representable positive integer value (if the format is integer-based).

                    const bool canMatchVec4Pattern =
                        (isReadAccess && !isValuePartiallyOutOfBounds && (m_shaderType == SHADER_TYPE_VECTOR_COPY) &&
                         (offsetInBytes / elementSize + 1) % 4 == 0);
                    bool matchesVec4Pattern = false;

                    if (canMatchVec4Pattern)
                    {
                        // The current element is the .w component, so the vec4 starts three elements earlier.
                        matchesVec4Pattern = verifyOutOfBoundsVec4(outValuePtr - 3u * elementSize, m_bufferFormat);
                    }

                    if (!canMatchVec4Pattern || !matchesVec4Pattern)
                    {
                        logMsg << ". Failed: ";

                        if (isReadAccess)
                        {
                            logMsg << "expected value within the buffer range or 0";

                            if (canMatchVec4Pattern)
                                logMsg << ", or the [0, 0, 0, x] pattern";
                        }
                        else
                        {
                            logMsg << "written out of the range";
                        }

                        allOk = false;
                    }
                }
            }
            else // We are within bounds
            {
                if (isReadAccess)
                {
                    if (!isExpectedValueFromInBuffer(offsetInBytes, outValuePtr, elementSize))
                    {
                        logMsg << ", Failed: unexpected value";
                        allOk = false;
                    }
                }
                else
                {
                    // Out of bounds writes may change values within the bounds.
                    if (!isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.accessRange, outValuePtr, elementSize))
                    {
                        logMsg << ", Failed: unexpected value";
                        allOk = false;
                    }
                }
            }
        }
    }

    log << tcu::TestLog::Message << logMsg.str() << tcu::TestLog::EndMessage;

    if (!allOk && unsplitElementSize > 4u && !splitAccess)
    {
        // "Non-atomic accesses to storage buffers that are a multiple of 32 bits may be decomposed into 32-bit accesses that are individually bounds-checked."
        return verifyResult(true /*splitAccess*/);
    }

    return allOk;
}
1847
// ReadInstance
1849
// Read-test instance: the caller controls the accessible range of the input (read) buffer,
// while the output buffer always covers the fixed number of bytes the shader writes.
ReadInstance::ReadInstance(Context &context, Move<VkDevice> device,
#ifndef CTS_USES_VULKANSC
                           de::MovePtr<vk::DeviceDriver> deviceDriver,
#else
                           de::MovePtr<CustomInstance> customInstance,
                           de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
#endif // CTS_USES_VULKANSC
                           ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
                           //bool readFromStorage,
                           VkDeviceSize inBufferAccessRange, bool accessOutOfBackingMemory)

    : AccessInstance(context, device,
#ifdef CTS_USES_VULKANSC
                     customInstance,
#endif // CTS_USES_VULKANSC
                     deviceDriver, shaderType, shaderStage, bufferFormat, BUFFER_ACCESS_TYPE_READ_FROM_STORAGE,
                     inBufferAccessRange, RobustAccessWithPointersTest::s_numberOfBytesAccessed,
                     accessOutOfBackingMemory)
{
}
1870
// WriteInstance
1872
// Write-test instance: the caller controls the accessible range of the output (write) buffer,
// while the input buffer always covers the fixed number of bytes the shader reads.
WriteInstance::WriteInstance(Context &context, Move<VkDevice> device,
#ifndef CTS_USES_VULKANSC
                             de::MovePtr<vk::DeviceDriver> deviceDriver,
#else
                             de::MovePtr<CustomInstance> customInstance,
                             de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
#endif // CTS_USES_VULKANSC
                             ShaderType shaderType, VkShaderStageFlags shaderStage, VkFormat bufferFormat,
                             VkDeviceSize writeBufferAccessRange, bool accessOutOfBackingMemory)

    : AccessInstance(context, device,
#ifdef CTS_USES_VULKANSC
                     customInstance,
#endif // CTS_USES_VULKANSC
                     deviceDriver, shaderType, shaderStage, bufferFormat, BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
                     RobustAccessWithPointersTest::s_numberOfBytesAccessed, writeBufferAccessRange,
                     accessOutOfBackingMemory)
{
}
1892
1893 } // unnamed namespace
1894
createBufferAccessWithVariablePointersTests(tcu::TestContext & testCtx)1895 tcu::TestCaseGroup *createBufferAccessWithVariablePointersTests(tcu::TestContext &testCtx)
1896 {
1897 // Lets make group for the tests
1898 de::MovePtr<tcu::TestCaseGroup> bufferAccessWithVariablePointersTests(
1899 new tcu::TestCaseGroup(testCtx, "through_pointers"));
1900
1901 // Lets add subgroups to better organise tests
1902 de::MovePtr<tcu::TestCaseGroup> computeWithVariablePointersTests(new tcu::TestCaseGroup(testCtx, "compute"));
1903 de::MovePtr<tcu::TestCaseGroup> computeReads(new tcu::TestCaseGroup(testCtx, "reads"));
1904 de::MovePtr<tcu::TestCaseGroup> computeWrites(new tcu::TestCaseGroup(testCtx, "writes"));
1905
1906 de::MovePtr<tcu::TestCaseGroup> graphicsWithVariablePointersTests(new tcu::TestCaseGroup(testCtx, "graphics"));
1907 de::MovePtr<tcu::TestCaseGroup> graphicsReads(new tcu::TestCaseGroup(testCtx, "reads"));
1908 de::MovePtr<tcu::TestCaseGroup> graphicsReadsVertex(new tcu::TestCaseGroup(testCtx, "vertex"));
1909 de::MovePtr<tcu::TestCaseGroup> graphicsReadsFragment(new tcu::TestCaseGroup(testCtx, "fragment"));
1910 de::MovePtr<tcu::TestCaseGroup> graphicsWrites(new tcu::TestCaseGroup(testCtx, "writes"));
1911 de::MovePtr<tcu::TestCaseGroup> graphicsWritesVertex(new tcu::TestCaseGroup(testCtx, "vertex"));
1912 de::MovePtr<tcu::TestCaseGroup> graphicsWritesFragment(new tcu::TestCaseGroup(testCtx, "fragment"));
1913
1914 // A struct for describing formats
1915 struct Formats
1916 {
1917 const VkFormat value;
1918 const char *const name;
1919 };
1920
1921 const Formats bufferFormats[] = {
1922 {VK_FORMAT_R32_SINT, "s32"}, {VK_FORMAT_R32_UINT, "u32"}, {VK_FORMAT_R32_SFLOAT, "f32"},
1923 {VK_FORMAT_R64_SINT, "s64"}, {VK_FORMAT_R64_UINT, "u64"},
1924 };
1925 const uint8_t bufferFormatsCount = static_cast<uint8_t>(DE_LENGTH_OF_ARRAY(bufferFormats));
1926
1927 // Amounts of data to copy
1928 const VkDeviceSize rangeSizes[] = {1ull, 3ull, 4ull, 16ull, 32ull};
1929 const uint8_t rangeSizesCount = static_cast<uint8_t>(DE_LENGTH_OF_ARRAY(rangeSizes));
1930
1931 // gather above data into one array
1932 const struct ShaderTypes
1933 {
1934 const ShaderType value;
1935 const char *const name;
1936 const Formats *const formats;
1937 const uint8_t formatsCount;
1938 const VkDeviceSize *const sizes;
1939 const uint8_t sizesCount;
1940 } types[] = {{SHADER_TYPE_VECTOR_COPY, "vec4", bufferFormats, bufferFormatsCount, rangeSizes, rangeSizesCount},
1941 {SHADER_TYPE_SCALAR_COPY, "scalar", bufferFormats, bufferFormatsCount, rangeSizes, rangeSizesCount}};
1942
1943 // Specify to which subgroups put various tests
1944 const struct ShaderStages
1945 {
1946 VkShaderStageFlags stage;
1947 de::MovePtr<tcu::TestCaseGroup> &reads;
1948 de::MovePtr<tcu::TestCaseGroup> &writes;
1949 } stages[] = {{VK_SHADER_STAGE_VERTEX_BIT, graphicsReadsVertex, graphicsWritesVertex},
1950 {VK_SHADER_STAGE_FRAGMENT_BIT, graphicsReadsFragment, graphicsWritesFragment},
1951 {VK_SHADER_STAGE_COMPUTE_BIT, computeReads, computeWrites}};
1952
1953 // Eventually specify if memory used should be in the "inaccesible" portion of buffer or entirely outside of buffer
1954 const char *const backingMemory[] = {"in_memory", "out_of_memory"};
1955
1956 for (int32_t stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1957 for (int i = 0; i < DE_LENGTH_OF_ARRAY(types); ++i)
1958 for (int j = 0; j < types[i].formatsCount; ++j)
1959 for (int k = 0; k < types[i].sizesCount; ++k)
1960 for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1961 {
1962 std::ostringstream name;
1963 name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_'
1964 << types[i].formats[j].name;
1965 stages[stageId].reads->addChild(
1966 new RobustReadTest(testCtx, name.str().c_str(), stages[stageId].stage, types[i].value,
1967 types[i].formats[j].value, types[i].sizes[k], s != 0));
1968 }
1969
1970 for (int32_t stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1971 for (int i = 0; i < DE_LENGTH_OF_ARRAY(types); ++i)
1972 for (int j = 0; j < types[i].formatsCount; ++j)
1973 for (int k = 0; k < types[i].sizesCount; ++k)
1974 for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1975 {
1976 std::ostringstream name;
1977 name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_'
1978 << types[i].formats[j].name;
1979 stages[stageId].writes->addChild(
1980 new RobustWriteTest(testCtx, name.str().c_str(), stages[stageId].stage, types[i].value,
1981 types[i].formats[j].value, types[i].sizes[k], s != 0));
1982 }
1983
1984 graphicsReads->addChild(graphicsReadsVertex.release());
1985 graphicsReads->addChild(graphicsReadsFragment.release());
1986
1987 graphicsWrites->addChild(graphicsWritesVertex.release());
1988 graphicsWrites->addChild(graphicsWritesFragment.release());
1989
1990 graphicsWithVariablePointersTests->addChild(graphicsReads.release());
1991 graphicsWithVariablePointersTests->addChild(graphicsWrites.release());
1992
1993 computeWithVariablePointersTests->addChild(computeReads.release());
1994 computeWithVariablePointersTests->addChild(computeWrites.release());
1995
1996 bufferAccessWithVariablePointersTests->addChild(graphicsWithVariablePointersTests.release());
1997 bufferAccessWithVariablePointersTests->addChild(computeWithVariablePointersTests.release());
1998
1999 return bufferAccessWithVariablePointersTests.release();
2000 }
2001
2002 } // namespace robustness
2003 } // namespace vkt
2004