/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Google LLC.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief SSBO corner case tests.
 *//*--------------------------------------------------------------------*/

#include "deRandom.hpp"

#include "vktSSBOCornerCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"

#include <string>

namespace vkt
{
namespace ssbo
{
using std::string;
using std::vector;

namespace
{
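// Test case that builds a very long compute shader: a chain of bitwise-AND comparisons against
// array elements accessed through a GL_EXT_buffer_reference pointer. The shader length is the
// corner case being exercised; the test passes as long as nothing crashes (see m_testSize below).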
class CornerCase : public TestCase
{
public:
    CornerCase(tcu::TestContext &testCtx, const char *name) : TestCase(testCtx, name)
    {
        init();
    }
    virtual void delayedInit(void);
    virtual void initPrograms(vk::SourceCollections &programCollection) const;
    virtual TestInstance *createInstance(Context &context) const;

protected:
    string m_computeShaderSrc;
    const int m_testSize = 589; // Minimum loop count for which the original crash was observed.
};

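// Generates the GLSL source for the corner-case compute shader: 'loopCount' comparisons of
// blockA.a[i] against pseudo-random ivec4 constants, all folded into 'allOk' with bitwise AND.
// For illustration, the generated body contains lines of the following form (the actual
// constants depend on the de::Random seed):
//
//     allOk = allOk & int(compare_ivec4((blockA.a[0]), ivec4(3, -7, 1, 5)));
//     allOk = allOk & int(compare_ivec4((blockA.a[1]), ivec4(-2, 8, 0, -9)));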
string useCornerCaseShader(int loopCount)
{
    std::ostringstream src;
    de::Random rnd(1);

    src << "#version 310 es\n"
           "#extension GL_EXT_buffer_reference : enable\n"
           "layout(std430, buffer_reference) buffer BlockA\n"
           "{\n"
           " highp ivec4 a[];\n"
           "};\n"
           // ac_numIrrelevant is not used for anything, but is needed so that the compiler doesn't optimize everything out.
           "layout(std140, binding = 0) buffer AcBlock { highp uint ac_numIrrelevant; };\n"
           "\n"
           "layout (push_constant, std430) uniform PC {\n"
           " BlockA blockA;\n"
           "};\n"
           "\n"
           "bool compare_ivec4(highp ivec4 a, highp ivec4 b) { return a == b; }\n"
           "\n"
           "void main (void)\n"
           "{\n"
           " int allOk = int(true);\n";

    for (int i = 0; i < loopCount; i++)
    {
        src << " allOk = allOk & int(compare_ivec4((blockA.a[" << i << "]), ivec4(" << rnd.getInt(-9, 9) << ", "
            << rnd.getInt(-9, 9) << ", " << rnd.getInt(-9, 9) << ", " << rnd.getInt(-9, 9) << ")));\n";
    }

    src << " if (allOk != int(false))\n"
           " {\n"
           " ac_numIrrelevant++;\n"
           " }\n"
           "}\n";

    return src.str();
}

struct Buffer
{
    uint32_t buffer;
    int size;

    Buffer(uint32_t buffer_, int size_) : buffer(buffer_), size(size_)
    {
    }
    Buffer(void) : buffer(0), size(0)
    {
    }
};

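// Allocates device memory satisfying 'memReqs' for 'buffer', binds it, and optionally reports the
// allocation size through 'allocationSize'.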
de::MovePtr<vk::Allocation> allocateAndBindMemory(Context &context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs,
                                                  vk::VkDeviceSize *allocationSize = DE_NULL)
{
    const vk::DeviceInterface &vkd         = context.getDeviceInterface();
    const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
    de::MovePtr<vk::Allocation> memory     = context.getDefaultAllocator().allocate(bufReqs, memReqs);

    vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
    if (allocationSize)
    {
        *allocationSize = bufReqs.size;
    }

    return memory;
}

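// Creates an exclusive-sharing-mode buffer of 'bufferSize' bytes for the universal queue family.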
vk::Move<vk::VkBuffer> createBuffer(Context &context, vk::VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
{
    const vk::VkDevice vkDevice     = context.getDevice();
    const vk::DeviceInterface &vk   = context.getDeviceInterface();
    const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    const vk::VkBufferCreateInfo bufferInfo = {
        vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                  // const void* pNext;
        0u,                                       // VkBufferCreateFlags flags;
        bufferSize,                               // VkDeviceSize size;
        usageFlags,                               // VkBufferUsageFlags usage;
        vk::VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode;
        1u,                                       // uint32_t queueFamilyIndexCount;
        &queueFamilyIndex                         // const uint32_t* pQueueFamilyIndices;
    };

    return vk::createBuffer(vk, vkDevice, &bufferInfo);
}
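
// Instance that builds a minimal compute pipeline around the generated shader and dispatches it once.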
class SSBOCornerCaseInstance : public TestInstance
{
public:
    SSBOCornerCaseInstance(Context &context, int testSize);
    virtual ~SSBOCornerCaseInstance(void);
    virtual tcu::TestStatus iterate(void);

private:
    int m_testSize;
};

SSBOCornerCaseInstance::SSBOCornerCaseInstance(Context &context, int testSize)
    : TestInstance(context)
    , m_testSize(testSize)
{
}

SSBOCornerCaseInstance::~SSBOCornerCaseInstance(void)
{
}

tcu::TestStatus SSBOCornerCaseInstance::iterate(void)
{
    const vk::DeviceInterface &vk   = m_context.getDeviceInterface();
    const vk::VkDevice device       = m_context.getDevice();
    const vk::VkQueue queue         = m_context.getUniversalQueue();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

    vk::Move<vk::VkBuffer> buffer;
    de::MovePtr<vk::Allocation> alloc;

    // Create descriptor set
    const uint32_t acBufferSize = 4;
    vk::Move<vk::VkBuffer> acBuffer(createBuffer(m_context, acBufferSize, vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    vk::VkDeviceSize acMemorySize = 0;
    de::UniquePtr<vk::Allocation> acBufferAlloc(
        allocateAndBindMemory(m_context, *acBuffer, vk::MemoryRequirement::HostVisible, &acMemorySize));

    deMemset(acBufferAlloc->getHostPtr(), 0, acBufferSize);
    flushMappedMemoryRange(vk, device, acBufferAlloc->getMemory(), acBufferAlloc->getOffset(), acMemorySize);

    vk::DescriptorSetLayoutBuilder setLayoutBuilder;
    vk::DescriptorPoolBuilder poolBuilder;

    setLayoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
    poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2);

    const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(setLayoutBuilder.build(vk, device));
    const vk::Unique<vk::VkDescriptorPool> descriptorPool(
        poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const vk::VkDescriptorSetAllocateInfo allocInfo = {
        vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, DE_NULL, *descriptorPool, 1u, &descriptorSetLayout.get(),
    };

    const vk::Unique<vk::VkDescriptorSet> descriptorSet(allocateDescriptorSet(vk, device, &allocInfo));
    const vk::VkDescriptorBufferInfo descriptorInfo = makeDescriptorBufferInfo(*acBuffer, 0ull, acBufferSize);

    vk::DescriptorSetUpdateBuilder setUpdateBuilder;
    vk::VkDescriptorBufferInfo descriptor;

    setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                                 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfo);

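    // The data buffer is created with the device-address usage bit so that its address can be
    // queried and handed to the shader through the push-constant BlockA pointer.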
    vk::VkFlags usageFlags   = vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
    bool memoryDeviceAddress = false;

    if (m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
        memoryDeviceAddress = true;

    // Create the data buffer. Its contents are never initialized or validated; the test only checks
    // that the generated shader compiles and runs.
    const int bufferSize = 64 * m_testSize;
    {
        vk::VkPhysicalDeviceProperties properties;
        m_context.getInstanceInterface().getPhysicalDeviceProperties(m_context.getPhysicalDevice(), &properties);

        DE_ASSERT(bufferSize > 0);

        buffer = createBuffer(m_context, bufferSize, usageFlags);
        alloc  = allocateAndBindMemory(
            m_context, *buffer,
            vk::MemoryRequirement::HostVisible |
                (memoryDeviceAddress ? vk::MemoryRequirement::DeviceAddress : vk::MemoryRequirement::Any));
        descriptor = makeDescriptorBufferInfo(*buffer, 0, bufferSize);
    }

    vk::VkBufferDeviceAddressInfo info{
        vk::VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType sType;
        DE_NULL,                                          // const void* pNext;
        descriptor.buffer                                 // VkBuffer buffer
    };

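    // Query the buffer's device address; the generated shader dereferences it via GL_EXT_buffer_reference.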
    vk::VkDeviceAddress addr = vk.getBufferDeviceAddress(device, &info);

    setUpdateBuilder.update(vk, device);

    const vk::VkPushConstantRange pushConstRange = {
        vk::VK_SHADER_STAGE_COMPUTE_BIT,        // VkShaderStageFlags stageFlags
        0,                                      // uint32_t offset
        (uint32_t)(sizeof(vk::VkDeviceAddress)) // uint32_t size
    };

    // Must fit within the spec-guaranteed minimum maxPushConstantsSize of 128 bytes.
    DE_ASSERT(pushConstRange.size <= 128);

    const vk::VkPipelineLayoutCreateInfo pipelineLayoutParams = {
        vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                           // const void* pNext;
        (vk::VkPipelineLayoutCreateFlags)0,                // VkPipelineLayoutCreateFlags flags;
        1u,                                                // uint32_t setLayoutCount;
        &*descriptorSetLayout,                             // const VkDescriptorSetLayout* pSetLayouts;
        1u,                                                // uint32_t pushConstantRangeCount;
        &pushConstRange,                                   // const VkPushConstantRange* pPushConstantRanges;
    };
    vk::Move<vk::VkPipelineLayout> pipelineLayout(createPipelineLayout(vk, device, &pipelineLayoutParams));

    vk::Move<vk::VkShaderModule> shaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("compute"), 0));
    const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams = {
        vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                 // const void* pNext;
        (vk::VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags flags;
        vk::VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
        *shaderModule,                                           // VkShaderModule module;
        "main",                                                  // const char* pName;
        DE_NULL,                                                 // const VkSpecializationInfo* pSpecializationInfo;
    };
    const vk::VkComputePipelineCreateInfo pipelineCreateInfo = {
        vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                            // const void* pNext;
        0,                                                  // VkPipelineCreateFlags flags;
        pipelineShaderStageParams,                          // VkPipelineShaderStageCreateInfo stage;
        *pipelineLayout,                                    // VkPipelineLayout layout;
        DE_NULL,                                            // VkPipeline basePipelineHandle;
        0,                                                  // int32_t basePipelineIndex;
    };
    vk::Move<vk::VkPipeline> pipeline(createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));

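    // Record a single 1x1x1 dispatch and wait for it to finish; a driver crash or hang fails the test.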
    vk::Move<vk::VkCommandPool> cmdPool(
        createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    vk::Move<vk::VkCommandBuffer> cmdBuffer(
        allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer, 0u);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

    vk.cmdPushConstants(*cmdBuffer, *pipelineLayout, vk::VK_SHADER_STAGE_COMPUTE_BIT, 0, (uint32_t)(sizeof(addr)),
                        &addr);

    vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
                             &descriptorSet.get(), 0u, DE_NULL);

    vk.cmdDispatch(*cmdBuffer, 1, 1, 1);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

    // Test always passes if it doesn't cause a crash.
    return tcu::TestStatus::pass("Test did not cause a crash");
}

void CornerCase::initPrograms(vk::SourceCollections &programCollection) const
{
    DE_ASSERT(!m_computeShaderSrc.empty());

    programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
}

TestInstance *CornerCase::createInstance(Context &context) const
{
    if (!context.isBufferDeviceAddressSupported())
        TCU_THROW(NotSupportedError, "Physical storage buffer pointers not supported");
    return new SSBOCornerCaseInstance(context, m_testSize);
}

void CornerCase::delayedInit(void)
{
    m_computeShaderSrc = useCornerCaseShader(m_testSize);
}
} // namespace

tcu::TestCaseGroup *createSSBOCornerCaseTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> cornerCaseGroup(new tcu::TestCaseGroup(testCtx, "corner_case"));
    cornerCaseGroup->addChild(new CornerCase(testCtx, "long_shader_bitwise_and"));
    return cornerCaseGroup.release();
}
} // namespace ssbo
} // namespace vkt