1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 Imagination Technologies Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Robustness Utilities
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktRobustnessUtil.hpp"
26 #include "vktCustomInstancesDevices.hpp"
27 #include "vkDefs.hpp"
28 #include "vkImageUtil.hpp"
29 #include "vkPrograms.hpp"
30 #include "vkQueryUtil.hpp"
31 #include "vkRefUtil.hpp"
32 #include "vkTypeUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vkObjUtil.hpp"
35 #include "vkSafetyCriticalUtil.hpp"
36 #include "tcuCommandLine.hpp"
37 #include "vkDeviceUtil.hpp"
38 #include "deMath.h"
39 #include <iomanip>
40 #include <limits>
41 #include <sstream>
42
43 namespace vkt
44 {
45 namespace robustness
46 {
47
48 using namespace vk;
49 using std::string;
50 using std::vector;
51
/*! \brief Create a logical device with robustBufferAccess force-enabled.
 *
 * The device exposes one universal (graphics + compute) queue and enables the
 * same extension list the default context device uses. If enabledFeatures2 is
 * non-null it is placed at the head of the pNext chain and pEnabledFeatures is
 * left null (the two are mutually exclusive per the Vulkan spec); otherwise the
 * context's core features, with robustBufferAccess switched on, are passed
 * through pEnabledFeatures. For Vulkan SC, object-reservation and pipeline
 * cache/pool information is additionally chained into pNext.
 */
Move<VkDevice> createRobustBufferAccessDevice(Context &context,
#ifdef CTS_USES_VULKANSC
                                              const vkt::CustomInstance &customInstance,
#endif // CTS_USES_VULKANSC
                                              const VkPhysicalDeviceFeatures2 *enabledFeatures2)
{
    const float queuePriority = 1.0f;

    // Create a universal queue that supports graphics and compute
    const VkDeviceQueueCreateInfo queueParams = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                    // const void* pNext;
        0u,                                         // VkDeviceQueueCreateFlags flags;
        context.getUniversalQueueFamilyIndex(),     // uint32_t queueFamilyIndex;
        1u,                                         // uint32_t queueCount;
        &queuePriority                              // const float* pQueuePriorities;
    };

    // Start from the default device's feature set and force robust buffer access on.
    VkPhysicalDeviceFeatures enabledFeatures = context.getDeviceFeatures();
    enabledFeatures.robustBufferAccess = true;

    // \note Extensions in core are not explicitly enabled even though
    //         they are in the extension list advertised to tests.
    const auto &extensionPtrs = context.getDeviceCreationExtensions();

    // Head of the pNext chain; SC-specific structures are prepended below.
    void *pNext = (void *)enabledFeatures2;
#ifdef CTS_USES_VULKANSC
    // In a subprocess, reserve the object counts recorded by the main process;
    // otherwise start from an empty reservation.
    VkDeviceObjectReservationCreateInfo memReservationInfo = context.getTestContext().getCommandLine().isSubProcess() ?
                                                                 context.getResourceInterface()->getStatMax() :
                                                                 resetDeviceObjectReservationCreateInfo();
    memReservationInfo.pNext = pNext;
    pNext = &memReservationInfo;

    VkPhysicalDeviceVulkanSC10Features sc10Features = createDefaultSC10Features();
    sc10Features.pNext = pNext;
    pNext = &sc10Features;

    // pcCI and poolSizes must outlive device creation: memReservationInfo
    // stores pointers into them.
    VkPipelineCacheCreateInfo pcCI;
    std::vector<VkPipelinePoolSize> poolSizes;
    if (context.getTestContext().getCommandLine().isSubProcess())
    {
        if (context.getResourceInterface()->getCacheDataSize() > 0)
        {
            pcCI = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                      // const void* pNext;
                VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
                    VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
                context.getResourceInterface()->getCacheDataSize(),       // uintptr_t initialDataSize;
                context.getResourceInterface()->getCacheData()            // const void* pInitialData;
            };
            memReservationInfo.pipelineCacheCreateInfoCount = 1;
            memReservationInfo.pPipelineCacheCreateInfos = &pcCI;
        }

        poolSizes = context.getResourceInterface()->getPipelinePoolSizes();
        if (!poolSizes.empty())
        {
            memReservationInfo.pipelinePoolSizeCount = uint32_t(poolSizes.size());
            memReservationInfo.pPipelinePoolSizes = poolSizes.data();
        }
    }
#endif

    const VkDeviceCreateInfo deviceParams = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,       // VkStructureType sType;
        pNext,                                      // const void* pNext;
        0u,                                         // VkDeviceCreateFlags flags;
        1u,                                         // uint32_t queueCreateInfoCount;
        &queueParams,                               // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u,                                         // uint32_t enabledLayerCount;
        nullptr,                                    // const char* const* ppEnabledLayerNames;
        de::sizeU32(extensionPtrs),                 // uint32_t enabledExtensionCount;
        de::dataOrNull(extensionPtrs),              // const char* const* ppEnabledExtensionNames;
        enabledFeatures2 ? nullptr : &enabledFeatures // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    // We are creating a custom device with a potentially large amount of extensions and features enabled, using the default device
    // as a reference. Some implementations may only enable certain device extensions if some instance extensions are enabled, so in
    // this case it's important to reuse the context instance when creating the device.

#ifdef CTS_USES_VULKANSC
    vk::VkInstance instance = customInstance;
    const auto &vki = customInstance.getDriver();
    const auto physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());
#else
    vk::VkInstance instance = context.getInstance();
    const auto &vki = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();
#endif // CTS_USES_VULKANSC

    return createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(),
                              context.getPlatformInterface(), instance, vki, physicalDevice, &deviceParams);
}
146
areEqual(float a,float b)147 bool areEqual(float a, float b)
148 {
149 return deFloatAbs(a - b) <= 0.001f;
150 }
151
// Returns true when every byte in the valueSizeInBytes-long region at valuePtr is zero.
// An empty region (size 0) is considered all-zero.
bool isValueZero(const void *valuePtr, size_t valueSizeInBytes)
{
    const uint8_t *byteIter = reinterpret_cast<const uint8_t *>(valuePtr);
    const uint8_t *byteEnd = byteIter + valueSizeInBytes;

    while (byteIter != byteEnd)
    {
        if (*byteIter != 0)
            return false;
        ++byteIter;
    }

    return true;
}
164
isValueWithinBuffer(const void * buffer,VkDeviceSize bufferSize,const void * valuePtr,size_t valueSizeInBytes)165 bool isValueWithinBuffer(const void *buffer, VkDeviceSize bufferSize, const void *valuePtr, size_t valueSizeInBytes)
166 {
167 const uint8_t *byteBuffer = reinterpret_cast<const uint8_t *>(buffer);
168
169 if (bufferSize < ((VkDeviceSize)valueSizeInBytes))
170 return false;
171
172 for (VkDeviceSize i = 0; i <= (bufferSize - valueSizeInBytes); i++)
173 {
174 if (!deMemCmp(&byteBuffer[i], valuePtr, valueSizeInBytes))
175 return true;
176 }
177
178 return false;
179 }
180
isValueWithinBufferOrZero(const void * buffer,VkDeviceSize bufferSize,const void * valuePtr,size_t valueSizeInBytes)181 bool isValueWithinBufferOrZero(const void *buffer, VkDeviceSize bufferSize, const void *valuePtr,
182 size_t valueSizeInBytes)
183 {
184 return isValueWithinBuffer(buffer, bufferSize, valuePtr, valueSizeInBytes) ||
185 isValueZero(valuePtr, valueSizeInBytes);
186 }
187
// Checks a 4-component integer vector read out-of-bounds: x, y and z must be 0
// and w must be 0, 1 or the type's maximum (the alpha values robust access may
// substitute). The vector is copied out with deMemcpy to avoid alignment issues.
template <typename T>
bool verifyVec4IntegerValues(const void *vecPtr)
{
    const T zero = T{0};
    const T one = T{1};
    const T maxValue = std::numeric_limits<T>::max();

    T components[4];
    deMemcpy(components, vecPtr, 4 * sizeof(T));

    const bool xyzAreZero = (components[0] == zero) && (components[1] == zero) && (components[2] == zero);
    const bool wIsValid = (components[3] == zero) || (components[3] == one) || (components[3] == maxValue);

    return xyzAreZero && wIsValid;
}
200
verifyOutOfBoundsVec4(const void * vecPtr,VkFormat bufferFormat)201 bool verifyOutOfBoundsVec4(const void *vecPtr, VkFormat bufferFormat)
202 {
203 if (isUintFormat(bufferFormat))
204 {
205 if (bufferFormat == VK_FORMAT_R64_UINT)
206 return verifyVec4IntegerValues<uint64_t>(vecPtr);
207 return verifyVec4IntegerValues<uint32_t>(vecPtr);
208 }
209 else if (isIntFormat(bufferFormat))
210 {
211 if (bufferFormat == VK_FORMAT_R64_SINT)
212 return verifyVec4IntegerValues<int64_t>(vecPtr);
213 return verifyVec4IntegerValues<int32_t>(vecPtr);
214 }
215 else if (isFloatFormat(bufferFormat))
216 {
217 const float *data = (float *)vecPtr;
218
219 return areEqual(data[0], 0.0f) && areEqual(data[1], 0.0f) && areEqual(data[2], 0.0f) &&
220 (areEqual(data[3], 0.0f) || areEqual(data[3], 1.0f));
221 }
222 else if (bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
223 {
224 return *((uint32_t *)vecPtr) == 0xc0000000u;
225 }
226
227 DE_ASSERT(false);
228 return false;
229 }
230
/*! \brief Fill a buffer with deterministic, format-appropriate test values.
 *
 * Writes one 32-bit scalar per 4 bytes of the buffer. Values start at 2
 * (0 and 1 are avoided so they cannot be confused with robust-access
 * out-of-bounds results) and increase by one per scalar. Signed formats get
 * the negated sequence, float formats the same sequence as floats, and
 * A2B10G10R10 a packed value derived from three consecutive indices.
 */
void populateBufferWithTestValues(void *buffer, VkDeviceSize size, VkFormat format)
{
    // Assign a sequence of 32-bit values
    for (VkDeviceSize scalarNdx = 0; scalarNdx < size / 4; scalarNdx++)
    {
        const uint32_t valueIndex = (uint32_t)(2 + scalarNdx); // Do not use 0 or 1

        if (isUintFormat(format))
        {
            reinterpret_cast<uint32_t *>(buffer)[scalarNdx] = valueIndex;
        }
        else if (isIntFormat(format))
        {
            reinterpret_cast<int32_t *>(buffer)[scalarNdx] = -int32_t(valueIndex);
        }
        else if (isFloatFormat(format))
        {
            reinterpret_cast<float *>(buffer)[scalarNdx] = float(valueIndex);
        }
        else if (format == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
        {
            // NOTE(review): these masks are (2u << N) - 1, i.e. 11 bits for the
            // 10-bit RGB channels and 3 bits for the 2-bit alpha channel, so for
            // large indices a channel value can bleed into the next field when
            // shifted. Kept as-is since this only generates reference test data
            // and comparisons use the same generator — confirm before changing.
            const uint32_t r = ((valueIndex + 0) & ((2u << 10) - 1u));
            const uint32_t g = ((valueIndex + 1) & ((2u << 10) - 1u));
            const uint32_t b = ((valueIndex + 2) & ((2u << 10) - 1u));
            const uint32_t a = ((valueIndex + 0) & ((2u << 2) - 1u));

            reinterpret_cast<uint32_t *>(buffer)[scalarNdx] = (a << 30) | (b << 20) | (g << 10) | r;
        }
        else
        {
            DE_ASSERT(false);
        }
    }
}
265
logValue(std::ostringstream & logMsg,const void * valuePtr,VkFormat valueFormat,size_t valueSize)266 void logValue(std::ostringstream &logMsg, const void *valuePtr, VkFormat valueFormat, size_t valueSize)
267 {
268 if (isUintFormat(valueFormat))
269 {
270 logMsg << *reinterpret_cast<const uint32_t *>(valuePtr);
271 }
272 else if (isIntFormat(valueFormat))
273 {
274 logMsg << *reinterpret_cast<const int32_t *>(valuePtr);
275 }
276 else if (isFloatFormat(valueFormat))
277 {
278 logMsg << *reinterpret_cast<const float *>(valuePtr);
279 }
280 else
281 {
282 const uint8_t *bytePtr = reinterpret_cast<const uint8_t *>(valuePtr);
283 const std::ios::fmtflags streamFlags = logMsg.flags();
284
285 logMsg << std::hex;
286 for (size_t i = 0; i < valueSize; i++)
287 {
288 logMsg << " " << (uint32_t)bytePtr[i];
289 }
290 logMsg.flags(streamFlags);
291 }
292 }
293
294 // TestEnvironment
295
/*! \brief Base test environment: allocates the command pool and a primary
 *         command buffer on the universal queue family.
 *
 * Derived environments (graphics/compute) record their work into
 * m_commandBuffer. The descriptor set layout and set are stored for use by
 * the derived pipeline setup.
 */
TestEnvironment::TestEnvironment(Context &context, const DeviceInterface &vk, VkDevice device,
                                 VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSet descriptorSet)
    : m_context(context)
    , m_device(device)
    , m_descriptorSetLayout(descriptorSetLayout)
    , m_descriptorSet(descriptorSet)
{
    // Create command pool
    {
        const VkCommandPoolCreateInfo commandPoolParams = {
            VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                    // const void* pNext;
            VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,       // VkCommandPoolCreateFlags flags;
            context.getUniversalQueueFamilyIndex()      // uint32_t queueFamilyIndex;
        };

        m_commandPool = createCommandPool(vk, m_device, &commandPoolParams);
    }

    // Create command buffer
    {
        const VkCommandBufferAllocateInfo commandBufferAllocateInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
            DE_NULL,                                        // const void* pNext;
            *m_commandPool,                                 // VkCommandPool commandPool;
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                // VkCommandBufferLevel level;
            1u,                                             // uint32_t bufferCount;
        };

        m_commandBuffer = allocateCommandBuffer(vk, m_device, &commandBufferAllocateInfo);
    }
}
328
getCommandBuffer(void)329 VkCommandBuffer TestEnvironment::getCommandBuffer(void)
330 {
331 return *m_commandBuffer;
332 }
333
334 // GraphicsEnvironment
335
/*! \brief Graphics test environment: builds a complete 16x16 render setup and
 *         pre-records the draw commands.
 *
 * Creates a color image/view, render pass, framebuffer, pipeline layout and a
 * graphics pipeline using the "vertex"/"fragment" shaders from the binary
 * collection, then records a command buffer that transitions the color image,
 * binds pipeline/descriptors/vertex buffers and performs either an indexed or
 * non-indexed draw according to drawConfig. When testPipelineRobustness is set
 * (non-SC builds only), VkPipelineRobustnessCreateInfoEXT is chained into the
 * pipeline to request robust buffer access at pipeline scope.
 */
GraphicsEnvironment::GraphicsEnvironment(Context &context, const DeviceInterface &vk, VkDevice device,
                                         VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSet descriptorSet,
                                         const VertexBindings &vertexBindings, const VertexAttributes &vertexAttributes,
                                         const DrawConfig &drawConfig, bool testPipelineRobustness)

    : TestEnvironment(context, vk, device, descriptorSetLayout, descriptorSet)
    , m_renderSize(16, 16)
    , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
{
    const auto &vki = context.getInstanceInterface();
    const auto instance = context.getInstance();
    const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
    const VkComponentMapping componentMappingRGBA = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
                                                     VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
    const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());
    SimpleAllocator memAlloc(vk, m_device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));

    // Create color image and view
    {
        const VkImageCreateInfo colorImageParams = {
            VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,                          // VkStructureType sType;
            DE_NULL,                                                      // const void* pNext;
            0u,                                                           // VkImageCreateFlags flags;
            VK_IMAGE_TYPE_2D,                                             // VkImageType imageType;
            m_colorFormat,                                                // VkFormat format;
            {(uint32_t)m_renderSize.x(), (uint32_t)m_renderSize.y(), 1u}, // VkExtent3D extent;
            1u,                                                           // uint32_t mipLevels;
            1u,                                                           // uint32_t arrayLayers;
            VK_SAMPLE_COUNT_1_BIT,                                        // VkSampleCountFlagBits samples;
            VK_IMAGE_TILING_OPTIMAL,                                      // VkImageTiling tiling;
            VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE,                                    // VkSharingMode sharingMode;
            1u,                                                           // uint32_t queueFamilyIndexCount;
            &queueFamilyIndex,                                            // const uint32_t* pQueueFamilyIndices;
            VK_IMAGE_LAYOUT_UNDEFINED                                     // VkImageLayout initialLayout;
        };

        m_colorImage = createImage(vk, m_device, &colorImageParams);
        m_colorImageAlloc =
            memAlloc.allocate(getImageMemoryRequirements(vk, m_device, *m_colorImage), MemoryRequirement::Any);
        VK_CHECK(vk.bindImageMemory(m_device, *m_colorImage, m_colorImageAlloc->getMemory(),
                                    m_colorImageAlloc->getOffset()));

        const VkImageViewCreateInfo colorAttachmentViewParams = {
            VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,   // VkStructureType sType;
            DE_NULL,                                    // const void* pNext;
            0u,                                         // VkImageViewCreateFlags flags;
            *m_colorImage,                              // VkImage image;
            VK_IMAGE_VIEW_TYPE_2D,                      // VkImageViewType viewType;
            m_colorFormat,                              // VkFormat format;
            componentMappingRGBA,                       // VkComponentMapping components;
            {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u} // VkImageSubresourceRange subresourceRange;
        };

        m_colorAttachmentView = createImageView(vk, m_device, &colorAttachmentViewParams);
    }

    // Create render pass
    m_renderPass = makeRenderPass(vk, m_device, m_colorFormat);

    // Create framebuffer
    {
        const VkFramebufferCreateInfo framebufferParams = {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                   // const void* pNext;
            0u,                                        // VkFramebufferCreateFlags flags;
            *m_renderPass,                             // VkRenderPass renderPass;
            1u,                                        // uint32_t attachmentCount;
            &m_colorAttachmentView.get(),              // const VkImageView* pAttachments;
            (uint32_t)m_renderSize.x(),                // uint32_t width;
            (uint32_t)m_renderSize.y(),                // uint32_t height;
            1u                                         // uint32_t layers;
        };

        m_framebuffer = createFramebuffer(vk, m_device, &framebufferParams);
    }

    // Create pipeline layout
    {
        const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                       // const void* pNext;
            0u,                                            // VkPipelineLayoutCreateFlags flags;
            1u,                                            // uint32_t setLayoutCount;
            &m_descriptorSetLayout,                        // const VkDescriptorSetLayout* pSetLayouts;
            0u,                                            // uint32_t pushConstantRangeCount;
            DE_NULL                                        // const VkPushConstantRange* pPushConstantRanges;
        };

        m_pipelineLayout = createPipelineLayout(vk, m_device, &pipelineLayoutParams);
    }

    m_vertexShaderModule = createShaderModule(vk, m_device, m_context.getBinaryCollection().get("vertex"), 0);
    m_fragmentShaderModule = createShaderModule(vk, m_device, m_context.getBinaryCollection().get("fragment"), 0);

    // Create pipeline
    {
        const VkPipelineVertexInputStateCreateInfo vertexInputStateParams = {
            VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                                   // const void* pNext;
            0u,                                                        // VkPipelineVertexInputStateCreateFlags flags;
            (uint32_t)vertexBindings.size(), // uint32_t vertexBindingDescriptionCount;
            vertexBindings.data(),           // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
            (uint32_t)vertexAttributes.size(), // uint32_t vertexAttributeDescriptionCount;
            vertexAttributes.data() // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
        };

        const std::vector<VkViewport> viewports(1, makeViewport(m_renderSize));
        const std::vector<VkRect2D> scissors(1, makeRect2D(m_renderSize));

        // Optional pNext chain for pipeline-level robustness (not available in Vulkan SC).
        const void *pNext = DE_NULL;
#ifndef CTS_USES_VULKANSC
        VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo = initVulkanStructure();

        if (testPipelineRobustness)
        {
            pipelineRobustnessInfo.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
            pipelineRobustnessInfo.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
            pipelineRobustnessInfo.vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
            pipelineRobustnessInfo.images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;
            pNext = &pipelineRobustnessInfo;
        }
#else
        DE_UNREF(testPipelineRobustness);
#endif

        m_graphicsPipeline = makeGraphicsPipeline(
            vk,                                   // const DeviceInterface&            vk
            m_device,                             // const VkDevice                    device
            *m_pipelineLayout,                    // const VkPipelineLayout            pipelineLayout
            *m_vertexShaderModule,                // const VkShaderModule              vertexShaderModule
            DE_NULL,                              // const VkShaderModule              tessellationControlShaderModule
            DE_NULL,                              // const VkShaderModule              tessellationEvalShaderModule
            DE_NULL,                              // const VkShaderModule              geometryShaderModule
            *m_fragmentShaderModule,              // const VkShaderModule              fragmentShaderModule
            *m_renderPass,                        // const VkRenderPass                renderPass
            viewports,                            // const std::vector<VkViewport>&    viewports
            scissors,                             // const std::vector<VkRect2D>&      scissors
            VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // const VkPrimitiveTopology         topology
            0u,                                   // const uint32_t                    subpass
            0u,                                   // const uint32_t                    patchControlPoints
            &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo*      vertexInputStateCreateInfo
            DE_NULL,                 // const VkPipelineRasterizationStateCreateInfo*    rasterizationStateCreateInfo
            DE_NULL,                 // const VkPipelineMultisampleStateCreateInfo*      multisampleStateCreateInfo
            DE_NULL,                 // const VkPipelineDepthStencilStateCreateInfo*     depthStencilStateCreateInfo
            DE_NULL,                 // const VkPipelineColorBlendStateCreateInfo*       colorBlendStateCreateInfo
            DE_NULL,                 // const VkPipelineDynamicStateCreateInfo*          dynamicStateCreateInfo
            pNext);                  // void* pNext
    }

    // Record commands
    {
        // Transition the color image from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL
        // before the render pass begins.
        const VkImageMemoryBarrier imageLayoutBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // VkStructureType sType;
            DE_NULL,                                    // const void* pNext;
            (VkAccessFlags)0,                           // VkAccessFlags srcAccessMask;
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,       // VkAccessFlags dstAccessMask;
            VK_IMAGE_LAYOUT_UNDEFINED,                  // VkImageLayout oldLayout;
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,   // VkImageLayout newLayout;
            VK_QUEUE_FAMILY_IGNORED,                    // uint32_t srcQueueFamilyIndex;
            VK_QUEUE_FAMILY_IGNORED,                    // uint32_t dstQueueFamilyIndex;
            *m_colorImage,                              // VkImage image;
            {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u} // VkImageSubresourceRange subresourceRange;
        };

        beginCommandBuffer(vk, *m_commandBuffer, 0u);
        {
            vk.cmdPipelineBarrier(*m_commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                  VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u,
                                  DE_NULL, 1u, &imageLayoutBarrier);

            beginRenderPass(vk, *m_commandBuffer, *m_renderPass, *m_framebuffer,
                            makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), tcu::Vec4(0.0f));
            {
                // All vertex buffers are bound at offset 0.
                const std::vector<VkDeviceSize> vertexBufferOffsets(drawConfig.vertexBuffers.size(), 0ull);

                vk.cmdBindPipeline(*m_commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_graphicsPipeline);
                vk.cmdBindDescriptorSets(*m_commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0, 1,
                                         &m_descriptorSet, 0, DE_NULL);
                vk.cmdBindVertexBuffers(*m_commandBuffer, 0, (uint32_t)drawConfig.vertexBuffers.size(),
                                        drawConfig.vertexBuffers.data(), vertexBufferOffsets.data());

                // Non-indexed draw when no index buffer (or count) is configured;
                // otherwise bind the 32-bit index buffer and draw indexed.
                if (drawConfig.indexBuffer == DE_NULL || drawConfig.indexCount == 0)
                {
                    vk.cmdDraw(*m_commandBuffer, drawConfig.vertexCount, drawConfig.instanceCount, 0, 0);
                }
                else
                {
                    vk.cmdBindIndexBuffer(*m_commandBuffer, drawConfig.indexBuffer, 0, VK_INDEX_TYPE_UINT32);
                    vk.cmdDrawIndexed(*m_commandBuffer, drawConfig.indexCount, drawConfig.instanceCount, 0, 0, 0);
                }
            }
            endRenderPass(vk, *m_commandBuffer);
        }
        endCommandBuffer(vk, *m_commandBuffer);
    }
}
533
534 // ComputeEnvironment
535
/*! \brief Compute test environment: builds a compute pipeline from the
 *         "compute" shader and pre-records a 32x32x1 dispatch.
 *
 * A pipeline layout with the single provided descriptor set layout is created,
 * then the compute pipeline (optionally with pipeline-level robustness chained
 * in on non-SC builds). The recorded command buffer binds pipeline and
 * descriptor set, dispatches, and ends with a shader-write -> host-read memory
 * barrier so results can be read back on the host.
 */
ComputeEnvironment::ComputeEnvironment(Context &context, const DeviceInterface &vk, VkDevice device,
                                       VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSet descriptorSet,
                                       bool testPipelineRobustness)

    : TestEnvironment(context, vk, device, descriptorSetLayout, descriptorSet)
{
    // Create pipeline layout
    {
        const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                       // const void* pNext;
            0u,                                            // VkPipelineLayoutCreateFlags flags;
            1u,                                            // uint32_t setLayoutCount;
            &m_descriptorSetLayout,                        // const VkDescriptorSetLayout* pSetLayouts;
            0u,                                            // uint32_t pushConstantRangeCount;
            DE_NULL                                        // const VkPushConstantRange* pPushConstantRanges;
        };

        m_pipelineLayout = createPipelineLayout(vk, m_device, &pipelineLayoutParams);
    }

    // Create compute pipeline
    {
        m_computeShaderModule = createShaderModule(vk, m_device, m_context.getBinaryCollection().get("compute"), 0);

        const VkPipelineShaderStageCreateInfo computeStageParams = {
            VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
            DE_NULL,                                             // const void* pNext;
            0u,                                                  // VkPipelineShaderStageCreateFlags flags;
            VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
            *m_computeShaderModule,                              // VkShaderModule module;
            "main",                                              // const char* pName;
            DE_NULL,                                             // const VkSpecializationInfo* pSpecializationInfo;
        };

        // Optional pNext chain for pipeline-level robustness (not available in Vulkan SC).
        const void *pNext = DE_NULL;
#ifndef CTS_USES_VULKANSC
        VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo = initVulkanStructure();

        if (testPipelineRobustness)
        {
            pipelineRobustnessInfo.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
            pipelineRobustnessInfo.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
            pipelineRobustnessInfo.vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
            pipelineRobustnessInfo.images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;
            pNext = &pipelineRobustnessInfo;
        }
#else
        DE_UNREF(testPipelineRobustness);
#endif

        const VkComputePipelineCreateInfo computePipelineParams = {
            VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
            pNext,                                          // const void* pNext;
            0u,                                             // VkPipelineCreateFlags flags;
            computeStageParams,                             // VkPipelineShaderStageCreateInfo stage;
            *m_pipelineLayout,                              // VkPipelineLayout layout;
            DE_NULL,                                        // VkPipeline basePipelineHandle;
            0u                                              // int32_t basePipelineIndex;
        };

        m_computePipeline = createComputePipeline(vk, m_device, DE_NULL, &computePipelineParams);
    }

    // Record commands
    {
        beginCommandBuffer(vk, *m_commandBuffer, 0u);
        vk.cmdBindPipeline(*m_commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_computePipeline);
        vk.cmdBindDescriptorSets(*m_commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0, 1,
                                 &m_descriptorSet, 0, DE_NULL);
        vk.cmdDispatch(*m_commandBuffer, 32, 32, 1);

        // Make shader writes visible to subsequent host reads.
        const VkMemoryBarrier barrier = {
            VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
            nullptr,                          // pNext
            VK_ACCESS_SHADER_WRITE_BIT,       // srcAccessMask
            VK_ACCESS_HOST_READ_BIT,          // dstAccessMask
        };
        vk.cmdPipelineBarrier(*m_commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                              (VkDependencyFlags)0, 1, &barrier, 0, nullptr, 0, nullptr);

        endCommandBuffer(vk, *m_commandBuffer);
    }
}
620
621 } // namespace robustness
622 } // namespace vkt
623