1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Synchronization tests utilities
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSynchronizationUtil.hpp"
25 #include "vkTypeUtil.hpp"
26 #include "vkCmdUtil.hpp"
27 #include "vkBarrierUtil.hpp"
28 #include "deStringUtil.hpp"
29 #include <set>
30 #include <limits>
31 
32 namespace vkt
33 {
34 namespace synchronization
35 {
36 using namespace vk;
37 
makeCommandBuffer(const DeviceInterface & vk,const VkDevice device,const VkCommandPool commandPool)38 Move<VkCommandBuffer> makeCommandBuffer(const DeviceInterface &vk, const VkDevice device,
39                                         const VkCommandPool commandPool)
40 {
41     const VkCommandBufferAllocateInfo info = {
42         VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
43         DE_NULL,                                        // const void* pNext;
44         commandPool,                                    // VkCommandPool commandPool;
45         VK_COMMAND_BUFFER_LEVEL_PRIMARY,                // VkCommandBufferLevel level;
46         1u,                                             // uint32_t commandBufferCount;
47     };
48     return allocateCommandBuffer(vk, device, &info);
49 }
50 
makeComputePipeline(const DeviceInterface & vk,const VkDevice device,const VkPipelineLayout pipelineLayout,const VkShaderModule shaderModule,const VkSpecializationInfo * specInfo,PipelineCacheData & pipelineCacheData,de::SharedPtr<vk::ResourceInterface> resourceInterface)51 Move<VkPipeline> makeComputePipeline(const DeviceInterface &vk, const VkDevice device,
52                                      const VkPipelineLayout pipelineLayout, const VkShaderModule shaderModule,
53                                      const VkSpecializationInfo *specInfo, PipelineCacheData &pipelineCacheData,
54                                      de::SharedPtr<vk::ResourceInterface> resourceInterface)
55 {
56     const VkPipelineShaderStageCreateInfo shaderStageInfo = {
57         VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
58         DE_NULL,                                             // const void* pNext;
59         (VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags flags;
60         VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
61         shaderModule,                                        // VkShaderModule module;
62         "main",                                              // const char* pName;
63         specInfo,                                            // const VkSpecializationInfo* pSpecializationInfo;
64     };
65     const VkComputePipelineCreateInfo pipelineInfo = {
66         VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
67         DE_NULL,                                        // const void* pNext;
68         (VkPipelineCreateFlags)0,                       // VkPipelineCreateFlags flags;
69         shaderStageInfo,                                // VkPipelineShaderStageCreateInfo stage;
70         pipelineLayout,                                 // VkPipelineLayout layout;
71         DE_NULL,                                        // VkPipeline basePipelineHandle;
72         0,                                              // int32_t basePipelineIndex;
73     };
74 
75     {
76         const vk::Unique<vk::VkPipelineCache> pipelineCache(
77             pipelineCacheData.createPipelineCache(vk, device, resourceInterface));
78 
79         vk::Move<vk::VkPipeline> pipeline(createComputePipeline(vk, device, *pipelineCache, &pipelineInfo));
80 
81         // Refresh data from cache
82         pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);
83 
84         return pipeline;
85     }
86 }
87 
makeImageCreateInfo(const VkImageType imageType,const VkExtent3D & extent,const VkFormat format,const VkImageUsageFlags usage,const VkSampleCountFlagBits samples,const VkImageTiling tiling)88 VkImageCreateInfo makeImageCreateInfo(const VkImageType imageType, const VkExtent3D &extent, const VkFormat format,
89                                       const VkImageUsageFlags usage, const VkSampleCountFlagBits samples,
90                                       const VkImageTiling tiling)
91 {
92     return {
93         VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType          sType;
94         DE_NULL,                             // const void*              pNext;
95         (VkImageCreateFlags)0,               // VkImageCreateFlags       flags;
96         imageType,                           // VkImageType              imageType;
97         format,                              // VkFormat                 format;
98         extent,                              // VkExtent3D               extent;
99         1u,                                  // uint32_t                 mipLevels;
100         1u,                                  // uint32_t                 arrayLayers;
101         samples,                             // VkSampleCountFlagBits    samples;
102         tiling,                              // VkImageTiling            tiling;
103         usage,                               // VkImageUsageFlags        usage;
104         VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode            sharingMode;
105         0u,                                  // uint32_t                 queueFamilyIndexCount;
106         DE_NULL,                             // const uint32_t*          pQueueFamilyIndices;
107         VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout            initialLayout;
108     };
109 }
110 
beginRenderPassWithRasterizationDisabled(const DeviceInterface & vk,const VkCommandBuffer commandBuffer,const VkRenderPass renderPass,const VkFramebuffer framebuffer)111 void beginRenderPassWithRasterizationDisabled(const DeviceInterface &vk, const VkCommandBuffer commandBuffer,
112                                               const VkRenderPass renderPass, const VkFramebuffer framebuffer)
113 {
114     const VkRect2D renderArea = {{0, 0}, {0, 0}};
115 
116     beginRenderPass(vk, commandBuffer, renderPass, framebuffer, renderArea);
117 }
118 
setShader(const DeviceInterface & vk,const VkDevice device,const VkShaderStageFlagBits stage,const ProgramBinary & binary,const VkSpecializationInfo * specInfo)119 GraphicsPipelineBuilder &GraphicsPipelineBuilder::setShader(const DeviceInterface &vk, const VkDevice device,
120                                                             const VkShaderStageFlagBits stage,
121                                                             const ProgramBinary &binary,
122                                                             const VkSpecializationInfo *specInfo)
123 {
124     VkShaderModule module;
125     switch (stage)
126     {
127     case (VK_SHADER_STAGE_VERTEX_BIT):
128         DE_ASSERT(m_vertexShaderModule.get() == DE_NULL);
129         m_vertexShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
130         module               = *m_vertexShaderModule;
131         break;
132 
133     case (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT):
134         DE_ASSERT(m_tessControlShaderModule.get() == DE_NULL);
135         m_tessControlShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
136         module                    = *m_tessControlShaderModule;
137         break;
138 
139     case (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT):
140         DE_ASSERT(m_tessEvaluationShaderModule.get() == DE_NULL);
141         m_tessEvaluationShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
142         module                       = *m_tessEvaluationShaderModule;
143         break;
144 
145     case (VK_SHADER_STAGE_GEOMETRY_BIT):
146         DE_ASSERT(m_geometryShaderModule.get() == DE_NULL);
147         m_geometryShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
148         module                 = *m_geometryShaderModule;
149         break;
150 
151     case (VK_SHADER_STAGE_FRAGMENT_BIT):
152         DE_ASSERT(m_fragmentShaderModule.get() == DE_NULL);
153         m_fragmentShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
154         module                 = *m_fragmentShaderModule;
155         break;
156 
157     default:
158         DE_FATAL("Invalid shader stage");
159         return *this;
160     }
161 
162     const VkPipelineShaderStageCreateInfo pipelineShaderStageInfo = {
163         VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
164         DE_NULL,                                             // const void* pNext;
165         (VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags flags;
166         stage,                                               // VkShaderStageFlagBits stage;
167         module,                                              // VkShaderModule module;
168         "main",                                              // const char* pName;
169         specInfo,                                            // const VkSpecializationInfo* pSpecializationInfo;
170     };
171 
172     m_shaderStageFlags |= stage;
173     m_shaderStages.push_back(pipelineShaderStageInfo);
174 
175     return *this;
176 }
177 
setVertexInputSingleAttribute(const VkFormat vertexFormat,const uint32_t stride)178 GraphicsPipelineBuilder &GraphicsPipelineBuilder::setVertexInputSingleAttribute(const VkFormat vertexFormat,
179                                                                                 const uint32_t stride)
180 {
181     const VkVertexInputBindingDescription bindingDesc = {
182         0u,                          // uint32_t binding;
183         stride,                      // uint32_t stride;
184         VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
185     };
186     const VkVertexInputAttributeDescription attributeDesc = {
187         0u,           // uint32_t location;
188         0u,           // uint32_t binding;
189         vertexFormat, // VkFormat format;
190         0u,           // uint32_t offset;
191     };
192 
193     m_vertexInputBindings.clear();
194     m_vertexInputBindings.push_back(bindingDesc);
195 
196     m_vertexInputAttributes.clear();
197     m_vertexInputAttributes.push_back(attributeDesc);
198 
199     return *this;
200 }
201 
202 template <typename T>
dataPointer(const std::vector<T> & vec)203 inline const T *dataPointer(const std::vector<T> &vec)
204 {
205     return (vec.size() != 0 ? &vec[0] : DE_NULL);
206 }
207 
build(const DeviceInterface & vk,const VkDevice device,const VkPipelineLayout pipelineLayout,const VkRenderPass renderPass,PipelineCacheData & pipelineCacheData,de::SharedPtr<vk::ResourceInterface> resourceInterface)208 Move<VkPipeline> GraphicsPipelineBuilder::build(const DeviceInterface &vk, const VkDevice device,
209                                                 const VkPipelineLayout pipelineLayout, const VkRenderPass renderPass,
210                                                 PipelineCacheData &pipelineCacheData,
211                                                 de::SharedPtr<vk::ResourceInterface> resourceInterface)
212 {
213     const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo = {
214         VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType                             sType;
215         DE_NULL,                                                   // const void*                                 pNext;
216         (VkPipelineVertexInputStateCreateFlags)0,                  // VkPipelineVertexInputStateCreateFlags       flags;
217         static_cast<uint32_t>(
218             m_vertexInputBindings.size()), // uint32_t                                    vertexBindingDescriptionCount;
219         dataPointer(m_vertexInputBindings), // const VkVertexInputBindingDescription*      pVertexBindingDescriptions;
220         static_cast<uint32_t>(
221             m_vertexInputAttributes
222                 .size()), // uint32_t                                    vertexAttributeDescriptionCount;
223         dataPointer(
224             m_vertexInputAttributes), // const VkVertexInputAttributeDescription*    pVertexAttributeDescriptions;
225     };
226 
227     const VkPrimitiveTopology topology = (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ?
228                                              VK_PRIMITIVE_TOPOLOGY_PATCH_LIST :
229                                              m_primitiveTopology;
230     const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo = {
231         VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType                             sType;
232         DE_NULL,                                    // const void*                                 pNext;
233         (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags     flags;
234         topology,                                   // VkPrimitiveTopology                         topology;
235         VK_FALSE, // VkBool32                                    primitiveRestartEnable;
236     };
237 
238     const VkPipelineTessellationStateCreateInfo pipelineTessellationStateInfo = {
239         VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType                             sType;
240         DE_NULL,                                                   // const void*                                 pNext;
241         (VkPipelineTessellationStateCreateFlags)0,                 // VkPipelineTessellationStateCreateFlags      flags;
242         m_patchControlPoints, // uint32_t                                    patchControlPoints;
243     };
244 
245     const VkViewport viewport = makeViewport(m_renderSize);
246     const VkRect2D scissor    = makeRect2D(m_renderSize);
247 
248     const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo = {
249         VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType                             sType;
250         DE_NULL,                                               // const void*                                 pNext;
251         (VkPipelineViewportStateCreateFlags)0,                 // VkPipelineViewportStateCreateFlags          flags;
252         1u,        // uint32_t                                    viewportCount;
253         &viewport, // const VkViewport*                           pViewports;
254         1u,        // uint32_t                                    scissorCount;
255         &scissor,  // const VkRect2D*                             pScissors;
256     };
257 
258     const bool isRasterizationDisabled = ((m_shaderStageFlags & VK_SHADER_STAGE_FRAGMENT_BIT) == 0);
259     const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo = {
260         VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType                          sType;
261         DE_NULL,                                                    // const void*                              pNext;
262         (VkPipelineRasterizationStateCreateFlags)0,                 // VkPipelineRasterizationStateCreateFlags  flags;
263         VK_FALSE,                // VkBool32                                 depthClampEnable;
264         isRasterizationDisabled, // VkBool32                                 rasterizerDiscardEnable;
265         VK_POLYGON_MODE_FILL,    // VkPolygonMode polygonMode;
266         m_cullModeFlags,         // VkCullModeFlags cullMode;
267         m_frontFace,             // VkFrontFace frontFace;
268         VK_FALSE,                // VkBool32 depthBiasEnable;
269         0.0f,                    // float depthBiasConstantFactor;
270         0.0f,                    // float depthBiasClamp;
271         0.0f,                    // float depthBiasSlopeFactor;
272         1.0f,                    // float lineWidth;
273     };
274 
275     const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo = {
276         VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
277         DE_NULL,                                                  // const void* pNext;
278         (VkPipelineMultisampleStateCreateFlags)0,                 // VkPipelineMultisampleStateCreateFlags flags;
279         VK_SAMPLE_COUNT_1_BIT,                                    // VkSampleCountFlagBits rasterizationSamples;
280         VK_FALSE,                                                 // VkBool32 sampleShadingEnable;
281         0.0f,                                                     // float minSampleShading;
282         DE_NULL,                                                  // const VkSampleMask* pSampleMask;
283         VK_FALSE,                                                 // VkBool32 alphaToCoverageEnable;
284         VK_FALSE                                                  // VkBool32 alphaToOneEnable;
285     };
286 
287     const VkStencilOpState stencilOpState = makeStencilOpState(VK_STENCIL_OP_KEEP,  // stencil fail
288                                                                VK_STENCIL_OP_KEEP,  // depth & stencil pass
289                                                                VK_STENCIL_OP_KEEP,  // depth only fail
290                                                                VK_COMPARE_OP_NEVER, // compare op
291                                                                0u,                  // compare mask
292                                                                0u,                  // write mask
293                                                                0u);                 // reference
294 
295     const VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo = {
296         VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
297         DE_NULL,                                                    // const void* pNext;
298         (VkPipelineDepthStencilStateCreateFlags)0,                  // VkPipelineDepthStencilStateCreateFlags flags;
299         VK_FALSE,                                                   // VkBool32 depthTestEnable;
300         VK_FALSE,                                                   // VkBool32 depthWriteEnable;
301         VK_COMPARE_OP_LESS,                                         // VkCompareOp depthCompareOp;
302         VK_FALSE,                                                   // VkBool32 depthBoundsTestEnable;
303         VK_FALSE,                                                   // VkBool32 stencilTestEnable;
304         stencilOpState,                                             // VkStencilOpState front;
305         stencilOpState,                                             // VkStencilOpState back;
306         0.0f,                                                       // float minDepthBounds;
307         1.0f,                                                       // float maxDepthBounds;
308     };
309 
310     const VkColorComponentFlags colorComponentsAll =
311         VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
312     const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState = {
313         m_blendEnable,             // VkBool32 blendEnable;
314         VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcColorBlendFactor;
315         VK_BLEND_FACTOR_ONE,       // VkBlendFactor dstColorBlendFactor;
316         VK_BLEND_OP_ADD,           // VkBlendOp colorBlendOp;
317         VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcAlphaBlendFactor;
318         VK_BLEND_FACTOR_ONE,       // VkBlendFactor dstAlphaBlendFactor;
319         VK_BLEND_OP_ADD,           // VkBlendOp alphaBlendOp;
320         colorComponentsAll,        // VkColorComponentFlags colorWriteMask;
321     };
322 
323     const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo = {
324         VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
325         DE_NULL,                                                  // const void* pNext;
326         (VkPipelineColorBlendStateCreateFlags)0,                  // VkPipelineColorBlendStateCreateFlags flags;
327         VK_FALSE,                                                 // VkBool32 logicOpEnable;
328         VK_LOGIC_OP_COPY,                                         // VkLogicOp logicOp;
329         1u,                                                       // uint32_t attachmentCount;
330         &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
331         {0.0f, 0.0f, 0.0f, 0.0f},           // float blendConstants[4];
332     };
333 
334     const VkGraphicsPipelineCreateInfo graphicsPipelineInfo = {
335         VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
336         DE_NULL,                                         // const void* pNext;
337         (VkPipelineCreateFlags)0,                        // VkPipelineCreateFlags flags;
338         static_cast<uint32_t>(m_shaderStages.size()),    // uint32_t stageCount;
339         &m_shaderStages[0],                              // const VkPipelineShaderStageCreateInfo* pStages;
340         &vertexInputStateInfo,           // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
341         &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
342         (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ?
343              &pipelineTessellationStateInfo :
344              DE_NULL), // const VkPipelineTessellationStateCreateInfo* pTessellationState;
345         (isRasterizationDisabled ?
346              DE_NULL :
347              &pipelineViewportStateInfo), // const VkPipelineViewportStateCreateInfo* pViewportState;
348         &pipelineRasterizationStateInfo,  // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
349         (isRasterizationDisabled ?
350              DE_NULL :
351              &pipelineMultisampleStateInfo), // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
352         (isRasterizationDisabled ?
353              DE_NULL :
354              &pipelineDepthStencilStateInfo), // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
355         (isRasterizationDisabled ?
356              DE_NULL :
357              &pipelineColorBlendStateInfo), // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
358         DE_NULL,                            // const VkPipelineDynamicStateCreateInfo* pDynamicState;
359         pipelineLayout,                     // VkPipelineLayout layout;
360         renderPass,                         // VkRenderPass renderPass;
361         0u,                                 // uint32_t subpass;
362         DE_NULL,                            // VkPipeline basePipelineHandle;
363         0,                                  // int32_t basePipelineIndex;
364     };
365 
366     {
367         const vk::Unique<vk::VkPipelineCache> pipelineCache(
368             pipelineCacheData.createPipelineCache(vk, device, resourceInterface));
369         vk::Move<vk::VkPipeline> pipeline(createGraphicsPipeline(vk, device, *pipelineCache, &graphicsPipelineInfo));
370 
371         // Refresh data from cache
372         pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);
373 
374         return pipeline;
375     }
376 }
377 
378 // Uses some structures added by VK_KHR_synchronization2 to fill legacy structures.
379 // With this approach we dont have to create branch in each test (one for legacy
380 // second for new synchronization), this helps to reduce code of some tests.
381 class LegacySynchronizationWrapper : public SynchronizationWrapperBase
382 {
383 protected:
384     struct SubmitInfoData
385     {
386         uint32_t waitSemaphoreCount;
387         std::size_t waitSemaphoreIndex;
388         std::size_t waitSemaphoreValueIndexPlusOne;
389         uint32_t commandBufferCount;
390         uint32_t commandBufferIndex;
391         uint32_t signalSemaphoreCount;
392         std::size_t signalSemaphoreIndex;
393         std::size_t signalSemaphoreValueIndexPlusOne;
394     };
395 
isStageFlagAllowed(VkPipelineStageFlags2 stage) const396     bool isStageFlagAllowed(VkPipelineStageFlags2 stage) const
397     {
398         // synchronization2 suports more stages then legacy synchronization
399         // and so SynchronizationWrapper can only be used for cases that
400         // operate on stages also supported by legacy synchronization
401         // NOTE: if some tests hits assertion that uses this method then this
402         // test should not use synchronizationWrapper - it should be synchronization2 exclusive
403 
404         static const std::set<uint32_t> allowedStages{
405             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
406             VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
407             VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
408             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
409             VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
410             VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
411             VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
412             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
413             VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
414             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
415             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
416             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
417             VK_PIPELINE_STAGE_TRANSFER_BIT,
418             VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
419             VK_PIPELINE_STAGE_HOST_BIT,
420             VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
421             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
422 #ifndef CTS_USES_VULKANSC
423             VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
424             VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
425             VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,
426             VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
427 #endif // CTS_USES_VULKANSC
428             VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
429 #ifndef CTS_USES_VULKANSC
430             VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
431             VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
432             VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
433             VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
434 #endif // CTS_USES_VULKANSC
435             VK_PIPELINE_STAGE_NONE_KHR,
436         };
437 
438         if (stage > static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()))
439             return false;
440 
441         return (allowedStages.find(static_cast<uint32_t>(stage)) != allowedStages.end());
442     }
443 
isAccessFlagAllowed(VkAccessFlags2 access) const444     bool isAccessFlagAllowed(VkAccessFlags2 access) const
445     {
446         // synchronization2 suports more access flags then legacy synchronization
447         // and so SynchronizationWrapper can only be used for cases that
448         // operate on access flags also supported by legacy synchronization
449         // NOTE: if some tests hits assertion that uses this method then this
450         // test should not use synchronizationWrapper - it should be synchronization2 exclusive
451 
452         static const std::set<uint32_t> allowedAccessFlags{
453             VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
454             VK_ACCESS_INDEX_READ_BIT,
455             VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
456             VK_ACCESS_UNIFORM_READ_BIT,
457             VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
458             VK_ACCESS_SHADER_READ_BIT,
459             VK_ACCESS_SHADER_WRITE_BIT,
460             VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
461             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
462             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
463             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
464             VK_ACCESS_TRANSFER_READ_BIT,
465             VK_ACCESS_TRANSFER_WRITE_BIT,
466             VK_ACCESS_HOST_READ_BIT,
467             VK_ACCESS_HOST_WRITE_BIT,
468             VK_ACCESS_MEMORY_READ_BIT,
469             VK_ACCESS_MEMORY_WRITE_BIT,
470 #ifndef CTS_USES_VULKANSC
471             VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT,
472             VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
473             VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
474             VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
475 #endif // CTS_USES_VULKANSC
476             VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT,
477 #ifndef CTS_USES_VULKANSC
478             VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR,
479             VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
480 #endif // CTS_USES_VULKANSC
481             VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
482 #ifndef CTS_USES_VULKANSC
483             VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT,
484             VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV,
485             VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV,
486 #endif // CTS_USES_VULKANSC
487             VK_ACCESS_NONE_KHR,
488         };
489 
490         if (access > static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()))
491             return false;
492 
493         return (allowedAccessFlags.find(static_cast<uint32_t>(access)) != allowedAccessFlags.end());
494     }
495 
496 public:
LegacySynchronizationWrapper(const DeviceInterface & vk,bool usingTimelineSemaphores,uint32_t submitInfoCount=1u)497     LegacySynchronizationWrapper(const DeviceInterface &vk, bool usingTimelineSemaphores, uint32_t submitInfoCount = 1u)
498         : SynchronizationWrapperBase(vk)
499         , m_submited(false)
500     {
501         m_waitSemaphores.reserve(submitInfoCount);
502         m_signalSemaphores.reserve(submitInfoCount);
503         m_waitDstStageMasks.reserve(submitInfoCount);
504         m_commandBuffers.reserve(submitInfoCount);
505         m_submitInfoData.reserve(submitInfoCount);
506 
507         if (usingTimelineSemaphores)
508             m_timelineSemaphoreValues.reserve(2 * submitInfoCount);
509     }
510 
511     ~LegacySynchronizationWrapper() = default;
512 
addSubmitInfo(uint32_t waitSemaphoreInfoCount,const VkSemaphoreSubmitInfo * pWaitSemaphoreInfos,uint32_t commandBufferInfoCount,const VkCommandBufferSubmitInfo * pCommandBufferInfos,uint32_t signalSemaphoreInfoCount,const VkSemaphoreSubmitInfo * pSignalSemaphoreInfos,bool usingWaitTimelineSemaphore,bool usingSignalTimelineSemaphore)513     void addSubmitInfo(uint32_t waitSemaphoreInfoCount, const VkSemaphoreSubmitInfo *pWaitSemaphoreInfos,
514                        uint32_t commandBufferInfoCount, const VkCommandBufferSubmitInfo *pCommandBufferInfos,
515                        uint32_t signalSemaphoreInfoCount, const VkSemaphoreSubmitInfo *pSignalSemaphoreInfos,
516                        bool usingWaitTimelineSemaphore, bool usingSignalTimelineSemaphore) override
517     {
518         m_submitInfoData.push_back(
519             SubmitInfoData{waitSemaphoreInfoCount, 0, 0, commandBufferInfoCount, 0u, signalSemaphoreInfoCount, 0, 0});
520         SubmitInfoData &si = m_submitInfoData.back();
521 
522         // memorize wait values
523         if (usingWaitTimelineSemaphore)
524         {
525             DE_ASSERT(pWaitSemaphoreInfos);
526             si.waitSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
527             for (uint32_t i = 0; i < waitSemaphoreInfoCount; ++i)
528                 m_timelineSemaphoreValues.push_back(pWaitSemaphoreInfos[i].value);
529         }
530 
531         // memorize signal values
532         if (usingSignalTimelineSemaphore)
533         {
534             DE_ASSERT(pSignalSemaphoreInfos);
535             si.signalSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
536             for (uint32_t i = 0; i < signalSemaphoreInfoCount; ++i)
537                 m_timelineSemaphoreValues.push_back(pSignalSemaphoreInfos[i].value);
538         }
539 
540         // construct list of semaphores that we need to wait on
541         if (waitSemaphoreInfoCount)
542         {
543             si.waitSemaphoreIndex = m_waitSemaphores.size();
544             for (uint32_t i = 0; i < waitSemaphoreInfoCount; ++i)
545             {
546                 DE_ASSERT(isStageFlagAllowed(pWaitSemaphoreInfos[i].stageMask));
547                 m_waitSemaphores.push_back(pWaitSemaphoreInfos[i].semaphore);
548                 m_waitDstStageMasks.push_back(static_cast<VkPipelineStageFlags>(pWaitSemaphoreInfos[i].stageMask));
549             }
550         }
551 
552         // construct list of command buffers
553         if (commandBufferInfoCount)
554         {
555             si.commandBufferIndex = static_cast<uint32_t>(m_commandBuffers.size());
556             for (uint32_t i = 0; i < commandBufferInfoCount; ++i)
557                 m_commandBuffers.push_back(pCommandBufferInfos[i].commandBuffer);
558         }
559 
560         // construct list of semaphores that will be signaled
561         if (signalSemaphoreInfoCount)
562         {
563             si.signalSemaphoreIndex = m_signalSemaphores.size();
564             for (uint32_t i = 0; i < signalSemaphoreInfoCount; ++i)
565                 m_signalSemaphores.push_back(pSignalSemaphoreInfos[i].semaphore);
566         }
567     }
568 
cmdPipelineBarrier(VkCommandBuffer commandBuffer,const VkDependencyInfo * pDependencyInfo) const569     void cmdPipelineBarrier(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) const override
570     {
571         DE_ASSERT(pDependencyInfo);
572 
573 #ifndef CTS_USES_VULKANSC
574         VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_NONE;
575         VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_NONE;
576 #else
577         VkPipelineStageFlags srcStageMask  = VK_PIPELINE_STAGE_NONE_KHR;
578         VkPipelineStageFlags dstStageMask  = VK_PIPELINE_STAGE_NONE_KHR;
579 #endif // CTS_USES_VULKANSC
580         uint32_t memoryBarrierCount                  = pDependencyInfo->memoryBarrierCount;
581         VkMemoryBarrier *pMemoryBarriers             = DE_NULL;
582         uint32_t bufferMemoryBarrierCount            = pDependencyInfo->bufferMemoryBarrierCount;
583         VkBufferMemoryBarrier *pBufferMemoryBarriers = DE_NULL;
584         uint32_t imageMemoryBarrierCount             = pDependencyInfo->imageMemoryBarrierCount;
585         VkImageMemoryBarrier *pImageMemoryBarriers   = DE_NULL;
586 
587         // translate VkMemoryBarrier2 to VkMemoryBarrier
588         std::vector<VkMemoryBarrier> memoryBarriers;
589         if (memoryBarrierCount)
590         {
591             memoryBarriers.reserve(memoryBarrierCount);
592             for (uint32_t i = 0; i < memoryBarrierCount; ++i)
593             {
594                 const VkMemoryBarrier2 &pMemoryBarrier = pDependencyInfo->pMemoryBarriers[i];
595 
596                 DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.srcStageMask));
597                 DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.dstStageMask));
598                 DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.srcAccessMask));
599                 DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.dstAccessMask));
600 
601                 srcStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.srcStageMask);
602                 dstStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.dstStageMask);
603                 memoryBarriers.push_back(makeMemoryBarrier(static_cast<VkAccessFlags>(pMemoryBarrier.srcAccessMask),
604                                                            static_cast<VkAccessFlags>(pMemoryBarrier.dstAccessMask)));
605             }
606             pMemoryBarriers = &memoryBarriers[0];
607         }
608 
609         // translate VkBufferMemoryBarrier2 to VkBufferMemoryBarrier
610         std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
611         if (bufferMemoryBarrierCount)
612         {
613             bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
614             for (uint32_t i = 0; i < bufferMemoryBarrierCount; ++i)
615             {
616                 const VkBufferMemoryBarrier2 &pBufferMemoryBarrier = pDependencyInfo->pBufferMemoryBarriers[i];
617 
618                 DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.srcStageMask));
619                 DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.dstStageMask));
620                 DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.srcAccessMask));
621                 DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.dstAccessMask));
622 
623                 srcStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.srcStageMask);
624                 dstStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.dstStageMask);
625                 bufferMemoryBarriers.push_back(makeBufferMemoryBarrier(
626                     static_cast<VkAccessFlags>(pBufferMemoryBarrier.srcAccessMask),
627                     static_cast<VkAccessFlags>(pBufferMemoryBarrier.dstAccessMask), pBufferMemoryBarrier.buffer,
628                     pBufferMemoryBarrier.offset, pBufferMemoryBarrier.size, pBufferMemoryBarrier.srcQueueFamilyIndex,
629                     pBufferMemoryBarrier.dstQueueFamilyIndex));
630             }
631             pBufferMemoryBarriers = &bufferMemoryBarriers[0];
632         }
633 
634         // translate VkImageMemoryBarrier2 to VkImageMemoryBarrier
635         std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
636         if (imageMemoryBarrierCount)
637         {
638             imageMemoryBarriers.reserve(imageMemoryBarrierCount);
639             for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i)
640             {
641                 const VkImageMemoryBarrier2 &pImageMemoryBarrier = pDependencyInfo->pImageMemoryBarriers[i];
642 
643                 DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.srcStageMask));
644                 DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.dstStageMask));
645                 DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.srcAccessMask));
646                 DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.dstAccessMask));
647 
648                 srcStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.srcStageMask);
649                 dstStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.dstStageMask);
650                 imageMemoryBarriers.push_back(makeImageMemoryBarrier(
651                     static_cast<VkAccessFlags>(pImageMemoryBarrier.srcAccessMask),
652                     static_cast<VkAccessFlags>(pImageMemoryBarrier.dstAccessMask), pImageMemoryBarrier.oldLayout,
653                     pImageMemoryBarrier.newLayout, pImageMemoryBarrier.image, pImageMemoryBarrier.subresourceRange,
654                     pImageMemoryBarrier.srcQueueFamilyIndex, pImageMemoryBarrier.dstQueueFamilyIndex));
655             }
656             pImageMemoryBarriers = &imageMemoryBarriers[0];
657         }
658 
659         m_vk.cmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, (VkDependencyFlags)0, memoryBarrierCount,
660                                 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
661                                 imageMemoryBarrierCount, pImageMemoryBarriers);
662     }
663 
cmdSetEvent(VkCommandBuffer commandBuffer,VkEvent event,const VkDependencyInfo * pDependencyInfo) const664     void cmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event,
665                      const VkDependencyInfo *pDependencyInfo) const override
666     {
667         DE_ASSERT(pDependencyInfo);
668 
669 #ifndef CTS_USES_VULKANSC
670         VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
671 #else
672         VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
673 #endif // CTS_USES_VULKANSC
674         if (pDependencyInfo->pMemoryBarriers)
675             srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
676         if (pDependencyInfo->pBufferMemoryBarriers)
677             srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
678         if (pDependencyInfo->pImageMemoryBarriers)
679             srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;
680 
681         DE_ASSERT(isStageFlagAllowed(srcStageMask));
682         m_vk.cmdSetEvent(commandBuffer, event, static_cast<VkPipelineStageFlags>(srcStageMask));
683     }
684 
cmdResetEvent(VkCommandBuffer commandBuffer,VkEvent event,VkPipelineStageFlags2 flag) const685     void cmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 flag) const override
686     {
687         DE_ASSERT(isStageFlagAllowed(flag));
688         VkPipelineStageFlags legacyStageMask = static_cast<VkPipelineStageFlags>(flag);
689         m_vk.cmdResetEvent(commandBuffer, event, legacyStageMask);
690     }
691 
cmdWaitEvents(VkCommandBuffer commandBuffer,uint32_t eventCount,const VkEvent * pEvents,const VkDependencyInfo * pDependencyInfo) const692     void cmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
693                        const VkDependencyInfo *pDependencyInfo) const override
694     {
695         DE_ASSERT(pDependencyInfo);
696 
697 #ifndef CTS_USES_VULKANSC
698         VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
699         VkPipelineStageFlags2 dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
700 #else
701         VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
702         VkPipelineStageFlags2 dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR;
703 #endif // CTS_USES_VULKANSC
704         uint32_t memoryBarrierCount                  = pDependencyInfo->memoryBarrierCount;
705         uint32_t bufferMemoryBarrierCount            = pDependencyInfo->bufferMemoryBarrierCount;
706         uint32_t imageMemoryBarrierCount             = pDependencyInfo->imageMemoryBarrierCount;
707         VkMemoryBarrier *pMemoryBarriers             = DE_NULL;
708         VkBufferMemoryBarrier *pBufferMemoryBarriers = DE_NULL;
709         VkImageMemoryBarrier *pImageMemoryBarriers   = DE_NULL;
710         std::vector<VkMemoryBarrier> memoryBarriers;
711         std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
712         std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
713 
714         if (pDependencyInfo->pMemoryBarriers)
715         {
716             srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
717             dstStageMask = pDependencyInfo->pMemoryBarriers[0].dstStageMask;
718 
719             memoryBarriers.reserve(memoryBarrierCount);
720             for (uint32_t i = 0; i < memoryBarrierCount; ++i)
721             {
722                 const VkMemoryBarrier2 &mb = pDependencyInfo->pMemoryBarriers[i];
723                 DE_ASSERT(isAccessFlagAllowed(mb.srcAccessMask));
724                 DE_ASSERT(isAccessFlagAllowed(mb.dstAccessMask));
725                 memoryBarriers.push_back(makeMemoryBarrier(static_cast<VkAccessFlags>(mb.srcAccessMask),
726                                                            static_cast<VkAccessFlags>(mb.dstAccessMask)));
727             }
728             pMemoryBarriers = &memoryBarriers[0];
729         }
730         if (pDependencyInfo->pBufferMemoryBarriers)
731         {
732             srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
733             dstStageMask = pDependencyInfo->pBufferMemoryBarriers[0].dstStageMask;
734 
735             bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
736             for (uint32_t i = 0; i < bufferMemoryBarrierCount; ++i)
737             {
738                 const VkBufferMemoryBarrier2 &bmb = pDependencyInfo->pBufferMemoryBarriers[i];
739                 DE_ASSERT(isAccessFlagAllowed(bmb.srcAccessMask));
740                 DE_ASSERT(isAccessFlagAllowed(bmb.dstAccessMask));
741                 bufferMemoryBarriers.push_back(makeBufferMemoryBarrier(
742                     static_cast<VkAccessFlags>(bmb.srcAccessMask), static_cast<VkAccessFlags>(bmb.dstAccessMask),
743                     bmb.buffer, bmb.offset, bmb.size, bmb.srcQueueFamilyIndex, bmb.dstQueueFamilyIndex));
744             }
745             pBufferMemoryBarriers = &bufferMemoryBarriers[0];
746         }
747         if (pDependencyInfo->pImageMemoryBarriers)
748         {
749             srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;
750             dstStageMask = pDependencyInfo->pImageMemoryBarriers[0].dstStageMask;
751 
752             imageMemoryBarriers.reserve(imageMemoryBarrierCount);
753             for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i)
754             {
755                 const VkImageMemoryBarrier2 &imb = pDependencyInfo->pImageMemoryBarriers[i];
756                 DE_ASSERT(isAccessFlagAllowed(imb.srcAccessMask));
757                 DE_ASSERT(isAccessFlagAllowed(imb.dstAccessMask));
758                 imageMemoryBarriers.push_back(makeImageMemoryBarrier(
759                     static_cast<VkAccessFlags>(imb.srcAccessMask), static_cast<VkAccessFlags>(imb.dstAccessMask),
760                     imb.oldLayout, imb.newLayout, imb.image, imb.subresourceRange, imb.srcQueueFamilyIndex,
761                     imb.dstQueueFamilyIndex));
762             }
763             pImageMemoryBarriers = &imageMemoryBarriers[0];
764         }
765 
766         DE_ASSERT(isStageFlagAllowed(srcStageMask));
767         DE_ASSERT(isStageFlagAllowed(dstStageMask));
768         m_vk.cmdWaitEvents(commandBuffer, eventCount, pEvents, static_cast<VkPipelineStageFlags>(srcStageMask),
769                            static_cast<VkPipelineStageFlags>(dstStageMask), memoryBarrierCount, pMemoryBarriers,
770                            bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
771                            pImageMemoryBarriers);
772     }
773 
queueSubmit(VkQueue queue,VkFence fence)774     VkResult queueSubmit(VkQueue queue, VkFence fence) override
775     {
776         // make sure submit info was added
777         DE_ASSERT(!m_submitInfoData.empty());
778 
779         // make sure separate LegacySynchronizationWrapper is created per single submit
780         DE_ASSERT(!m_submited);
781 
782         std::vector<VkSubmitInfo> submitInfo(m_submitInfoData.size(), {VK_STRUCTURE_TYPE_SUBMIT_INFO, DE_NULL, 0u,
783                                                                        DE_NULL, DE_NULL, 0u, DE_NULL, 0u, DE_NULL});
784 
785         std::vector<VkTimelineSemaphoreSubmitInfo> timelineSemaphoreSubmitInfo;
786         timelineSemaphoreSubmitInfo.reserve(m_submitInfoData.size());
787 
788         // translate indexes from m_submitInfoData to pointers and construct VkSubmitInfo
789         for (uint32_t i = 0; i < m_submitInfoData.size(); ++i)
790         {
791             auto &data       = m_submitInfoData[i];
792             VkSubmitInfo &si = submitInfo[i];
793 
794             si.waitSemaphoreCount   = data.waitSemaphoreCount;
795             si.commandBufferCount   = data.commandBufferCount;
796             si.signalSemaphoreCount = data.signalSemaphoreCount;
797 
798             if (data.waitSemaphoreValueIndexPlusOne || data.signalSemaphoreValueIndexPlusOne)
799             {
800                 uint64_t *pWaitSemaphoreValues = DE_NULL;
801                 if (data.waitSemaphoreValueIndexPlusOne)
802                     pWaitSemaphoreValues = &m_timelineSemaphoreValues[data.waitSemaphoreValueIndexPlusOne - 1];
803 
804                 uint64_t *pSignalSemaphoreValues = DE_NULL;
805                 if (data.signalSemaphoreValueIndexPlusOne)
806                     pSignalSemaphoreValues = &m_timelineSemaphoreValues[data.signalSemaphoreValueIndexPlusOne - 1];
807 
808                 timelineSemaphoreSubmitInfo.push_back({
809                     VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, // VkStructureType sType;
810                     DE_NULL,                                          // const void* pNext;
811                     data.waitSemaphoreCount,                          // uint32_t            waitSemaphoreValueCount
812                     pWaitSemaphoreValues,                             // const uint64_t*    pWaitSemaphoreValues
813                     data.signalSemaphoreCount,                        // uint32_t            signalSemaphoreValueCount
814                     pSignalSemaphoreValues                            // const uint64_t*    pSignalSemaphoreValues
815                 });
816                 si.pNext = &timelineSemaphoreSubmitInfo.back();
817             }
818 
819             if (data.waitSemaphoreCount)
820             {
821                 si.pWaitSemaphores   = &m_waitSemaphores[data.waitSemaphoreIndex];
822                 si.pWaitDstStageMask = &m_waitDstStageMasks[data.waitSemaphoreIndex];
823             }
824 
825             if (data.commandBufferCount)
826                 si.pCommandBuffers = &m_commandBuffers[data.commandBufferIndex];
827 
828             if (data.signalSemaphoreCount)
829                 si.pSignalSemaphores = &m_signalSemaphores[data.signalSemaphoreIndex];
830         }
831 
832         m_submited = true;
833         return m_vk.queueSubmit(queue, static_cast<uint32_t>(submitInfo.size()), &submitInfo[0], fence);
834     }
835 
836 protected:
837     std::vector<VkSemaphore> m_waitSemaphores;
838     std::vector<VkSemaphore> m_signalSemaphores;
839     std::vector<VkPipelineStageFlags> m_waitDstStageMasks;
840     std::vector<VkCommandBuffer> m_commandBuffers;
841     std::vector<SubmitInfoData> m_submitInfoData;
842     std::vector<uint64_t> m_timelineSemaphoreValues;
843     bool m_submited;
844 };
845 
846 class Synchronization2Wrapper : public SynchronizationWrapperBase
847 {
848 public:
Synchronization2Wrapper(const DeviceInterface & vk,uint32_t submitInfoCount)849     Synchronization2Wrapper(const DeviceInterface &vk, uint32_t submitInfoCount) : SynchronizationWrapperBase(vk)
850     {
851         m_submitInfo.reserve(submitInfoCount);
852     }
853 
854     ~Synchronization2Wrapper() = default;
855 
addSubmitInfo(uint32_t waitSemaphoreInfoCount,const VkSemaphoreSubmitInfo * pWaitSemaphoreInfos,uint32_t commandBufferInfoCount,const VkCommandBufferSubmitInfo * pCommandBufferInfos,uint32_t signalSemaphoreInfoCount,const VkSemaphoreSubmitInfo * pSignalSemaphoreInfos,bool usingWaitTimelineSemaphore,bool usingSignalTimelineSemaphore)856     void addSubmitInfo(uint32_t waitSemaphoreInfoCount, const VkSemaphoreSubmitInfo *pWaitSemaphoreInfos,
857                        uint32_t commandBufferInfoCount, const VkCommandBufferSubmitInfo *pCommandBufferInfos,
858                        uint32_t signalSemaphoreInfoCount, const VkSemaphoreSubmitInfo *pSignalSemaphoreInfos,
859                        bool usingWaitTimelineSemaphore, bool usingSignalTimelineSemaphore) override
860     {
861         DE_UNREF(usingWaitTimelineSemaphore);
862         DE_UNREF(usingSignalTimelineSemaphore);
863 
864         m_submitInfo.push_back(VkSubmitInfo2{
865 #ifndef CTS_USES_VULKANSC
866             VK_STRUCTURE_TYPE_SUBMIT_INFO_2, // VkStructureType                        sType
867 #else
868             VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR, // VkStructureType                        sType
869 #endif                                // CTS_USES_VULKANSC
870             DE_NULL,                  // const void*                            pNext
871             0u,                       // VkSubmitFlags                        flags
872             waitSemaphoreInfoCount,   // uint32_t                                waitSemaphoreInfoCount
873             pWaitSemaphoreInfos,      // const VkSemaphoreSubmitInfo*            pWaitSemaphoreInfos
874             commandBufferInfoCount,   // uint32_t                                commandBufferInfoCount
875             pCommandBufferInfos,      // const VkCommandBufferSubmitInfo*        pCommandBufferInfos
876             signalSemaphoreInfoCount, // uint32_t                                signalSemaphoreInfoCount
877             pSignalSemaphoreInfos     // const VkSemaphoreSubmitInfo*            pSignalSemaphoreInfos
878         });
879     }
880 
cmdPipelineBarrier(VkCommandBuffer commandBuffer,const VkDependencyInfo * pDependencyInfo) const881     void cmdPipelineBarrier(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo) const override
882     {
883 #ifndef CTS_USES_VULKANSC
884         m_vk.cmdPipelineBarrier2(commandBuffer, pDependencyInfo);
885 #else
886         m_vk.cmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo);
887 #endif // CTS_USES_VULKANSC
888     }
889 
cmdSetEvent(VkCommandBuffer commandBuffer,VkEvent event,const VkDependencyInfo * pDependencyInfo) const890     void cmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event,
891                      const VkDependencyInfo *pDependencyInfo) const override
892     {
893 #ifndef CTS_USES_VULKANSC
894         m_vk.cmdSetEvent2(commandBuffer, event, pDependencyInfo);
895 #else
896         m_vk.cmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
897 #endif // CTS_USES_VULKANSC
898     }
899 
cmdWaitEvents(VkCommandBuffer commandBuffer,uint32_t eventCount,const VkEvent * pEvents,const VkDependencyInfo * pDependencyInfo) const900     void cmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
901                        const VkDependencyInfo *pDependencyInfo) const override
902     {
903 #ifndef CTS_USES_VULKANSC
904         m_vk.cmdWaitEvents2(commandBuffer, eventCount, pEvents, pDependencyInfo);
905 #else
906         m_vk.cmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfo);
907 #endif // CTS_USES_VULKANSC
908     }
909 
cmdResetEvent(VkCommandBuffer commandBuffer,VkEvent event,VkPipelineStageFlags2 flag) const910     void cmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 flag) const override
911     {
912 #ifndef CTS_USES_VULKANSC
913         m_vk.cmdResetEvent2(commandBuffer, event, flag);
914 #else
915         m_vk.cmdResetEvent2KHR(commandBuffer, event, flag);
916 #endif // CTS_USES_VULKANSC
917     }
918 
queueSubmit(VkQueue queue,VkFence fence)919     VkResult queueSubmit(VkQueue queue, VkFence fence) override
920     {
921 #ifndef CTS_USES_VULKANSC
922         return m_vk.queueSubmit2(queue, static_cast<uint32_t>(m_submitInfo.size()), &m_submitInfo[0], fence);
923 #else
924         return m_vk.queueSubmit2KHR(queue, static_cast<uint32_t>(m_submitInfo.size()), &m_submitInfo[0], fence);
925 #endif // CTS_USES_VULKANSC
926     }
927 
928 protected:
929     std::vector<VkSubmitInfo2> m_submitInfo;
930 };
931 
getSynchronizationWrapper(SynchronizationType type,const DeviceInterface & vk,bool usingTimelineSemaphores,uint32_t submitInfoCount)932 SynchronizationWrapperPtr getSynchronizationWrapper(SynchronizationType type, const DeviceInterface &vk,
933                                                     bool usingTimelineSemaphores, uint32_t submitInfoCount)
934 {
935     return (type == SynchronizationType::LEGACY) ?
936                SynchronizationWrapperPtr(
937                    new LegacySynchronizationWrapper(vk, usingTimelineSemaphores, submitInfoCount)) :
938                SynchronizationWrapperPtr(new Synchronization2Wrapper(vk, submitInfoCount));
939 }
940 
submitCommandsAndWait(SynchronizationWrapperPtr synchronizationWrapper,const DeviceInterface & vk,const VkDevice device,const VkQueue queue,const VkCommandBuffer cmdBuffer)941 void submitCommandsAndWait(SynchronizationWrapperPtr synchronizationWrapper, const DeviceInterface &vk,
942                            const VkDevice device, const VkQueue queue, const VkCommandBuffer cmdBuffer)
943 {
944     VkCommandBufferSubmitInfo commandBufferInfo = makeCommonCommandBufferSubmitInfo(cmdBuffer);
945 
946     synchronizationWrapper->addSubmitInfo(
947         0u,                      // uint32_t                                waitSemaphoreInfoCount
948         DE_NULL,                 // const VkSemaphoreSubmitInfo*            pWaitSemaphoreInfos
949         1u,                      // uint32_t                                commandBufferInfoCount
950         &commandBufferInfo,      // const VkCommandBufferSubmitInfo*        pCommandBufferInfos
951         0u,                      // uint32_t                                signalSemaphoreInfoCount
952         DE_NULL                  // const VkSemaphoreSubmitInfo*            pSignalSemaphoreInfos
953     );
954 
955     const Unique<VkFence> fence(createFence(vk, device));
956     VK_CHECK(synchronizationWrapper->queueSubmit(queue, *fence));
957     VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), true, ~0ull));
958 }
959 
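// Throws tcu::NotSupportedError if any physical-device feature requested in 'flags' is not supported.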
960 void requireFeatures(const InstanceInterface &vki, const VkPhysicalDevice physDevice, const FeatureFlags flags)
961 {
962     const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice);
963 
964     if (((flags & FEATURE_TESSELLATION_SHADER) != 0) && !features.tessellationShader)
965         throw tcu::NotSupportedError("Tessellation shader not supported");
966 
967     if (((flags & FEATURE_GEOMETRY_SHADER) != 0) && !features.geometryShader)
968         throw tcu::NotSupportedError("Geometry shader not supported");
969 
970     if (((flags & FEATURE_SHADER_FLOAT_64) != 0) && !features.shaderFloat64)
971         throw tcu::NotSupportedError("Double-precision floats not supported");
972 
973     if (((flags & FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS) != 0) && !features.vertexPipelineStoresAndAtomics)
974         throw tcu::NotSupportedError("SSBO and image writes not supported in vertex pipeline");
975 
976     if (((flags & FEATURE_FRAGMENT_STORES_AND_ATOMICS) != 0) && !features.fragmentStoresAndAtomics)
977         throw tcu::NotSupportedError("SSBO and image writes not supported in fragment shader");
978 
979     if (((flags & FEATURE_SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE) != 0) &&
980         !features.shaderTessellationAndGeometryPointSize)
981         throw tcu::NotSupportedError("Tessellation and geometry shaders don't support PointSize built-in");
982 }
983 
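// Verifies that the given format supports storage image usage with the requested tiling.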
984 void requireStorageImageSupport(const InstanceInterface &vki, const VkPhysicalDevice physDevice, const VkFormat fmt,
985                                 const VkImageTiling tiling)
986 {
987     const VkFormatProperties p = getPhysicalDeviceFormatProperties(vki, physDevice, fmt);
988     const auto &features = ((tiling == VK_IMAGE_TILING_LINEAR) ? p.linearTilingFeatures : p.optimalTilingFeatures);
989 
990     if ((features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) == 0)
991         throw tcu::NotSupportedError("Storage image format not supported");
992 }
993 
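// Derives a test-case name component from the resource description,
// e.g. "buffer_<size>" or "image_<extent>_<format>".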
994 std::string getResourceName(const ResourceDescription &resource)
995 {
996     std::ostringstream str;
997 
998     if ((resource.type == RESOURCE_TYPE_BUFFER) || (resource.type == RESOURCE_TYPE_INDEX_BUFFER))
999     {
1000         str << "buffer_" << resource.size.x();
1001     }
1002     else if (resource.type == RESOURCE_TYPE_IMAGE)
1003     {
1004         str << "image_" << resource.size.x() << (resource.size.y() > 0 ? "x" + de::toString(resource.size.y()) : "")
1005             << (resource.size.z() > 0 ? "x" + de::toString(resource.size.z()) : "") << "_"
1006             << de::toLower(getFormatName(resource.imageFormat)).substr(10);
1007     }
1008     else if (isIndirectBuffer(resource.type))
1009         str << "indirect_buffer";
1010     else
1011         DE_ASSERT(0);
1012 
1013     return str.str();
1014 }
1015 
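// Returns true for resource types that are backed by an indirect draw/dispatch buffer.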
1016 bool isIndirectBuffer(const ResourceType type)
1017 {
1018     switch (type)
1019     {
1020     case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
1021     case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
1022     case RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH:
1023         return true;
1024 
1025     default:
1026         return false;
1027     }
1028 }
1029 
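// Fills a VkCommandBufferSubmitInfo for the given command buffer with deviceMask set to 0.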
1030 VkCommandBufferSubmitInfo makeCommonCommandBufferSubmitInfo(const VkCommandBuffer cmdBuf)
1031 {
1032     return {
1033 #ifndef CTS_USES_VULKANSC
1034         VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, // VkStructureType        sType
1035 #else
1036         VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR, // VkStructureType        sType
1037 #endif           // CTS_USES_VULKANSC
1038         DE_NULL, // const void*            pNext
1039         cmdBuf,  // VkCommandBuffer        commandBuffer
1040         0u       // uint32_t                deviceMask
1041     };
1042 }
1043 
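// Fills a VkSemaphoreSubmitInfo; the value is only used when the semaphore is a timeline semaphore.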
1044 VkSemaphoreSubmitInfo makeCommonSemaphoreSubmitInfo(VkSemaphore semaphore, uint64_t value,
1045                                                     VkPipelineStageFlags2 stageMask)
1046 {
1047     return {
1048 #ifndef CTS_USES_VULKANSC
1049         VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, // VkStructureType                sType
1050 #else
1051         VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,      // VkStructureType                sType
1052 #endif             // CTS_USES_VULKANSC
1053         DE_NULL,   // const void*                    pNext
1054         semaphore, // VkSemaphore                    semaphore
1055         value,     // uint64_t                        value
1056         stageMask, // VkPipelineStageFlags2        stageMask
1057         0u         // uint32_t                        deviceIndex
1058     };
1059 }
1060 
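// Builds a VkDependencyInfo holding at most one barrier of each kind. Event-based dependencies
// leave the dependency flags at zero, otherwise VK_DEPENDENCY_BY_REGION_BIT is used.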
1061 VkDependencyInfo makeCommonDependencyInfo(const VkMemoryBarrier2 *pMemoryBarrier,
1062                                           const VkBufferMemoryBarrier2 *pBufferMemoryBarrier,
1063                                           const VkImageMemoryBarrier2 *pImageMemoryBarrier, bool eventDependency)
1064 {
1065     return {
1066 #ifndef CTS_USES_VULKANSC
1067         VK_STRUCTURE_TYPE_DEPENDENCY_INFO, // VkStructureType                    sType
1068 #else
1069         VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR,            // VkStructureType                    sType
1070 #endif           // CTS_USES_VULKANSC
1071         DE_NULL, // const void*                        pNext
1072         eventDependency ?
1073             (VkDependencyFlags)0u :
1074             (VkDependencyFlags)VK_DEPENDENCY_BY_REGION_BIT, // VkDependencyFlags                dependencyFlags
1075         !!pMemoryBarrier,                                   // uint32_t                            memoryBarrierCount
1076         pMemoryBarrier,                                     // const VkMemoryBarrier2KHR*        pMemoryBarriers
1077         !!pBufferMemoryBarrier, // uint32_t                            bufferMemoryBarrierCount
1078         pBufferMemoryBarrier,   // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers
1079         !!pImageMemoryBarrier,  // uint32_t                            imageMemoryBarrierCount
1080         pImageMemoryBarrier     // const VkImageMemoryBarrier2KHR*    pImageMemoryBarriers
1081     };
1082 }
1083 
1084 PipelineCacheData::PipelineCacheData(void)
1085 {
1086 }
1087 
1088 PipelineCacheData::~PipelineCacheData(void)
1089 {
1090 }
1091 
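// Creates a pipeline cache seeded with the data captured by setFromPipelineCache()
// (or, for Vulkan SC, with the data provided by the resource interface).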
1092 vk::Move<VkPipelineCache> PipelineCacheData::createPipelineCache(
1093     const vk::DeviceInterface &vk, const vk::VkDevice device,
1094     de::SharedPtr<vk::ResourceInterface> resourceInterface) const
1095 {
1096 #ifndef CTS_USES_VULKANSC
1097     DE_UNREF(resourceInterface);
1098 #endif
1099     const de::ScopedLock dataLock(m_lock);
1100     const struct vk::VkPipelineCacheCreateInfo params = {vk::VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, DE_NULL,
1101 #ifndef CTS_USES_VULKANSC
1102                                                          (vk::VkPipelineCacheCreateFlags)0, (uintptr_t)m_data.size(),
1103                                                          (m_data.empty() ? DE_NULL : &m_data[0])
1104 #else
1105                                                          VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
1106                                                              VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,
1107                                                          resourceInterface->getCacheDataSize(), // uintptr_t initialDataSize;
1108                                                          resourceInterface->getCacheData()      // const void* pInitialData;
1109 #endif // CTS_USES_VULKANSC
1110     };
1111 
1112     return vk::createPipelineCache(vk, device, &params);
1113 }
1114 
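// Stores the contents of the given pipeline cache so that later caches can be created
// from the same data. A no-op when building for Vulkan SC.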
1115 void PipelineCacheData::setFromPipelineCache(const vk::DeviceInterface &vk, const vk::VkDevice device,
1116                                              const vk::VkPipelineCache pipelineCache)
1117 {
1118     const de::ScopedLock dataLock(m_lock);
1119 
1120 #ifndef CTS_USES_VULKANSC
1121     uintptr_t dataSize = 0;
1122     VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, DE_NULL));
1123 
1124     m_data.resize(dataSize);
1125 
1126     if (dataSize > 0)
1127         VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, &m_data[0]));
1128 #else
1129     DE_UNREF(vk);
1130     DE_UNREF(device);
1131     DE_UNREF(pipelineCache);
1132 #endif
1133 }
1134 
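// The helpers below return the device, device interface, queue family index and queue used for
// synchronization: either the context defaults, or those of a dedicated video device when one is provided.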
1135 vk::VkDevice getSyncDevice(de::MovePtr<VideoDevice> &device, Context &context)
1136 {
1137     if (device == DE_NULL)
1138         return context.getDevice();
1139     else
1140         return device->getDeviceSupportingQueue();
1141 }
1142 
1143 const vk::DeviceInterface &getSyncDeviceInterface(de::MovePtr<VideoDevice> &device, Context &context)
1144 {
1145     if (device == DE_NULL)
1146         return context.getDeviceInterface();
1147     else
1148         return device->getDeviceDriver();
1149 }
1150 
1151 uint32_t getSyncQueueFamilyIndex(de::MovePtr<VideoDevice> &device, Context &context)
1152 {
1153     if (device == DE_NULL)
1154         return context.getUniversalQueueFamilyIndex();
1155     else
1156         return device->getQueueFamilyVideo();
1157 }
1158 
1159 vk::VkQueue getSyncQueue(de::MovePtr<VideoDevice> &device, Context &context)
1160 {
1161     if (device == DE_NULL)
1162         return context.getUniversalQueue();
1163     else
1164         return getDeviceQueue(device->getDeviceDriver(), device->getDeviceSupportingQueue(),
1165                               device->getQueueFamilyVideo(), 0u);
1166 }
1167 
1168 } // namespace synchronization
1169 } // namespace vkt
1170