/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_synchronization.h"

#include "vk_alloc.h"
#include "vk_command_buffer.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_queue.h"
#include "vk_util.h"
#include "../wsi/wsi_common.h"

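/* Expands the VK_ACCESS_2_MEMORY_WRITE_BIT and VK_ACCESS_2_SHADER_WRITE_BIT
 * meta-flags in a source access mask into the specific write-access bits
 * implied by the given pipeline stages.
 */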
VkAccessFlags2
vk_expand_src_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   if (access & VK_ACCESS_2_MEMORY_WRITE_BIT)
      access |= vk_write_access2_for_pipeline_stage_flags2(stages);

   if (access & VK_ACCESS_2_SHADER_WRITE_BIT)
      access |= VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT;

   return access;
}

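/* Expands the VK_ACCESS_2_MEMORY_READ_BIT and VK_ACCESS_2_SHADER_READ_BIT
 * meta-flags in a destination access mask into the specific read-access bits
 * implied by the given pipeline stages.
 */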
VkAccessFlags2
vk_expand_dst_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   if (access & VK_ACCESS_2_MEMORY_READ_BIT)
      access |= vk_read_access2_for_pipeline_stage_flags2(stages);

   if (access & VK_ACCESS_2_SHADER_READ_BIT)
      access |= VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
                VK_ACCESS_2_SHADER_STORAGE_READ_BIT |
                VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR;

   return access;
}

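/* Returns only the write-access bits of the expanded source access mask that
 * are actually valid for the given pipeline stages.
 */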
VkAccessFlags2
vk_filter_src_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_write_access =
      vk_write_access2_for_pipeline_stage_flags2(stages);

   /* We only care about write access in src flags */
   return vk_expand_src_access_flags2(stages, access) & all_write_access;
}

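/* Returns only the read-access bits of the expanded destination access mask
 * that are actually valid for the given pipeline stages.
 */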
VkAccessFlags2
vk_filter_dst_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_read_access =
      vk_read_access2_for_pipeline_stage_flags2(stages);

   /* We only care about read access in dst flags */
   return vk_expand_dst_access_flags2(stages, access) & all_read_access;
}

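/* Common implementation of vkCmdWriteTimestamp that forwards to the driver's
 * vkCmdWriteTimestamp2.
 */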
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteTimestamp(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkQueryPool queryPool,
    uint32_t query)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteTimestamp2(commandBuffer,
                                             (VkPipelineStageFlags2) pipelineStage,
                                             queryPool,
                                             query);
}

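/* Helpers that translate the legacy (synchronization1) barrier structs into
 * their *2 equivalents, substituting the per-command stage masks since the
 * legacy structs carry no stage information of their own.
 */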
static VkMemoryBarrier2
upgrade_memory_barrier(const VkMemoryBarrier *barrier,
                       VkPipelineStageFlags2 src_stage_mask2,
                       VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
   };
}

static VkBufferMemoryBarrier2
upgrade_buffer_memory_barrier(const VkBufferMemoryBarrier *barrier,
                              VkPipelineStageFlags2 src_stage_mask2,
                              VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkBufferMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .buffer = barrier->buffer,
      .offset = barrier->offset,
      .size = barrier->size,
   };
}

static VkImageMemoryBarrier2
upgrade_image_memory_barrier(const VkImageMemoryBarrier *barrier,
                             VkPipelineStageFlags2 src_stage_mask2,
                             VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkImageMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .oldLayout = barrier->oldLayout,
      .newLayout = barrier->newLayout,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .image = barrier->image,
      .subresourceRange = barrier->subresourceRange,
   };
}

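/* Common implementation of vkCmdPipelineBarrier: upgrades every legacy
 * barrier to its *2 form and forwards the result to the driver's
 * vkCmdPipelineBarrier2.
 */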
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdPipelineBarrier(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask,
    VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   STACK_ARRAY(VkMemoryBarrier2, memory_barriers, memoryBarrierCount);
   STACK_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, bufferMemoryBarrierCount);
   STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, imageMemoryBarrierCount);

   VkPipelineStageFlags2 src_stage_mask2 = (VkPipelineStageFlags2) srcStageMask;
   VkPipelineStageFlags2 dst_stage_mask2 = (VkPipelineStageFlags2) dstStageMask;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      memory_barriers[i] = upgrade_memory_barrier(&pMemoryBarriers[i],
                                                  src_stage_mask2,
                                                  dst_stage_mask2);
   }
   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      buffer_barriers[i] = upgrade_buffer_memory_barrier(&pBufferMemoryBarriers[i],
                                                         src_stage_mask2,
                                                         dst_stage_mask2);
   }
   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      image_barriers[i] = upgrade_image_memory_barrier(&pImageMemoryBarriers[i],
                                                       src_stage_mask2,
                                                       dst_stage_mask2);
   }

   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .memoryBarrierCount = memoryBarrierCount,
      .pMemoryBarriers = memory_barriers,
      .bufferMemoryBarrierCount = bufferMemoryBarrierCount,
      .pBufferMemoryBarriers = buffer_barriers,
      .imageMemoryBarrierCount = imageMemoryBarrierCount,
      .pImageMemoryBarriers = image_barriers,
   };

   device->dispatch_table.CmdPipelineBarrier2(commandBuffer, &dep_info);

   STACK_ARRAY_FINISH(memory_barriers);
   STACK_ARRAY_FINISH(buffer_barriers);
   STACK_ARRAY_FINISH(image_barriers);
}

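/* Common implementation of vkCmdSetEvent: wraps the single stage mask in a
 * VkDependencyInfo and forwards to vkCmdSetEvent2.
 */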
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdSetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   VkMemoryBarrier2 mem_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = (VkPipelineStageFlags2) stageMask,
      .dstStageMask = (VkPipelineStageFlags2) stageMask,
   };
   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .memoryBarrierCount = 1,
      .pMemoryBarriers = &mem_barrier,
   };

   device->dispatch_table.CmdSetEvent2(commandBuffer, event, &dep_info);
}

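/* Common implementation of vkCmdResetEvent, forwarding to vkCmdResetEvent2. */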
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdResetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdResetEvent2(commandBuffer,
                                         event,
                                         (VkPipelineStageFlags2) stageMask);
}

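/* Common implementation of vkCmdWaitEvents: waits on each event with
 * vkCmdWaitEvents2 using a stage-only barrier, then issues the actual
 * memory/buffer/image barriers with a separate vkCmdPipelineBarrier call.
 */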
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWaitEvents(
    VkCommandBuffer commandBuffer,
    uint32_t eventCount,
    const VkEvent* pEvents,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags destStageMask,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   if (eventCount == 0)
      return;

   STACK_ARRAY(VkDependencyInfo, deps, eventCount);

   /* Note that dstStageMask and srcStageMask in the CmdWaitEvents2() call
    * are the same. This is to match the CmdSetEvent2() call from
    * vk_common_CmdSetEvent(). The actual src->dst stage barrier will
    * happen as part of the CmdPipelineBarrier() call below.
    */
   VkMemoryBarrier2 stage_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = srcStageMask,
      .dstStageMask = srcStageMask,
   };

   for (uint32_t i = 0; i < eventCount; i++) {
      deps[i] = (VkDependencyInfo) {
         .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
         .memoryBarrierCount = 1,
         .pMemoryBarriers = &stage_barrier,
      };
   }
   device->dispatch_table.CmdWaitEvents2(commandBuffer, eventCount, pEvents, deps);

   STACK_ARRAY_FINISH(deps);

   /* Setting dependency flags to 0 because:
    *
    * - For BY_REGION_BIT and VIEW_LOCAL_BIT, events are not allowed inside a
    *   render pass so these don't apply.
    *
    * - For DEVICE_GROUP_BIT, we have the following bit of spec text:
    *
    *    "Semaphore and event dependencies are device-local and only
    *    execute on the one physical device that performs the
    *    dependency."
    */
   const VkDependencyFlags dep_flags = 0;

   device->dispatch_table.CmdPipelineBarrier(commandBuffer,
                                             srcStageMask, destStageMask,
                                             dep_flags,
                                             memoryBarrierCount, pMemoryBarriers,
                                             bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                             imageMemoryBarrierCount, pImageMemoryBarriers);
}

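/* Common implementation of vkCmdWriteBufferMarkerAMD, forwarding to
 * vkCmdWriteBufferMarker2AMD.
 */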
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteBufferMarkerAMD(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    uint32_t marker)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteBufferMarker2AMD(commandBuffer,
                                                   (VkPipelineStageFlags2) pipelineStage,
                                                   dstBuffer,
                                                   dstOffset,
                                                   marker);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetQueueCheckpointDataNV(
    VkQueue queue,
    uint32_t* pCheckpointDataCount,
    VkCheckpointDataNV* pCheckpointData)
{
   unreachable("Entrypoint not implemented");
}

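/* Common implementation of vkQueueSubmit: translates each VkSubmitInfo (and
 * its timeline semaphore, device group, protected, performance query, and WSI
 * pNext extensions) into a VkSubmitInfo2 and forwards to vkQueueSubmit2.
 */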
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_QueueSubmit(
    VkQueue _queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits,
    VkFence fence)
{
   VK_FROM_HANDLE(vk_queue, queue, _queue);
   struct vk_device *device = queue->base.device;

   STACK_ARRAY(VkSubmitInfo2, submit_info_2, submitCount);
   STACK_ARRAY(VkPerformanceQuerySubmitInfoKHR, perf_query_submit_info, submitCount);
   STACK_ARRAY(struct wsi_memory_signal_submit_info, wsi_mem_submit_info, submitCount);

   uint32_t n_wait_semaphores = 0;
   uint32_t n_command_buffers = 0;
   uint32_t n_signal_semaphores = 0;
   for (uint32_t s = 0; s < submitCount; s++) {
      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   STACK_ARRAY(VkSemaphoreSubmitInfo, wait_semaphores, n_wait_semaphores);
   STACK_ARRAY(VkCommandBufferSubmitInfo, command_buffers, n_command_buffers);
   STACK_ARRAY(VkSemaphoreSubmitInfo, signal_semaphores, n_signal_semaphores);

   n_wait_semaphores = 0;
   n_command_buffers = 0;
   n_signal_semaphores = 0;

   for (uint32_t s = 0; s < submitCount; s++) {
      const VkTimelineSemaphoreSubmitInfo *timeline_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO);
      const uint64_t *wait_values = NULL;
      const uint64_t *signal_values = NULL;

      if (timeline_info && timeline_info->waitSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03240
          *
          *    "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
          *    and any element of pWaitSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its waitSemaphoreValueCount member must equal
          *    waitSemaphoreCount"
          */
         assert(timeline_info->waitSemaphoreValueCount == pSubmits[s].waitSemaphoreCount);
         wait_values = timeline_info->pWaitSemaphoreValues;
      }

      if (timeline_info && timeline_info->signalSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03241
          *
          *    "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
          *    and any element of pSignalSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
          *    signalSemaphoreCount"
          */
         assert(timeline_info->signalSemaphoreValueCount == pSubmits[s].signalSemaphoreCount);
         signal_values = timeline_info->pSignalSemaphoreValues;
      }

      const VkDeviceGroupSubmitInfo *group_info =
         vk_find_struct_const(pSubmits[s].pNext, DEVICE_GROUP_SUBMIT_INFO);

      for (uint32_t i = 0; i < pSubmits[s].waitSemaphoreCount; i++) {
         wait_semaphores[n_wait_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pWaitSemaphores[i],
            .value = wait_values ? wait_values[i] : 0,
            .stageMask = pSubmits[s].pWaitDstStageMask[i],
            .deviceIndex = group_info ? group_info->pWaitSemaphoreDeviceIndices[i] : 0,
         };
      }
      for (uint32_t i = 0; i < pSubmits[s].commandBufferCount; i++) {
         command_buffers[n_command_buffers + i] = (VkCommandBufferSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
            .commandBuffer = pSubmits[s].pCommandBuffers[i],
            .deviceMask = group_info ? group_info->pCommandBufferDeviceMasks[i] : 0,
         };
      }
      for (uint32_t i = 0; i < pSubmits[s].signalSemaphoreCount; i++) {
         signal_semaphores[n_signal_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pSignalSemaphores[i],
            .value = signal_values ? signal_values[i] : 0,
            .stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
            .deviceIndex = group_info ? group_info->pSignalSemaphoreDeviceIndices[i] : 0,
         };
      }

      const VkProtectedSubmitInfo *protected_info =
         vk_find_struct_const(pSubmits[s].pNext, PROTECTED_SUBMIT_INFO);

      submit_info_2[s] = (VkSubmitInfo2) {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
         .flags = ((protected_info && protected_info->protectedSubmit) ?
                   VK_SUBMIT_PROTECTED_BIT : 0),
         .waitSemaphoreInfoCount = pSubmits[s].waitSemaphoreCount,
         .pWaitSemaphoreInfos = &wait_semaphores[n_wait_semaphores],
         .commandBufferInfoCount = pSubmits[s].commandBufferCount,
         .pCommandBufferInfos = &command_buffers[n_command_buffers],
         .signalSemaphoreInfoCount = pSubmits[s].signalSemaphoreCount,
         .pSignalSemaphoreInfos = &signal_semaphores[n_signal_semaphores],
      };

      const VkPerformanceQuerySubmitInfoKHR *query_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
      if (query_info) {
         perf_query_submit_info[s] = *query_info;
         perf_query_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &perf_query_submit_info[s]);
      }

      const struct wsi_memory_signal_submit_info *mem_signal_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (mem_signal_info) {
         wsi_mem_submit_info[s] = *mem_signal_info;
         wsi_mem_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &wsi_mem_submit_info[s]);
      }

      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   VkResult result = device->dispatch_table.QueueSubmit2(_queue,
                                                         submitCount,
                                                         submit_info_2,
                                                         fence);

   STACK_ARRAY_FINISH(wait_semaphores);
   STACK_ARRAY_FINISH(command_buffers);
   STACK_ARRAY_FINISH(signal_semaphores);
   STACK_ARRAY_FINISH(submit_info_2);
   STACK_ARRAY_FINISH(perf_query_submit_info);
   STACK_ARRAY_FINISH(wsi_mem_submit_info);

   return result;
}