/*
 * Copyright © 2019 Red Hat.
 * Copyright © 2022 Collabora, LTD
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_alloc.h"
#include "vk_cmd_enqueue_entrypoints.h"
#include "vk_command_buffer.h"
#include "vk_descriptor_update_template.h"
#include "vk_device.h"
#include "vk_pipeline_layout.h"
#include "vk_util.h"
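
/* Hand-rolled enqueue entrypoints for commands the generated deep-copy path
 * cannot express on its own: strided arrays, implicitly sized pData blobs,
 * and object handles whose lifetime must be extended until the recorded
 * command is replayed or the queue is torn down.
 */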
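/* Returns the number of bytes the driver reads for a single descriptor of
 * the given type when walking a descriptor update template. Inline uniform
 * blocks are byte-addressed, so the callers handle them separately.
 */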
static inline unsigned
vk_descriptor_type_update_size(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
      unreachable("handled in caller");

   case VK_DESCRIPTOR_TYPE_SAMPLER:
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      return sizeof(VkDescriptorImageInfo);

   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      return sizeof(VkBufferView);

   case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
      return sizeof(VkAccelerationStructureKHR);

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
   default:
      return sizeof(VkDescriptorBufferInfo);
   }
}

static void
vk_cmd_push_descriptor_set_with_template2_khr_free(
   struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   struct vk_device *device = cmd_buffer->base.device;

   struct vk_cmd_push_descriptor_set_with_template2_khr *info_ =
      &cmd->u.push_descriptor_set_with_template2_khr;

   VkPushDescriptorSetWithTemplateInfoKHR *info =
      info_->push_descriptor_set_with_template_info;

   /* The info struct is missing if its allocation failed in the enqueue
    * entrypoint; there is nothing to release in that case.
    */
   if (!info)
      return;

   VK_FROM_HANDLE(vk_descriptor_update_template, templ,
                  info->descriptorUpdateTemplate);
   VK_FROM_HANDLE(vk_pipeline_layout, layout, info->layout);

   vk_descriptor_update_template_unref(device, templ);
   vk_pipeline_layout_unref(device, layout);

   if (info->pNext) {
      VkPipelineLayoutCreateInfo *pnext = (void *)info->pNext;

      vk_free(queue->alloc, (void *)pnext->pSetLayouts);
      vk_free(queue->alloc, (void *)pnext->pPushConstantRanges);
      vk_free(queue->alloc, pnext);
   }
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSetWithTemplate2KHR(
   VkCommandBuffer commandBuffer,
   const VkPushDescriptorSetWithTemplateInfoKHR *pPushDescriptorSetWithTemplateInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE2_KHR;
   cmd->driver_free_cb = vk_cmd_push_descriptor_set_with_template2_khr_free;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   VkPushDescriptorSetWithTemplateInfoKHR *info =
      vk_zalloc(cmd_buffer->cmd_queue.alloc,
                sizeof(VkPushDescriptorSetWithTemplateInfoKHR), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!info) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->u.push_descriptor_set_with_template2_khr
      .push_descriptor_set_with_template_info = info;

   /* From the application's perspective, the vk_cmd_queue_entry can outlive
    * the template. Therefore, we take a reference here and release it when
    * the vk_cmd_queue_entry is freed, tying the two lifetimes together.
    */
   info->descriptorUpdateTemplate =
      pPushDescriptorSetWithTemplateInfo->descriptorUpdateTemplate;

   VK_FROM_HANDLE(vk_descriptor_update_template, templ,
                  info->descriptorUpdateTemplate);
   vk_descriptor_update_template_ref(templ);
   info->set = pPushDescriptorSetWithTemplateInfo->set;
   info->sType = pPushDescriptorSetWithTemplateInfo->sType;

   /* The same lifetime concern applies to the pipeline layout. */
   info->layout = pPushDescriptorSetWithTemplateInfo->layout;

   VK_FROM_HANDLE(vk_pipeline_layout, layout, info->layout);
   vk_pipeline_layout_ref(layout);

   /* What makes this tricky is that the size of pData is implicit. We
    * determine it by walking the template and computing the ranges read by
    * the driver.
    */
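   /* For example, a hypothetical entry with offset = 16, stride = 32,
    * array_count = 4 and type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE makes the
    * driver read elements at offsets 16, 48, 80 and 112, so the entry's range
    * ends at 112 + sizeof(VkDescriptorImageInfo). Only the last element's
    * update size matters, hence the (array_count - 1) * stride term in the
    * loop below.
    */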
   size_t data_size = 0;
   for (unsigned i = 0; i < templ->entry_count; ++i) {
      struct vk_descriptor_template_entry entry = templ->entries[i];
      unsigned end = 0;

      /* From the spec:
       *
       *    If descriptorType is VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK then
       *    the value of stride is ignored and the stride is assumed to be 1,
       *    i.e. the descriptor update information for them is always specified
       *    as a contiguous range.
       */
      if (entry.type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
         end = entry.offset + entry.array_count;
      } else if (entry.array_count > 0) {
         end = entry.offset + ((entry.array_count - 1) * entry.stride) +
               vk_descriptor_type_update_size(entry.type);
      }

      data_size = MAX2(data_size, end);
   }

   uint8_t *out_pData = vk_zalloc(cmd_buffer->cmd_queue.alloc, data_size, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!out_pData) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   const uint8_t *pData = pPushDescriptorSetWithTemplateInfo->pData;

   /* Now walk the template again, copying only the ranges the driver will
    * actually read.
    */
   for (unsigned i = 0; i < templ->entry_count; ++i) {
      struct vk_descriptor_template_entry entry = templ->entries[i];
      unsigned size = 0;

      if (entry.type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
         size = entry.array_count;
      } else if (entry.array_count > 0) {
         size = ((entry.array_count - 1) * entry.stride) +
                vk_descriptor_type_update_size(entry.type);
      }

      memcpy(out_pData + entry.offset, pData + entry.offset, size);
   }

   info->pData = out_pData;

   const VkBaseInStructure *pnext = pPushDescriptorSetWithTemplateInfo->pNext;

   if (pnext) {
      switch ((int32_t)pnext->sType) {
      /* TODO: The set layouts below would need to be reference counted.
       * Punting until there's a cmd_enqueue-based driver implementing
       * VK_NV_per_stage_descriptor_set.
       */
#if 0
      case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO:
         info->pNext =
            vk_zalloc(queue->alloc, sizeof(VkPipelineLayoutCreateInfo), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (info->pNext == NULL)
            goto err;

         memcpy((void *)info->pNext, pnext,
                sizeof(VkPipelineLayoutCreateInfo));

         VkPipelineLayoutCreateInfo *tmp_dst2 = (void *)info->pNext;
         VkPipelineLayoutCreateInfo *tmp_src2 = (void *)pnext;

         if (tmp_src2->pSetLayouts) {
            tmp_dst2->pSetLayouts = vk_zalloc(
               queue->alloc,
               sizeof(*tmp_dst2->pSetLayouts) * tmp_dst2->setLayoutCount, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (tmp_dst2->pSetLayouts == NULL)
               goto err;

            memcpy(
               (void *)tmp_dst2->pSetLayouts, tmp_src2->pSetLayouts,
               sizeof(*tmp_dst2->pSetLayouts) * tmp_dst2->setLayoutCount);
         }

         if (tmp_src2->pPushConstantRanges) {
            tmp_dst2->pPushConstantRanges =
               vk_zalloc(queue->alloc,
                         sizeof(*tmp_dst2->pPushConstantRanges) *
                            tmp_dst2->pushConstantRangeCount,
                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (tmp_dst2->pPushConstantRanges == NULL)
               goto err;

            memcpy((void *)tmp_dst2->pPushConstantRanges,
                   tmp_src2->pPushConstantRanges,
                   sizeof(*tmp_dst2->pPushConstantRanges) *
                      tmp_dst2->pushConstantRangeCount);
         }
         break;
#endif

      default:
         goto err;
      }
   }

   return;

err:
   if (cmd) {
      vk_cmd_push_descriptor_set_with_template2_khr_free(queue, cmd);
      /* The entry stays on the queue, so clear the callback to keep the
       * references released above from being released a second time when the
       * queue itself is freed.
       */
      cmd->driver_free_cb = NULL;
   }

   vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
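/* The original entrypoint is a thin wrapper: it packs its loose parameters
 * into the VkPushDescriptorSetWithTemplateInfoKHR struct consumed by the
 * 2KHR path above, so both entrypoints share one deep-copy implementation.
 */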
VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSetWithTemplateKHR(
   VkCommandBuffer commandBuffer,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   VkPipelineLayout layout,
   uint32_t set,
   const void *pData)
{
   const VkPushDescriptorSetWithTemplateInfoKHR two = {
      .sType = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR,
      .descriptorUpdateTemplate = descriptorUpdateTemplate,
      .layout = layout,
      .set = set,
      .pData = pData,
   };

   vk_cmd_enqueue_CmdPushDescriptorSetWithTemplate2KHR(commandBuffer, &two);
}
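/* pVertexInfo is a stride-based array; vk_foreach_multi_draw honours the
 * application's stride while walking it, and the enqueued copy is stored as
 * a tightly packed array.
 */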
VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,
                               uint32_t drawCount,
                               const VkMultiDrawInfoEXT *pVertexInfo,
                               uint32_t instanceCount,
                               uint32_t firstInstance,
                               uint32_t stride)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_DRAW_MULTI_EXT;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   cmd->u.draw_multi_ext.draw_count = drawCount;
   if (pVertexInfo) {
      unsigned i = 0;
      cmd->u.draw_multi_ext.vertex_info =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_ext.vertex_info) * drawCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_ext.vertex_info) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride) {
         memcpy(&cmd->u.draw_multi_ext.vertex_info[i], draw,
                sizeof(*cmd->u.draw_multi_ext.vertex_info));
      }
   }
   cmd->u.draw_multi_ext.instance_count = instanceCount;
   cmd->u.draw_multi_ext.first_instance = firstInstance;
   cmd->u.draw_multi_ext.stride = stride;
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,
                                      uint32_t drawCount,
                                      const VkMultiDrawIndexedInfoEXT *pIndexInfo,
                                      uint32_t instanceCount,
                                      uint32_t firstInstance,
                                      uint32_t stride,
                                      const int32_t *pVertexOffset)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_DRAW_MULTI_INDEXED_EXT;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   cmd->u.draw_multi_indexed_ext.draw_count = drawCount;

   if (pIndexInfo) {
      unsigned i = 0;
      cmd->u.draw_multi_indexed_ext.index_info =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_indexed_ext.index_info) * drawCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_indexed_ext.index_info) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
         cmd->u.draw_multi_indexed_ext.index_info[i].firstIndex = draw->firstIndex;
         cmd->u.draw_multi_indexed_ext.index_info[i].indexCount = draw->indexCount;
         if (pVertexOffset == NULL)
            cmd->u.draw_multi_indexed_ext.index_info[i].vertexOffset = draw->vertexOffset;
      }
   }

   cmd->u.draw_multi_indexed_ext.instance_count = instanceCount;
   cmd->u.draw_multi_indexed_ext.first_instance = firstInstance;
   cmd->u.draw_multi_indexed_ext.stride = stride;

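   /* Per VK_EXT_multi_draw, a non-NULL pVertexOffset supplies the vertex
    * offset for every draw and the per-draw vertexOffset is ignored, which
    * is why the loop above only copies it when pVertexOffset is NULL.
    */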
   if (pVertexOffset) {
      cmd->u.draw_multi_indexed_ext.vertex_offset =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_indexed_ext.vertex_offset) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.draw_multi_indexed_ext.vertex_offset, pVertexOffset,
             sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset));
   }
}

static void
push_descriptors_set_free(struct vk_cmd_queue *queue,
                          struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   struct vk_cmd_push_descriptor_set_khr *pds = &cmd->u.push_descriptor_set_khr;

   VK_FROM_HANDLE(vk_pipeline_layout, vk_layout, pds->layout);
   vk_pipeline_layout_unref(cmd_buffer->base.device, vk_layout);

   for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
      VkWriteDescriptorSet *entry = &pds->descriptor_writes[i];
      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         vk_free(queue->alloc, (void *)entry->pImageInfo);
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         vk_free(queue->alloc, (void *)entry->pTexelBufferView);
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      default:
         vk_free(queue->alloc, (void *)entry->pBufferInfo);
         break;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
                                       VkPipelineBindPoint pipelineBindPoint,
                                       VkPipelineLayout layout,
                                       uint32_t set,
                                       uint32_t descriptorWriteCount,
                                       const VkWriteDescriptorSet *pDescriptorWrites)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_push_descriptor_set_khr *pds;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   pds = &cmd->u.push_descriptor_set_khr;

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_KHR;
   cmd->driver_free_cb = push_descriptors_set_free;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   pds->pipeline_bind_point = pipelineBindPoint;
   pds->set = set;
   pds->descriptor_write_count = descriptorWriteCount;

   /* From the application's perspective, the vk_cmd_queue_entry can outlive
    * the layout. Take a reference.
    */
   VK_FROM_HANDLE(vk_pipeline_layout, vk_layout, layout);
   pds->layout = layout;
   vk_pipeline_layout_ref(vk_layout);

   if (pDescriptorWrites) {
      pds->descriptor_writes =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*pds->descriptor_writes) * descriptorWriteCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pds->descriptor_writes) {
         pds->descriptor_write_count = 0;
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(pds->descriptor_writes,
             pDescriptorWrites,
             sizeof(*pds->descriptor_writes) * descriptorWriteCount);

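      /* The memcpy above is only a shallow copy: the per-write info pointers
       * still reference application memory, which may be reclaimed once this
       * call returns. Deep-copy the array matching each descriptor type;
       * push_descriptors_set_free releases the copies.
       */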
      unsigned i;
      for (i = 0; i < descriptorWriteCount; i++) {
         switch (pds->descriptor_writes[i].descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            pds->descriptor_writes[i].pImageInfo =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (!pds->descriptor_writes[i].pImageInfo)
               goto err_partial;
            memcpy((VkDescriptorImageInfo *)pds->descriptor_writes[i].pImageInfo,
                   pDescriptorWrites[i].pImageInfo,
                   sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            pds->descriptor_writes[i].pTexelBufferView =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (!pds->descriptor_writes[i].pTexelBufferView)
               goto err_partial;
            memcpy((VkBufferView *)pds->descriptor_writes[i].pTexelBufferView,
                   pDescriptorWrites[i].pTexelBufferView,
                   sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         default:
            pds->descriptor_writes[i].pBufferInfo =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (!pds->descriptor_writes[i].pBufferInfo)
               goto err_partial;
            memcpy((VkDescriptorBufferInfo *)pds->descriptor_writes[i].pBufferInfo,
                   pDescriptorWrites[i].pBufferInfo,
                   sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         }
      }

      return;

   err_partial:
      /* Writes at index i and beyond still alias application memory; shrink
       * the recorded count so push_descriptors_set_free only touches the
       * copies made above.
       */
      pds->descriptor_write_count = i;
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
   }
}

static void
unref_pipeline_layout(struct vk_cmd_queue *queue,
                      struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   VK_FROM_HANDLE(vk_pipeline_layout, layout,
                  cmd->u.bind_descriptor_sets.layout);

   assert(cmd->type == VK_CMD_BIND_DESCRIPTOR_SETS);

   vk_pipeline_layout_unref(cmd_buffer->base.device, layout);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                                     VkPipelineBindPoint pipelineBindPoint,
                                     VkPipelineLayout layout,
                                     uint32_t firstSet,
                                     uint32_t descriptorSetCount,
                                     const VkDescriptorSet *pDescriptorSets,
                                     uint32_t dynamicOffsetCount,
                                     const uint32_t *pDynamicOffsets)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_BIND_DESCRIPTOR_SETS;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   /* We need to hold a reference to the pipeline layout as long as this
    * command is in the queue. Otherwise, it may get deleted out from under
    * us before the command is replayed.
    */
   vk_pipeline_layout_ref(vk_pipeline_layout_from_handle(layout));
   cmd->u.bind_descriptor_sets.layout = layout;
   cmd->driver_free_cb = unref_pipeline_layout;

   cmd->u.bind_descriptor_sets.pipeline_bind_point = pipelineBindPoint;
   cmd->u.bind_descriptor_sets.first_set = firstSet;
   cmd->u.bind_descriptor_sets.descriptor_set_count = descriptorSetCount;
   if (pDescriptorSets) {
      cmd->u.bind_descriptor_sets.descriptor_sets =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.bind_descriptor_sets.descriptor_sets) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.bind_descriptor_sets.descriptor_sets, pDescriptorSets,
             sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount);
   }
   cmd->u.bind_descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
   if (pDynamicOffsets) {
      cmd->u.bind_descriptor_sets.dynamic_offsets =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.bind_descriptor_sets.dynamic_offsets) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.bind_descriptor_sets.dynamic_offsets, pDynamicOffsets,
             sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount);
   }
}

#ifdef VK_ENABLE_BETA_EXTENSIONS
static void
dispatch_graph_amdx_free(struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
{
   VkDispatchGraphCountInfoAMDX *count_info = cmd->u.dispatch_graph_amdx.count_info;
   if (!count_info)
      return;

   void *infos = (void *)count_info->infos.hostAddress;
   if (infos) {
      for (uint32_t i = 0; i < count_info->count; i++) {
         VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * count_info->stride);
         vk_free(queue->alloc, (void *)info->payloads.hostAddress);
      }
   }

   vk_free(queue->alloc, infos);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDispatchGraphAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch,
                                    const VkDispatchGraphCountInfoAMDX *pCountInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   if (vk_command_buffer_has_error(cmd_buffer))
      return;

   VkResult result = VK_SUCCESS;
   const VkAllocationCallbacks *alloc = cmd_buffer->cmd_queue.alloc;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(alloc, sizeof(struct vk_cmd_queue_entry), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto err;
   }

   cmd->type = VK_CMD_DISPATCH_GRAPH_AMDX;
   cmd->driver_free_cb = dispatch_graph_amdx_free;

   cmd->u.dispatch_graph_amdx.scratch = scratch;

   cmd->u.dispatch_graph_amdx.count_info =
      vk_zalloc(alloc, sizeof(VkDispatchGraphCountInfoAMDX), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd->u.dispatch_graph_amdx.count_info == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto err;
   }

   memcpy((void *)cmd->u.dispatch_graph_amdx.count_info, pCountInfo,
          sizeof(VkDispatchGraphCountInfoAMDX));

   uint32_t infos_size = pCountInfo->count * pCountInfo->stride;
   void *infos = vk_zalloc(alloc, infos_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   cmd->u.dispatch_graph_amdx.count_info->infos.hostAddress = infos;
   if (!infos) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto err;
   }
   memcpy(infos, pCountInfo->infos.hostAddress, infos_size);

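   /* Each copied VkDispatchGraphInfoAMDX still points at the application's
    * payload memory; replace every payloads.hostAddress with a copy that
    * lives as long as the command queue entry.
    */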
   for (uint32_t i = 0; i < pCountInfo->count; i++) {
      VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * pCountInfo->stride);

      uint32_t payloads_size = info->payloadCount * info->payloadStride;
      void *dst_payload = vk_zalloc(alloc, payloads_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!dst_payload) {
         /* Entries from i onwards still point at application payloads; clear
          * them so dispatch_graph_amdx_free does not free foreign pointers.
          */
         for (uint32_t j = i; j < pCountInfo->count; j++) {
            VkDispatchGraphInfoAMDX *clear =
               (void *)((const uint8_t *)infos + j * pCountInfo->stride);
            clear->payloads.hostAddress = NULL;
         }
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto err;
      }
      memcpy(dst_payload, info->payloads.hostAddress, payloads_size);
      info->payloads.hostAddress = dst_payload;
   }

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
   goto finish;
err:
   if (cmd) {
      /* Release the partial copies before freeing the entry itself. */
      dispatch_graph_amdx_free(&cmd_buffer->cmd_queue, cmd);
      vk_free(alloc, cmd->u.dispatch_graph_amdx.count_info);
      vk_free(alloc, cmd);
   }

finish:
   if (unlikely(result != VK_SUCCESS))
      vk_command_buffer_set_error(cmd_buffer, result);
}
#endif

static void
vk_cmd_build_acceleration_structures_khr_free(struct vk_cmd_queue *queue,
                                              struct vk_cmd_queue_entry *cmd)
{
   struct vk_cmd_build_acceleration_structures_khr *build =
      &cmd->u.build_acceleration_structures_khr;

   for (uint32_t i = 0; i < build->info_count; i++) {
      if (build->infos)
         vk_free(queue->alloc, (void *)build->infos[i].pGeometries);
      if (build->pp_build_range_infos)
         vk_free(queue->alloc, (void *)build->pp_build_range_infos[i]);
   }
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdBuildAccelerationStructuresKHR(
   VkCommandBuffer commandBuffer, uint32_t infoCount,
   const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
   const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   if (vk_command_buffer_has_error(cmd_buffer))
      return;

   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR], 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      goto err;

   cmd->type = VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR;
   cmd->driver_free_cb = vk_cmd_build_acceleration_structures_khr_free;

   struct vk_cmd_build_acceleration_structures_khr *build =
      &cmd->u.build_acceleration_structures_khr;

   build->info_count = infoCount;
   if (pInfos) {
      build->infos = vk_zalloc(queue->alloc, sizeof(*build->infos) * infoCount, 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!build->infos)
         goto err;

      memcpy((VkAccelerationStructureBuildGeometryInfoKHR *)build->infos, pInfos,
             sizeof(*build->infos) * infoCount);

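      /* Geometries may be supplied either as a flat array (pGeometries) or
       * as an array of pointers (ppGeometries); normalize the copy to a flat
       * pGeometries array in both cases.
       */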
      for (uint32_t i = 0; i < infoCount; i++) {
         uint32_t geometries_size =
            build->infos[i].geometryCount * sizeof(VkAccelerationStructureGeometryKHR);
         VkAccelerationStructureGeometryKHR *geometries =
            vk_zalloc(queue->alloc, geometries_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!geometries) {
            /* Entries from i onwards still alias the application's geometry
             * arrays; clear them so the free callback only frees our copies.
             */
            for (uint32_t j = i; j < infoCount; j++)
               build->infos[j].pGeometries = NULL;
            goto err;
         }

         if (pInfos[i].pGeometries) {
            memcpy(geometries, pInfos[i].pGeometries, geometries_size);
         } else {
            for (uint32_t j = 0; j < build->infos[i].geometryCount; j++)
               memcpy(&geometries[j], pInfos[i].ppGeometries[j], sizeof(VkAccelerationStructureGeometryKHR));
         }

         build->infos[i].pGeometries = geometries;
      }
   }
   if (ppBuildRangeInfos) {
      build->pp_build_range_infos =
         vk_zalloc(queue->alloc, sizeof(*build->pp_build_range_infos) * infoCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!build->pp_build_range_infos)
         goto err;

      VkAccelerationStructureBuildRangeInfoKHR **pp_build_range_infos =
         (void *)build->pp_build_range_infos;

      for (uint32_t i = 0; i < infoCount; i++) {
         uint32_t build_range_size =
            build->infos[i].geometryCount * sizeof(VkAccelerationStructureBuildRangeInfoKHR);
         VkAccelerationStructureBuildRangeInfoKHR *p_build_range_infos =
            vk_zalloc(queue->alloc, build_range_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!p_build_range_infos)
            goto err;

         memcpy(p_build_range_infos, ppBuildRangeInfos[i], build_range_size);

         pp_build_range_infos[i] = p_build_range_infos;
      }
   }

   list_addtail(&cmd->cmd_link, &queue->cmds);
   return;

err:
   if (cmd) {
      /* The entry was never enqueued, so free everything here. */
      vk_cmd_build_acceleration_structures_khr_free(queue, cmd);
      vk_free(queue->alloc,
              (void *)cmd->u.build_acceleration_structures_khr.infos);
      vk_free(queue->alloc,
              (void *)cmd->u.build_acceleration_structures_khr.pp_build_range_infos);
      vk_free(queue->alloc, cmd);
   }

   vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
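/* pValues is only required to stay valid for the duration of the call, so
 * both the info structure and the push-constant bytes are copied into
 * queue-owned allocations.
 */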
VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushConstants2KHR(
   VkCommandBuffer commandBuffer,
   const VkPushConstantsInfoKHR *pPushConstantsInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_PUSH_CONSTANTS2_KHR], 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_PUSH_CONSTANTS2_KHR;

   VkPushConstantsInfoKHR *info = vk_zalloc(queue->alloc, sizeof(*info), 8,
                                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   void *pValues = vk_zalloc(queue->alloc, pPushConstantsInfo->size, 8,
                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!info || !pValues) {
      vk_free(queue->alloc, pValues);
      vk_free(queue->alloc, info);
      vk_free(queue->alloc, cmd);
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   memcpy(info, pPushConstantsInfo, sizeof(*info));
   memcpy(pValues, pPushConstantsInfo->pValues, pPushConstantsInfo->size);

   cmd->u.push_constants2_khr.push_constants_info = info;
   info->pValues = pValues;

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
}
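/* Unlike the entrypoints above, this one hangs the nested descriptor arrays
 * off a single ralloc context stored in driver_data, so the free callback
 * below is just one ralloc_free; the vk_zalloc'd top-level structures are
 * left to the generic queue teardown.
 */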
static void
vk_free_cmd_push_descriptor_set2_khr(struct vk_cmd_queue *queue,
                                     struct vk_cmd_queue_entry *cmd)
{
   ralloc_free(cmd->driver_data);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSet2KHR(
   VkCommandBuffer commandBuffer,
   const VkPushDescriptorSetInfoKHR *pPushDescriptorSetInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc,
                vk_cmd_queue_type_sizes[VK_CMD_PUSH_DESCRIPTOR_SET2_KHR], 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      return;

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET2_KHR;
   cmd->driver_free_cb = vk_free_cmd_push_descriptor_set2_khr;

   void *ctx = cmd->driver_data = ralloc_context(NULL);
   if (pPushDescriptorSetInfo) {
      VkPushDescriptorSetInfoKHR *dst =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(VkPushDescriptorSetInfoKHR), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      cmd->u.push_descriptor_set2_khr.push_descriptor_set_info = dst;
      if (!dst) {
         ralloc_free(ctx);
         vk_free(cmd_buffer->cmd_queue.alloc, cmd);
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      const VkPushDescriptorSetInfoKHR *src = pPushDescriptorSetInfo;
      memcpy(dst, src, sizeof(VkPushDescriptorSetInfoKHR));

      const VkBaseInStructure *pnext = dst->pNext;
      if (pnext) {
         switch ((int32_t)pnext->sType) {
         case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO: {
            dst->pNext = rzalloc(ctx, VkPipelineLayoutCreateInfo);
            memcpy((void *)dst->pNext, pnext, sizeof(VkPipelineLayoutCreateInfo));

            VkPipelineLayoutCreateInfo *dst_layout_info = (void *)dst->pNext;
            const VkPipelineLayoutCreateInfo *src_layout_info = (const void *)pnext;
            if (src_layout_info->pSetLayouts) {
               dst_layout_info->pSetLayouts =
                  rzalloc_array_size(ctx, sizeof(*dst_layout_info->pSetLayouts),
                                     dst_layout_info->setLayoutCount);
               memcpy((void *)dst_layout_info->pSetLayouts, src_layout_info->pSetLayouts,
                      sizeof(*dst_layout_info->pSetLayouts) * dst_layout_info->setLayoutCount);
            }
            if (src_layout_info->pPushConstantRanges) {
               dst_layout_info->pPushConstantRanges =
                  rzalloc_array_size(ctx, sizeof(*dst_layout_info->pPushConstantRanges),
                                     dst_layout_info->pushConstantRangeCount);
               memcpy((void *)dst_layout_info->pPushConstantRanges,
                      src_layout_info->pPushConstantRanges,
                      sizeof(*dst_layout_info->pPushConstantRanges) *
                         dst_layout_info->pushConstantRangeCount);
            }
            break;
         }

         default:
            /* An unknown chained struct cannot be deep-copied; leaving the
             * shallow pointer in place would dangle into application memory
             * once this call returns.
             */
            dst->pNext = NULL;
            break;
         }
      }

      if (src->pDescriptorWrites) {
         dst->pDescriptorWrites =
            vk_zalloc(cmd_buffer->cmd_queue.alloc,
                      sizeof(*dst->pDescriptorWrites) * dst->descriptorWriteCount, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!dst->pDescriptorWrites) {
            ralloc_free(ctx);
            vk_free(cmd_buffer->cmd_queue.alloc, dst);
            vk_free(cmd_buffer->cmd_queue.alloc, cmd);
            vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
            return;
         }

         memcpy((void *)dst->pDescriptorWrites, src->pDescriptorWrites,
                sizeof(*dst->pDescriptorWrites) * dst->descriptorWriteCount);

         for (unsigned i = 0; i < src->descriptorWriteCount; i++) {
            VkWriteDescriptorSet *dstwrite = (void *)&dst->pDescriptorWrites[i];
            const VkWriteDescriptorSet *write = &src->pDescriptorWrites[i];
            switch (write->descriptorType) {
            case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
               const VkWriteDescriptorSetInlineUniformBlock *uniform_data =
                  vk_find_struct_const(write->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK);
               assert(uniform_data);
               VkWriteDescriptorSetInlineUniformBlock *dst_block =
                  rzalloc(ctx, VkWriteDescriptorSetInlineUniformBlock);
               memcpy(dst_block, uniform_data, sizeof(*uniform_data));
               dst_block->pData = ralloc_size(ctx, uniform_data->dataSize);
               memcpy((void *)dst_block->pData, uniform_data->pData, uniform_data->dataSize);
               dstwrite->pNext = dst_block;
               break;
            }

            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
               dstwrite->pImageInfo =
                  rzalloc_array(ctx, VkDescriptorImageInfo, write->descriptorCount);
               VkDescriptorImageInfo *arr = (void *)dstwrite->pImageInfo;
               typed_memcpy(arr, write->pImageInfo, write->descriptorCount);
               break;
            }

            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
               dstwrite->pTexelBufferView =
                  rzalloc_array(ctx, VkBufferView, write->descriptorCount);
               VkBufferView *arr = (void *)dstwrite->pTexelBufferView;
               typed_memcpy(arr, write->pTexelBufferView, write->descriptorCount);
               break;
            }

            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
               dstwrite->pBufferInfo =
                  rzalloc_array(ctx, VkDescriptorBufferInfo, write->descriptorCount);
               VkDescriptorBufferInfo *arr = (void *)dstwrite->pBufferInfo;
               typed_memcpy(arr, write->pBufferInfo, write->descriptorCount);
               break;
            }

            case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
               const VkWriteDescriptorSetAccelerationStructureKHR *accel_structs =
                  vk_find_struct_const(write->pNext, WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR);

               uint32_t accel_structs_size = sizeof(VkAccelerationStructureKHR) *
                                             accel_structs->accelerationStructureCount;
               VkWriteDescriptorSetAccelerationStructureKHR *write_accel_structs =
                  rzalloc_size(ctx, sizeof(VkWriteDescriptorSetAccelerationStructureKHR) +
                                       accel_structs_size);

               write_accel_structs->sType =
                  VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR;
               write_accel_structs->accelerationStructureCount =
                  accel_structs->accelerationStructureCount;
               write_accel_structs->pAccelerationStructures = (void *)&write_accel_structs[1];
               memcpy((void *)write_accel_structs->pAccelerationStructures,
                      accel_structs->pAccelerationStructures, accel_structs_size);

               dstwrite->pNext = write_accel_structs;
               break;
            }

            default:
               break;
            }
         }
      }
   }

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
}