/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_descriptor_set.h"

#include "venus-protocol/vn_protocol_driver_descriptor_pool.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set_layout.h"
#include "venus-protocol/vn_protocol_driver_descriptor_update_template.h"

#include "vn_device.h"
#include "vn_pipeline.h"

void
vn_descriptor_set_layout_destroy(struct vn_device *dev,
                                 struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   vn_async_vkDestroyDescriptorSetLayout(dev->primary_ring, dev_handle,
                                         layout_handle, NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

static void
vn_descriptor_set_destroy(struct vn_device *dev,
                          struct vn_descriptor_set *set,
                          const VkAllocationCallbacks *alloc)
{
   list_del(&set->head);

   vn_descriptor_set_layout_unref(dev, set->layout);

   vn_object_base_fini(&set->base);
   vk_free(alloc, set);
}

/* Map VkDescriptorType to contiguous enum vn_descriptor_type */
static enum vn_descriptor_type
vn_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      return VN_DESCRIPTOR_TYPE_SAMPLER;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      return VN_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      return VN_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      return VN_DESCRIPTOR_TYPE_STORAGE_IMAGE;
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      return VN_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      return VN_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      return VN_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      return VN_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return VN_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return VN_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      return VN_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
      return VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK;
   case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
      return VN_DESCRIPTOR_TYPE_MUTABLE_EXT;
   default:
      break;
   }

   unreachable("bad VkDescriptorType");
}

/* descriptor set layout commands */

void
vn_GetDescriptorSetLayoutSupport(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   VkDescriptorSetLayoutSupport *pSupport)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO per-device cache */
   vn_call_vkGetDescriptorSetLayoutSupport(dev->primary_ring, device,
                                           pCreateInfo, pSupport);
}

static void
vn_descriptor_set_layout_init(
   struct vn_device *dev,
   const VkDescriptorSetLayoutCreateInfo *create_info,
   uint32_t last_binding,
   struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkDescriptorSetLayoutBindingFlagsCreateInfo *binding_flags =
      vk_find_struct_const(create_info->pNext,
                           DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);

   const VkMutableDescriptorTypeCreateInfoEXT *mutable_descriptor_info =
      vk_find_struct_const(create_info->pNext,
                           MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);

   /* 14.2.1. Descriptor Set Layout
    *
    * If bindingCount is zero or if this structure is not included in
    * the pNext chain, the VkDescriptorBindingFlags for each descriptor
    * set layout binding is considered to be zero.
    */
   if (binding_flags && !binding_flags->bindingCount)
      binding_flags = NULL;

   layout->is_push_descriptor =
      create_info->flags &
      VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;

   layout->refcount = VN_REFCOUNT_INIT(1);
   layout->last_binding = last_binding;

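   /* layout->bindings is indexed by binding number, so sparse binding
    * indices leave zero-initialized entries in between.
    */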
   for (uint32_t i = 0; i < create_info->bindingCount; i++) {
      const VkDescriptorSetLayoutBinding *binding_info =
         &create_info->pBindings[i];
      const enum vn_descriptor_type type =
         vn_descriptor_type(binding_info->descriptorType);
      struct vn_descriptor_set_layout_binding *binding =
         &layout->bindings[binding_info->binding];

      if (binding_info->binding == last_binding) {
         /* 14.2.1. Descriptor Set Layout
          *
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must only be
          * used for the last binding in the descriptor set layout (i.e. the
          * binding with the largest value of binding).
          *
          * 41. Features
          *
          * descriptorBindingVariableDescriptorCount indicates whether the
          * implementation supports descriptor sets with a variable-sized last
          * binding. If this feature is not enabled,
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must not be
          * used.
          */
         layout->has_variable_descriptor_count =
            binding_flags &&
            (binding_flags->pBindingFlags[i] &
             VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT);
      }

      binding->type = type;
      binding->count = binding_info->descriptorCount;

      switch (type) {
      case VN_DESCRIPTOR_TYPE_SAMPLER:
      case VN_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         binding->has_immutable_samplers = binding_info->pImmutableSamplers;
         break;
      case VN_DESCRIPTOR_TYPE_MUTABLE_EXT:
         assert(mutable_descriptor_info->mutableDescriptorTypeListCount &&
                mutable_descriptor_info->pMutableDescriptorTypeLists[i]
                   .descriptorTypeCount);
         const VkMutableDescriptorTypeListEXT *list =
            &mutable_descriptor_info->pMutableDescriptorTypeLists[i];
         for (uint32_t j = 0; j < list->descriptorTypeCount; j++) {
            BITSET_SET(binding->mutable_descriptor_types,
                       vn_descriptor_type(list->pDescriptorTypes[j]));
         }
         break;
      default:
         break;
      }
   }

   vn_async_vkCreateDescriptorSetLayout(dev->primary_ring, dev_handle,
                                        create_info, NULL, &layout_handle);
}

VkResult
vn_CreateDescriptorSetLayout(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorSetLayout *pSetLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   /* ignore pAllocator as the layout is reference-counted */
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   STACK_ARRAY(VkDescriptorSetLayoutBinding, bindings,
               pCreateInfo->bindingCount);

   uint32_t last_binding = 0;
   VkDescriptorSetLayoutCreateInfo local_create_info;
   if (pCreateInfo->bindingCount) {
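      /* Work on a local copy of the bindings: find the highest binding
       * number and clear pImmutableSamplers for descriptor types that
       * ignore it, so stale handles are not encoded to the host.
       */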
      typed_memcpy(bindings, pCreateInfo->pBindings,
                   pCreateInfo->bindingCount);

      for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
         VkDescriptorSetLayoutBinding *binding = &bindings[i];

         if (last_binding < binding->binding)
            last_binding = binding->binding;

         switch (binding->descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            break;
         default:
            binding->pImmutableSamplers = NULL;
            break;
         }
      }

      local_create_info = *pCreateInfo;
      local_create_info.pBindings = bindings;
      pCreateInfo = &local_create_info;
   }

   const size_t layout_size =
      offsetof(struct vn_descriptor_set_layout, bindings[last_binding + 1]);
   /* allocated with the device scope */
   struct vn_descriptor_set_layout *layout =
      vk_zalloc(alloc, layout_size, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!layout) {
      STACK_ARRAY_FINISH(bindings);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
                       &dev->base);

   vn_descriptor_set_layout_init(dev, pCreateInfo, last_binding, layout);

   STACK_ARRAY_FINISH(bindings);

   *pSetLayout = vn_descriptor_set_layout_to_handle(layout);

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorSetLayout(VkDevice device,
                              VkDescriptorSetLayout descriptorSetLayout,
                              const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_set_layout *layout =
      vn_descriptor_set_layout_from_handle(descriptorSetLayout);

   if (!layout)
      return;

   vn_descriptor_set_layout_unref(dev, layout);
}

/* descriptor pool commands */

VkResult
vn_CreateDescriptorPool(VkDevice device,
                        const VkDescriptorPoolCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkDescriptorPool *pDescriptorPool)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const VkDescriptorPoolInlineUniformBlockCreateInfo *iub_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO);

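   /* Count the VK_DESCRIPTOR_TYPE_MUTABLE_EXT pool sizes so the per-type
    * mutable states can be allocated together with the pool below.
    */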
   uint32_t mutable_states_count = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
      if (pool_size->type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
         mutable_states_count++;
   }
   struct vn_descriptor_pool *pool;
   struct vn_descriptor_pool_state_mutable *mutable_states;

   VK_MULTIALLOC(ma);
   vk_multialloc_add(&ma, &pool, __typeof__(*pool), 1);
   vk_multialloc_add(&ma, &mutable_states, __typeof__(*mutable_states),
                     mutable_states_count);

   if (!vk_multialloc_zalloc(&ma, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL,
                       &dev->base);

   pool->allocator = *alloc;
   pool->mutable_states = mutable_states;

   const VkMutableDescriptorTypeCreateInfoEXT *mutable_descriptor_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);

   /* Without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, the set
    * allocation must not fail due to a fragmented pool per spec. In this
    * case, set allocation can be asynchronous with pool resource tracking.
    */
   pool->async_set_allocation =
      !VN_PERF(NO_ASYNC_SET_ALLOC) &&
      !(pCreateInfo->flags &
        VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);

   pool->max.set_count = pCreateInfo->maxSets;

   if (iub_info)
      pool->max.iub_binding_count = iub_info->maxInlineUniformBlockBindings;

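   /* Record the per-type descriptor caps. Mutable pool sizes that name the
    * same set of descriptor types share a single state entry.
    */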
   uint32_t next_mutable_state = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
      const enum vn_descriptor_type type =
         vn_descriptor_type(pool_size->type);

      if (type != VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
         pool->max.descriptor_counts[type] += pool_size->descriptorCount;
         continue;
      }

      struct vn_descriptor_pool_state_mutable *mutable_state = NULL;
      BITSET_DECLARE(mutable_types, VN_NUM_DESCRIPTOR_TYPES);
      if (!mutable_descriptor_info ||
          i >= mutable_descriptor_info->mutableDescriptorTypeListCount) {
         BITSET_ONES(mutable_types);
      } else {
         BITSET_ZERO(mutable_types);
         const VkMutableDescriptorTypeListEXT *list =
            &mutable_descriptor_info->pMutableDescriptorTypeLists[i];

         for (uint32_t j = 0; j < list->descriptorTypeCount; j++) {
            BITSET_SET(mutable_types,
                       vn_descriptor_type(list->pDescriptorTypes[j]));
         }
      }
      for (uint32_t j = 0; j < next_mutable_state; j++) {
         if (BITSET_EQUAL(mutable_types, pool->mutable_states[j].types)) {
            mutable_state = &pool->mutable_states[j];
            break;
         }
      }

      if (!mutable_state) {
         /* The application must ensure that partial overlap does not exist
          * in pPoolSizes, so this entry must have a disjoint set of types.
          */
         mutable_state = &pool->mutable_states[next_mutable_state++];
         BITSET_COPY(mutable_state->types, mutable_types);
      }

      mutable_state->max += pool_size->descriptorCount;
   }

   pool->mutable_states_count = next_mutable_state;
   list_inithead(&pool->descriptor_sets);

   VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
   vn_async_vkCreateDescriptorPool(dev->primary_ring, device, pCreateInfo,
                                   NULL, &pool_handle);

   vn_tls_set_async_pipeline_create();

   *pDescriptorPool = pool_handle;

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorPool(VkDevice device,
                         VkDescriptorPool descriptorPool,
                         const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc;

   if (!pool)
      return;

   alloc = pAllocator ? pAllocator : &pool->allocator;

   vn_async_vkDestroyDescriptorPool(dev->primary_ring, device, descriptorPool,
                                    NULL);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}

static struct vn_descriptor_pool_state_mutable *
vn_get_mutable_state(const struct vn_descriptor_pool *pool,
                     const struct vn_descriptor_set_layout_binding *binding)
{
   for (uint32_t i = 0; i < pool->mutable_states_count; i++) {
      struct vn_descriptor_pool_state_mutable *mutable_state =
         &pool->mutable_states[i];
      BITSET_DECLARE(shared_types, VN_NUM_DESCRIPTOR_TYPES);
      BITSET_AND(shared_types, mutable_state->types,
                 binding->mutable_descriptor_types);

      /* The application must ensure that partial overlap does not exist in
       * pPoolSizes, so there only exists one matching entry.
       */
      if (BITSET_EQUAL(shared_types, binding->mutable_descriptor_types))
         return mutable_state;
   }
   unreachable("bad mutable descriptor binding");
}

static inline void
vn_pool_restore_mutable_states(struct vn_descriptor_pool *pool,
                               const struct vn_descriptor_set_layout *layout,
                               uint32_t binding_index,
                               uint32_t descriptor_count)
{
   assert(layout->bindings[binding_index].type ==
          VN_DESCRIPTOR_TYPE_MUTABLE_EXT);
   assert(descriptor_count);
   struct vn_descriptor_pool_state_mutable *mutable_state =
      vn_get_mutable_state(pool, &layout->bindings[binding_index]);
   assert(mutable_state && mutable_state->used >= descriptor_count);
   mutable_state->used -= descriptor_count;
}

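/* Charge one descriptor set against the pool caps. Returns false and rolls
 * back any partial accounting if the pool would be exhausted. Only used
 * when sets are allocated asynchronously.
 */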
static bool
vn_descriptor_pool_alloc_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   assert(pool->async_set_allocation);

   if (pool->used.set_count == pool->max.set_count)
      return false;

   /* back up the current pool state for recovery */
   struct vn_descriptor_pool_state recovery = pool->used;
   pool->used.set_count++;

   uint32_t i = 0;
   for (; i <= layout->last_binding; i++) {
      const struct vn_descriptor_set_layout_binding *binding =
         &layout->bindings[i];
      const enum vn_descriptor_type type = binding->type;
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : binding->count;

      /* Skip resource accounting for either of the following:
       * - reserved binding entry that has a valid type with a zero count
       * - invalid binding entry from sparse binding indices
       */
      if (!count)
         continue;

      if (type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
         /* A mutable descriptor can be allocated if the following are
          * satisfied:
          * - vn_descriptor_pool_state_mutable::types is a superset
          * - vn_descriptor_pool_state_mutable::{max - used} is enough
          */
         struct vn_descriptor_pool_state_mutable *mutable_state =
            vn_get_mutable_state(pool, binding);
         assert(mutable_state);
         if (mutable_state->used + count > mutable_state->max)
            goto restore;

         mutable_state->used += count;
      } else {
         if (type == VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK &&
             ++pool->used.iub_binding_count > pool->max.iub_binding_count)
            goto restore;

         pool->used.descriptor_counts[type] += count;
         if (pool->used.descriptor_counts[type] >
             pool->max.descriptor_counts[type])
            goto restore;
      }
   }

   return true;

restore:
   /* restore the pool state to what it was before this allocation */
   pool->used = recovery;
   for (uint32_t j = 0; j < i; j++) {
      /* mutable state at binding i is not changed */
      const uint32_t count = layout->bindings[j].count;
      if (count && layout->bindings[j].type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT)
         vn_pool_restore_mutable_states(pool, layout, j, count);
   }
   return false;
}

static void
vn_descriptor_pool_free_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   assert(pool->async_set_allocation);

   for (uint32_t i = 0; i <= layout->last_binding; i++) {
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : layout->bindings[i].count;
      if (!count)
         continue;

      const enum vn_descriptor_type type = layout->bindings[i].type;
      if (type == VN_DESCRIPTOR_TYPE_MUTABLE_EXT) {
         vn_pool_restore_mutable_states(pool, layout, i, count);
      } else {
         pool->used.descriptor_counts[type] -= count;

         if (type == VN_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
            pool->used.iub_binding_count--;
      }
   }

   pool->used.set_count--;
}

static inline void
vn_descriptor_pool_reset_descriptors(struct vn_descriptor_pool *pool)
{
   assert(pool->async_set_allocation);

   memset(&pool->used, 0, sizeof(pool->used));

   for (uint32_t i = 0; i < pool->mutable_states_count; i++)
      pool->mutable_states[i].used = 0;
}

VkResult
vn_ResetDescriptorPool(VkDevice device,
                       VkDescriptorPool descriptorPool,
                       VkDescriptorPoolResetFlags flags)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   vn_async_vkResetDescriptorPool(dev->primary_ring, device, descriptorPool,
                                  flags);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   if (pool->async_set_allocation)
      vn_descriptor_pool_reset_descriptors(pool);

   return VK_SUCCESS;
}

/* descriptor set commands */

VkResult
vn_AllocateDescriptorSets(VkDevice device,
                          const VkDescriptorSetAllocateInfo *pAllocateInfo,
                          VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;
   VkResult result;

   /* 14.2.3. Allocation of Descriptor Sets
    *
    * If descriptorSetCount is zero or this structure is not included in
    * the pNext chain, then the variable lengths are considered to be zero.
    */
   const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_info =
      vk_find_struct_const(
         pAllocateInfo->pNext,
         DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
   if (variable_info && !variable_info->descriptorSetCount)
      variable_info = NULL;

   uint32_t i = 0;
   for (; i < pAllocateInfo->descriptorSetCount; i++) {
      struct vn_descriptor_set_layout *layout =
         vn_descriptor_set_layout_from_handle(pAllocateInfo->pSetLayouts[i]);

      /* 14.2.3. Allocation of Descriptor Sets
       *
       * If VkDescriptorSetAllocateInfo::pSetLayouts[i] does not include a
       * variable count descriptor binding, then pDescriptorCounts[i] is
       * ignored.
       */
      uint32_t last_binding_descriptor_count = 0;
      if (!layout->has_variable_descriptor_count) {
         last_binding_descriptor_count =
            layout->bindings[layout->last_binding].count;
      } else if (variable_info) {
         last_binding_descriptor_count = variable_info->pDescriptorCounts[i];
      }

      if (pool->async_set_allocation &&
          !vn_descriptor_pool_alloc_descriptors(
             pool, layout, last_binding_descriptor_count)) {
         result = VK_ERROR_OUT_OF_POOL_MEMORY;
         goto fail;
      }

      struct vn_descriptor_set *set =
         vk_zalloc(alloc, sizeof(*set), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!set) {
         if (pool->async_set_allocation) {
            vn_descriptor_pool_free_descriptors(
               pool, layout, last_binding_descriptor_count);
         }
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      vn_object_base_init(&set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET,
                          &dev->base);

      /* We might reorder vkCmdBindDescriptorSets after
       * vkDestroyDescriptorSetLayout due to batching.  The spec says
       *
       *   VkDescriptorSetLayout objects may be accessed by commands that
       *   operate on descriptor sets allocated using that layout, and those
       *   descriptor sets must not be updated with vkUpdateDescriptorSets
       *   after the descriptor set layout has been destroyed. Otherwise, a
       *   VkDescriptorSetLayout object passed as a parameter to create
       *   another object is not further accessed by that object after the
       *   duration of the command it is passed into.
       *
       * It is ambiguous but the reordering is likely invalid.  Let's keep the
       * layout alive with the set to defer vkDestroyDescriptorSetLayout.
       */
      set->layout = vn_descriptor_set_layout_ref(dev, layout);
      set->last_binding_descriptor_count = last_binding_descriptor_count;
      list_addtail(&set->head, &pool->descriptor_sets);

      pDescriptorSets[i] = vn_descriptor_set_to_handle(set);
   }

   if (pool->async_set_allocation) {
      vn_async_vkAllocateDescriptorSets(dev->primary_ring, device,
                                        pAllocateInfo, pDescriptorSets);
   } else {
      result = vn_call_vkAllocateDescriptorSets(
         dev->primary_ring, device, pAllocateInfo, pDescriptorSets);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   for (uint32_t j = 0; j < i; j++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[j]);

      if (pool->async_set_allocation) {
         vn_descriptor_pool_free_descriptors(
            pool, set->layout, set->last_binding_descriptor_count);
      }

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   memset(pDescriptorSets, 0,
          sizeof(*pDescriptorSets) * pAllocateInfo->descriptorSetCount);

   return vn_error(dev->instance, result);
}

VkResult
vn_FreeDescriptorSets(VkDevice device,
                      VkDescriptorPool descriptorPool,
                      uint32_t descriptorSetCount,
                      const VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

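   /* vkFreeDescriptorSets requires a pool created with
    * VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, which disables
    * async_set_allocation at pool creation.
    */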
   assert(!pool->async_set_allocation);

   vn_async_vkFreeDescriptorSets(dev->primary_ring, device, descriptorPool,
                                 descriptorSetCount, pDescriptorSets);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[i]);

      if (!set)
         continue;

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   return VK_SUCCESS;
}

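/* Count how many VkDescriptorImageInfo entries the writes reference, so the
 * caller can size the shadow copy consumed by vn_descriptor_set_get_writes.
 */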
uint32_t
vn_descriptor_set_count_write_images(uint32_t write_count,
                                     const VkWriteDescriptorSet *writes)
{
   uint32_t img_info_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const VkWriteDescriptorSet *write = &writes[i];
      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_info_count += write->descriptorCount;
         break;
      default:
         break;
      }
   }
   return img_info_count;
}

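/* Shadow the writes into the caller-provided storage, clearing the union
 * members (pImageInfo/pBufferInfo/pTexelBufferView) that the descriptor type
 * ignores so they are not encoded. For push descriptors,
 * pipeline_layout_handle supplies the set layout since dstSet is ignored.
 */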
const VkWriteDescriptorSet *
vn_descriptor_set_get_writes(uint32_t write_count,
                             const VkWriteDescriptorSet *writes,
                             VkPipelineLayout pipeline_layout_handle,
                             struct vn_descriptor_set_writes *local)
{
   const struct vn_pipeline_layout *pipeline_layout =
      vn_pipeline_layout_from_handle(pipeline_layout_handle);

   typed_memcpy(local->writes, writes, write_count);

   uint32_t img_info_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const struct vn_descriptor_set_layout *set_layout =
         pipeline_layout
            ? pipeline_layout->push_descriptor_set_layout
            : vn_descriptor_set_from_handle(writes[i].dstSet)->layout;
      VkWriteDescriptorSet *write = &local->writes[i];
      VkDescriptorImageInfo *img_infos = &local->img_infos[img_info_count];
      bool ignore_sampler = true;
      bool ignore_iview = false;
      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         ignore_iview = true;
         FALLTHROUGH;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         ignore_sampler =
            set_layout->bindings[write->dstBinding].has_immutable_samplers;
         FALLTHROUGH;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         typed_memcpy(img_infos, write->pImageInfo, write->descriptorCount);
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            if (ignore_sampler)
               img_infos[j].sampler = VK_NULL_HANDLE;
            if (ignore_iview)
               img_infos[j].imageView = VK_NULL_HANDLE;
         }
         write->pImageInfo = img_infos;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         img_info_count += write->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         write->pImageInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
      case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
      default:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      }
   }
   return local->writes;
}

void
vn_UpdateDescriptorSets(VkDevice device,
                        uint32_t descriptorWriteCount,
                        const VkWriteDescriptorSet *pDescriptorWrites,
                        uint32_t descriptorCopyCount,
                        const VkCopyDescriptorSet *pDescriptorCopies)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const uint32_t img_info_count = vn_descriptor_set_count_write_images(
      descriptorWriteCount, pDescriptorWrites);

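   /* Build a sanitized copy of the writes on the stack before encoding. */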
   STACK_ARRAY(VkWriteDescriptorSet, writes, descriptorWriteCount);
   STACK_ARRAY(VkDescriptorImageInfo, img_infos, img_info_count);
   struct vn_descriptor_set_writes local = {
      .writes = writes,
      .img_infos = img_infos,
   };
   pDescriptorWrites = vn_descriptor_set_get_writes(
      descriptorWriteCount, pDescriptorWrites, VK_NULL_HANDLE, &local);

   vn_async_vkUpdateDescriptorSets(dev->primary_ring, device,
                                   descriptorWriteCount, pDescriptorWrites,
                                   descriptorCopyCount, pDescriptorCopies);

   STACK_ARRAY_FINISH(writes);
   STACK_ARRAY_FINISH(img_infos);
}

/* descriptor update template commands */

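/* Copy the template entries and record per-category descriptor counts so
 * vn_UpdateDescriptorSetWithTemplate can size its scratch arrays.
 */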
static void
vn_descriptor_update_template_init(
   struct vn_descriptor_update_template *templ,
   const VkDescriptorUpdateTemplateCreateInfo *create_info)
{
   templ->entry_count = create_info->descriptorUpdateEntryCount;
   for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
      const VkDescriptorUpdateTemplateEntry *entry =
         &create_info->pDescriptorUpdateEntries[i];
      templ->entries[i] = *entry;
      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         templ->img_info_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         templ->bview_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         templ->buf_info_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
         templ->iub_count += 1;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
   }
}

VkResult
vn_CreateDescriptorUpdateTemplate(
   VkDevice device,
   const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const size_t templ_size =
      offsetof(struct vn_descriptor_update_template,
               entries[pCreateInfo->descriptorUpdateEntryCount]);
   struct vn_descriptor_update_template *templ = vk_zalloc(
      alloc, templ_size, VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!templ)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&templ->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, &dev->base);

   if (pCreateInfo->templateType ==
       VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
      struct vn_pipeline_layout *pipeline_layout =
         vn_pipeline_layout_from_handle(pCreateInfo->pipelineLayout);
      templ->push.pipeline_bind_point = pCreateInfo->pipelineBindPoint;
      templ->push.set_layout = pipeline_layout->push_descriptor_set_layout;
   }

   vn_descriptor_update_template_init(templ, pCreateInfo);

   /* no host object */
   *pDescriptorUpdateTemplate =
      vn_descriptor_update_template_to_handle(templ);

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorUpdateTemplate(
   VkDevice device,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!templ)
      return;

   /* no host object */
   vn_object_base_fini(&templ->base);
   vk_free(alloc, templ);
}

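/* Expand the raw update data into VkWriteDescriptorSet structs, following
 * each entry's offset and stride, and dropping sampler or image-view handles
 * that the descriptor type or immutable samplers make irrelevant.
 */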
void
vn_descriptor_set_fill_update_with_template(
   struct vn_descriptor_update_template *templ,
   VkDescriptorSet set_handle,
   const uint8_t *data,
   struct vn_descriptor_set_update *update)
{
   struct vn_descriptor_set *set = vn_descriptor_set_from_handle(set_handle);
   const struct vn_descriptor_set_layout *set_layout =
      templ->push.set_layout ? templ->push.set_layout : set->layout;

   update->write_count = templ->entry_count;

   uint32_t img_info_offset = 0;
   uint32_t buf_info_offset = 0;
   uint32_t bview_offset = 0;
   uint32_t iub_offset = 0;
   for (uint32_t i = 0; i < templ->entry_count; i++) {
      const VkDescriptorUpdateTemplateEntry *entry = &templ->entries[i];
      const uint8_t *ptr = data + entry->offset;
      bool ignore_sampler = true;
      bool ignore_iview = false;
      VkDescriptorImageInfo *img_infos = NULL;
      VkDescriptorBufferInfo *buf_infos = NULL;
      VkBufferView *bview_handles = NULL;
      VkWriteDescriptorSetInlineUniformBlock *iub = NULL;
      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         ignore_iview = true;
         FALLTHROUGH;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         ignore_sampler =
            set_layout->bindings[entry->dstBinding].has_immutable_samplers;
         FALLTHROUGH;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_infos = &update->img_infos[img_info_offset];
         for (uint32_t j = 0; j < entry->descriptorCount; j++) {
            const VkDescriptorImageInfo *src = (const void *)ptr;
            img_infos[j] = (VkDescriptorImageInfo){
               .sampler = ignore_sampler ? VK_NULL_HANDLE : src->sampler,
               .imageView = ignore_iview ? VK_NULL_HANDLE : src->imageView,
               .imageLayout = src->imageLayout,
            };
            ptr += entry->stride;
         }
         img_info_offset += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         bview_handles = &update->bview_handles[bview_offset];
         for (uint32_t j = 0; j < entry->descriptorCount; j++) {
            bview_handles[j] = *(const VkBufferView *)ptr;
            ptr += entry->stride;
         }
         bview_offset += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buf_infos = &update->buf_infos[buf_info_offset];
         for (uint32_t j = 0; j < entry->descriptorCount; j++) {
            buf_infos[j] = *(const VkDescriptorBufferInfo *)ptr;
            ptr += entry->stride;
         }
         buf_info_offset += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
         iub = &update->iubs[iub_offset];
         *iub = (VkWriteDescriptorSetInlineUniformBlock){
            .sType =
               VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK,
            .dataSize = entry->descriptorCount,
            .pData = (const void *)ptr,
         };
         iub_offset++;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
      update->writes[i] = (VkWriteDescriptorSet){
         .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
         .pNext = iub,
         .dstSet = set_handle,
         .dstBinding = entry->dstBinding,
         .dstArrayElement = entry->dstArrayElement,
         .descriptorCount = entry->descriptorCount,
         .descriptorType = entry->descriptorType,
         .pImageInfo = img_infos,
         .pBufferInfo = buf_infos,
         .pTexelBufferView = bview_handles,
      };
   }
}

void
vn_UpdateDescriptorSetWithTemplate(
   VkDevice device,
   VkDescriptorSet descriptorSet,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);

   STACK_ARRAY(VkWriteDescriptorSet, writes, templ->entry_count);
   STACK_ARRAY(VkDescriptorImageInfo, img_infos, templ->img_info_count);
   STACK_ARRAY(VkDescriptorBufferInfo, buf_infos, templ->buf_info_count);
   STACK_ARRAY(VkBufferView, bview_handles, templ->bview_count);
   STACK_ARRAY(VkWriteDescriptorSetInlineUniformBlock, iubs,
               templ->iub_count);
   struct vn_descriptor_set_update update = {
      .writes = writes,
      .img_infos = img_infos,
      .buf_infos = buf_infos,
      .bview_handles = bview_handles,
      .iubs = iubs,
   };
   vn_descriptor_set_fill_update_with_template(templ, descriptorSet, pData,
                                               &update);

   vn_async_vkUpdateDescriptorSets(
      dev->primary_ring, device, update.write_count, update.writes, 0, NULL);

   STACK_ARRAY_FINISH(writes);
   STACK_ARRAY_FINISH(img_infos);
   STACK_ARRAY_FINISH(buf_infos);
   STACK_ARRAY_FINISH(bview_handles);
   STACK_ARRAY_FINISH(iubs);
}