xref: /aosp_15_r20/external/mesa3d/src/nouveau/vulkan/nvk_descriptor_set.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3  * SPDX-License-Identifier: MIT
4  */
5 #include "nvk_descriptor_set.h"
6 
7 #include "nvk_buffer.h"
8 #include "nvk_buffer_view.h"
9 #include "nvk_descriptor_set_layout.h"
10 #include "nvk_device.h"
11 #include "nvk_entrypoints.h"
12 #include "nvk_image_view.h"
13 #include "nvk_physical_device.h"
14 #include "nvk_sampler.h"
15 #include "nvkmd/nvkmd.h"
16 
17 #include "util/format/u_format.h"
18 
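/* Added note: align_u32() below rounds v up to the next multiple of a, where
 * a must be a non-zero power of two, e.g. align_u32(5, 4) == 8 and
 * align_u32(8, 4) == 8.
 */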
19 static inline uint32_t
20 align_u32(uint32_t v, uint32_t a)
21 {
22    assert(a != 0 && a == (a & -a));
23    return (v + a - 1) & ~(a - 1);
24 }
25 
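/* Added note: desc_ubo_data() returns a CPU pointer into the descriptor
 * set's mapped storage for the given binding and array element, computed as
 * the binding's base offset plus elem * stride from the set layout.  If
 * size_out is non-NULL it receives the number of bytes remaining in the set
 * from that offset, which callers use to bounds-check their writes.
 */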
26 static inline void *
27 desc_ubo_data(struct nvk_descriptor_set *set, uint32_t binding,
28               uint32_t elem, uint32_t *size_out)
29 {
30    const struct nvk_descriptor_set_binding_layout *binding_layout =
31       &set->layout->binding[binding];
32 
33    uint32_t offset = binding_layout->offset + elem * binding_layout->stride;
34    assert(offset < set->size);
35 
36    if (size_out != NULL)
37       *size_out = set->size - offset;
38 
39    return (char *)set->mapped_ptr + offset;
40 }
41 
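/* Added note: write_desc() copies desc_size bytes of pre-packed descriptor
 * data into the set at the location computed by desc_ubo_data().
 */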
42 static void
43 write_desc(struct nvk_descriptor_set *set, uint32_t binding, uint32_t elem,
44            const void *desc_data, size_t desc_size)
45 {
46    ASSERTED uint32_t dst_size;
47    void *dst = desc_ubo_data(set, binding, elem, &dst_size);
48    assert(desc_size <= dst_size);
49    memcpy(dst, desc_data, desc_size);
50 }
51 
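/* Added note: get_sampled_image_view_desc() packs one
 * nvk_sampled_image_descriptor per plane into dst.  For image views, each
 * plane's sampled_desc_index is recorded; for samplers, the per-plane
 * sampler desc_index is recorded, replicating the last sampler plane when
 * the view has more planes than the sampler (see the comment in the loop
 * below).  Up to three planes are handled for multi-planar (e.g. YCbCr)
 * formats.
 */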
52 static void
53 get_sampled_image_view_desc(VkDescriptorType descriptor_type,
54                             const VkDescriptorImageInfo *const info,
55                             void *dst, size_t dst_size)
56 {
57    struct nvk_sampled_image_descriptor desc[3] = { };
58    uint8_t plane_count = 1;
59 
60    if (descriptor_type != VK_DESCRIPTOR_TYPE_SAMPLER &&
61        info && info->imageView != VK_NULL_HANDLE) {
62       VK_FROM_HANDLE(nvk_image_view, view, info->imageView);
63 
64       plane_count = view->plane_count;
65       for (uint8_t plane = 0; plane < plane_count; plane++) {
66          assert(view->planes[plane].sampled_desc_index > 0);
67          assert(view->planes[plane].sampled_desc_index < (1 << 20));
68          desc[plane].image_index = view->planes[plane].sampled_desc_index;
69       }
70    }
71 
72    if (descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
73        descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
74       VK_FROM_HANDLE(nvk_sampler, sampler, info->sampler);
75 
76       plane_count = MAX2(plane_count, sampler->plane_count);
77 
78       for (uint8_t plane = 0; plane < plane_count; plane++) {
79          /* We need to replicate the last sampler plane out to all image
80           * planes due to sampler table entry limitations. See
81           * nvk_CreateSampler in nvk_sampler.c for more details.
82           */
83          uint8_t sampler_plane = MIN2(plane, sampler->plane_count - 1);
84          assert(sampler->planes[sampler_plane].desc_index < (1 << 12));
85          desc[plane].sampler_index = sampler->planes[sampler_plane].desc_index;
86       }
87    }
88 
89    assert(sizeof(desc[0]) * plane_count <= dst_size);
90    memcpy(dst, desc, sizeof(desc[0]) * plane_count);
91 }
92 
93 static void
94 write_sampled_image_view_desc(struct nvk_descriptor_set *set,
95                               const VkDescriptorImageInfo *const _info,
96                               uint32_t binding, uint32_t elem,
97                               VkDescriptorType descriptor_type)
98 {
99    VkDescriptorImageInfo info = *_info;
100 
101    if (descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
102        descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
103       const struct nvk_descriptor_set_binding_layout *binding_layout =
104          &set->layout->binding[binding];
105       if (binding_layout->immutable_samplers != NULL) {
106          info.sampler = nvk_sampler_to_handle(
107             binding_layout->immutable_samplers[elem]);
108       }
109    }
110 
111    uint32_t dst_size;
112    void *dst = desc_ubo_data(set, binding, elem, &dst_size);
113    get_sampled_image_view_desc(descriptor_type, &info, dst, dst_size);
114 }
115 
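/* Added note: get_storage_image_view_desc() packs an
 * nvk_storage_image_descriptor.  Storage images are always single-plane;
 * besides the storage_desc_index, the descriptor records log2 of the
 * per-pixel sample extent in each dimension, presumably so shaders can
 * convert between pixel and sample coordinates for multisampled images.
 */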
116 static void
117 get_storage_image_view_desc(const VkDescriptorImageInfo *const info,
118                             void *dst, size_t dst_size)
119 {
120    struct nvk_storage_image_descriptor desc = { };
121 
122    if (info && info->imageView != VK_NULL_HANDLE) {
123       VK_FROM_HANDLE(nvk_image_view, view, info->imageView);
124 
125       /* Storage images are always single plane */
126       assert(view->plane_count == 1);
127       uint8_t plane = 0;
128 
129       assert(view->planes[plane].storage_desc_index > 0);
130       assert(view->planes[plane].storage_desc_index < (1 << 20));
131 
132       desc.image_index = view->planes[plane].storage_desc_index;
133 
134       const struct nil_Extent4D_Samples px_extent_sa =
135          nil_px_extent_sa(view->planes[plane].sample_layout);
136       desc.sw_log2 = util_logbase2(px_extent_sa.width);
137       desc.sh_log2 = util_logbase2(px_extent_sa.height);
138    }
139 
140    assert(sizeof(desc) <= dst_size);
141    memcpy(dst, &desc, sizeof(desc));
142 }
143 
144 static void
145 write_storage_image_view_desc(struct nvk_descriptor_set *set,
146                               const VkDescriptorImageInfo *const info,
147                               uint32_t binding, uint32_t elem)
148 {
149    uint32_t dst_size;
150    void *dst = desc_ubo_data(set, binding, elem, &dst_size);
151    get_storage_image_view_desc(info, dst, dst_size);
152 }
153 
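/* Added note: ubo_desc() builds a UBO descriptor from an address range.  The
 * range is padded up to the minimum cbuf alignment.  With bindless cbufs the
 * base address and size are stored shifted right by 4 bits; otherwise the
 * raw base address and size are stored.
 */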
154 static union nvk_buffer_descriptor
155 ubo_desc(struct nvk_physical_device *pdev,
156          struct nvk_addr_range addr_range)
157 {
158    const uint32_t min_cbuf_alignment = nvk_min_cbuf_alignment(&pdev->info);
159 
160    assert(addr_range.addr % min_cbuf_alignment == 0);
161    assert(addr_range.range <= NVK_MAX_CBUF_SIZE);
162 
163    addr_range.addr = align64(addr_range.addr, min_cbuf_alignment);
164    addr_range.range = align(addr_range.range, min_cbuf_alignment);
165 
166    if (nvk_use_bindless_cbuf(&pdev->info)) {
167       return (union nvk_buffer_descriptor) { .cbuf = {
168          .base_addr_shift_4 = addr_range.addr >> 4,
169          .size_shift_4 = addr_range.range >> 4,
170       }};
171    } else {
172       return (union nvk_buffer_descriptor) { .addr = {
173          .base_addr = addr_range.addr,
174          .size = addr_range.range,
175       }};
176    }
177 }
178 
179 static void
180 write_ubo_desc(struct nvk_physical_device *pdev,
181                struct nvk_descriptor_set *set,
182                const VkDescriptorBufferInfo *const info,
183                uint32_t binding, uint32_t elem)
184 {
185    VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
186    struct nvk_addr_range addr_range =
187       nvk_buffer_addr_range(buffer, info->offset, info->range);
188 
189    const union nvk_buffer_descriptor desc = ubo_desc(pdev, addr_range);
190    write_desc(set, binding, elem, &desc, sizeof(desc));
191 }
192 
193 static void
194 write_dynamic_ubo_desc(struct nvk_physical_device *pdev,
195                        struct nvk_descriptor_set *set,
196                        const VkDescriptorBufferInfo *const info,
197                        uint32_t binding, uint32_t elem)
198 {
199    VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
200    struct nvk_addr_range addr_range =
201       nvk_buffer_addr_range(buffer, info->offset, info->range);
202 
203    const struct nvk_descriptor_set_binding_layout *binding_layout =
204       &set->layout->binding[binding];
205    set->dynamic_buffers[binding_layout->dynamic_buffer_index + elem] =
206       ubo_desc(pdev, addr_range);
207 }
208 
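/* Added note: ssbo_desc() builds an SSBO descriptor holding the raw base
 * address plus a size.  The address must already satisfy
 * NVK_MIN_SSBO_ALIGNMENT and the size is padded to
 * NVK_SSBO_BOUNDS_CHECK_ALIGNMENT for bounds checking.
 */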
209 static union nvk_buffer_descriptor
210 ssbo_desc(struct nvk_addr_range addr_range)
211 {
212    assert(addr_range.addr % NVK_MIN_SSBO_ALIGNMENT == 0);
213    assert(addr_range.range <= UINT32_MAX);
214 
215    addr_range.addr = align64(addr_range.addr, NVK_MIN_SSBO_ALIGNMENT);
216    addr_range.range = align(addr_range.range, NVK_SSBO_BOUNDS_CHECK_ALIGNMENT);
217 
218    return (union nvk_buffer_descriptor) { .addr = {
219       .base_addr = addr_range.addr,
220       .size = addr_range.range,
221    }};
222 }
223 
224 static void
225 write_ssbo_desc(struct nvk_descriptor_set *set,
226                 const VkDescriptorBufferInfo *const info,
227                 uint32_t binding, uint32_t elem)
228 {
229    VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
230    struct nvk_addr_range addr_range =
231       nvk_buffer_addr_range(buffer, info->offset, info->range);
232 
233    const union nvk_buffer_descriptor desc = ssbo_desc(addr_range);
234    write_desc(set, binding, elem, &desc, sizeof(desc));
235 }
236 
237 static void
238 write_dynamic_ssbo_desc(struct nvk_descriptor_set *set,
239                         const VkDescriptorBufferInfo *const info,
240                         uint32_t binding, uint32_t elem)
241 {
242    VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
243    struct nvk_addr_range addr_range =
244       nvk_buffer_addr_range(buffer, info->offset, info->range);
245 
246    const struct nvk_descriptor_set_binding_layout *binding_layout =
247       &set->layout->binding[binding];
248    set->dynamic_buffers[binding_layout->dynamic_buffer_index + elem] =
249       ssbo_desc(addr_range);
250 }
251 
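/* Added note: get_edb_buffer_view_desc() fills an EDB texel buffer view
 * descriptor obtained from the device's edb_bview_cache, keyed on address,
 * range, and format.  A NULL info or zero address yields a zeroed
 * descriptor.
 */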
252 static void
253 get_edb_buffer_view_desc(struct nvk_device *dev,
254                          const VkDescriptorAddressInfoEXT *info,
255                          void *dst, size_t dst_size)
256 {
257    struct nvk_edb_buffer_view_descriptor desc = { };
258    if (info != NULL && info->address != 0) {
259       enum pipe_format format = vk_format_to_pipe_format(info->format);
260       desc = nvk_edb_bview_cache_get_descriptor(dev, &dev->edb_bview_cache,
261                                                 info->address, info->range,
262                                                 format);
263    }
264    assert(sizeof(desc) <= dst_size);
265    memcpy(dst, &desc, sizeof(desc));
266 }
267 
268 static void
269 write_buffer_view_desc(struct nvk_physical_device *pdev,
270                        struct nvk_descriptor_set *set,
271                        const VkBufferView bufferView,
272                        uint32_t binding, uint32_t elem)
273 {
274    VK_FROM_HANDLE(nvk_buffer_view, view, bufferView);
275 
276    if (nvk_use_edb_buffer_views(pdev)) {
277       struct nvk_edb_buffer_view_descriptor desc = { };
278       if (view != NULL)
279          desc = view->edb_desc;
280       write_desc(set, binding, elem, &desc, sizeof(desc));
281    } else {
282       struct nvk_buffer_view_descriptor desc = { };
283       if (view != NULL)
284          desc = view->desc;
285       write_desc(set, binding, elem, &desc, sizeof(desc));
286    }
287 }
288 
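/* Added note: inline uniform block data is stored directly in the
 * descriptor buffer at byte granularity; 'offset' is a byte offset within
 * the binding rather than an array element, hence the stride == 1 assert.
 */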
289 static void
290 write_inline_uniform_data(struct nvk_descriptor_set *set,
291                           const VkWriteDescriptorSetInlineUniformBlock *info,
292                           uint32_t binding, uint32_t offset)
293 {
294    assert(set->layout->binding[binding].stride == 1);
295    write_desc(set, binding, offset, info->pData, info->dataSize);
296 }
297 
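/* Added note: vkUpdateDescriptorSets.  Each write dispatches on the
 * descriptor type and packs the data directly into the destination set's
 * mapped storage (or into set->dynamic_buffers for dynamic UBO/SSBO
 * descriptors).  Each copy memcpys the raw descriptor bytes between the
 * source and destination sets; dynamic buffer entries are copied separately
 * since they do not live in the descriptor buffer itself.
 */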
298 VKAPI_ATTR void VKAPI_CALL
299 nvk_UpdateDescriptorSets(VkDevice device,
300                          uint32_t descriptorWriteCount,
301                          const VkWriteDescriptorSet *pDescriptorWrites,
302                          uint32_t descriptorCopyCount,
303                          const VkCopyDescriptorSet *pDescriptorCopies)
304 {
305    VK_FROM_HANDLE(nvk_device, dev, device);
306    struct nvk_physical_device *pdev = nvk_device_physical(dev);
307 
308    for (uint32_t w = 0; w < descriptorWriteCount; w++) {
309       const VkWriteDescriptorSet *write = &pDescriptorWrites[w];
310       VK_FROM_HANDLE(nvk_descriptor_set, set, write->dstSet);
311 
312       switch (write->descriptorType) {
313       case VK_DESCRIPTOR_TYPE_SAMPLER:
314       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
315       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
316       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
317          for (uint32_t j = 0; j < write->descriptorCount; j++) {
318             write_sampled_image_view_desc(set, write->pImageInfo + j,
319                                           write->dstBinding,
320                                           write->dstArrayElement + j,
321                                           write->descriptorType);
322          }
323          break;
324 
325       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
326          for (uint32_t j = 0; j < write->descriptorCount; j++) {
327             write_storage_image_view_desc(set, write->pImageInfo + j,
328                                           write->dstBinding,
329                                           write->dstArrayElement + j);
330          }
331          break;
332 
333       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
334       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
335          for (uint32_t j = 0; j < write->descriptorCount; j++) {
336             write_buffer_view_desc(pdev, set, write->pTexelBufferView[j],
337                                    write->dstBinding, write->dstArrayElement + j);
338          }
339          break;
340 
341       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
342          for (uint32_t j = 0; j < write->descriptorCount; j++) {
343             write_ubo_desc(pdev, set, write->pBufferInfo + j,
344                            write->dstBinding,
345                            write->dstArrayElement + j);
346          }
347          break;
348 
349       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
350          for (uint32_t j = 0; j < write->descriptorCount; j++) {
351             write_ssbo_desc(set, write->pBufferInfo + j,
352                             write->dstBinding,
353                             write->dstArrayElement + j);
354          }
355          break;
356 
357       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
358          for (uint32_t j = 0; j < write->descriptorCount; j++) {
359             write_dynamic_ubo_desc(pdev, set, write->pBufferInfo + j,
360                                    write->dstBinding,
361                                    write->dstArrayElement + j);
362          }
363          break;
364 
365       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
366          for (uint32_t j = 0; j < write->descriptorCount; j++) {
367             write_dynamic_ssbo_desc(set, write->pBufferInfo + j,
368                                     write->dstBinding,
369                                     write->dstArrayElement + j);
370          }
371          break;
372 
373       case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
374          const VkWriteDescriptorSetInlineUniformBlock *write_inline =
375             vk_find_struct_const(write->pNext,
376                                  WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK);
377          assert(write_inline->dataSize == write->descriptorCount);
378          write_inline_uniform_data(set, write_inline, write->dstBinding,
379                                    write->dstArrayElement);
380          break;
381       }
382 
383       default:
384          break;
385       }
386    }
387 
388    for (uint32_t i = 0; i < descriptorCopyCount; i++) {
389       const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
390       VK_FROM_HANDLE(nvk_descriptor_set, src, copy->srcSet);
391       VK_FROM_HANDLE(nvk_descriptor_set, dst, copy->dstSet);
392 
393       const struct nvk_descriptor_set_binding_layout *src_binding_layout =
394          &src->layout->binding[copy->srcBinding];
395       const struct nvk_descriptor_set_binding_layout *dst_binding_layout =
396          &dst->layout->binding[copy->dstBinding];
397 
398       if (dst_binding_layout->stride > 0 && src_binding_layout->stride > 0) {
399          for (uint32_t j = 0; j < copy->descriptorCount; j++) {
400             ASSERTED uint32_t dst_max_size, src_max_size;
401             void *dst_map = desc_ubo_data(dst, copy->dstBinding,
402                                           copy->dstArrayElement + j,
403                                           &dst_max_size);
404             const void *src_map = desc_ubo_data(src, copy->srcBinding,
405                                                 copy->srcArrayElement + j,
406                                                 &src_max_size);
407             const uint32_t copy_size = MIN2(dst_binding_layout->stride,
408                                             src_binding_layout->stride);
409             assert(copy_size <= dst_max_size && copy_size <= src_max_size);
410             memcpy(dst_map, src_map, copy_size);
411          }
412       }
413 
414       switch (src_binding_layout->type) {
415       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
416       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
417          const uint32_t dst_dyn_start =
418             dst_binding_layout->dynamic_buffer_index + copy->dstArrayElement;
419          const uint32_t src_dyn_start =
420             src_binding_layout->dynamic_buffer_index + copy->srcArrayElement;
421          typed_memcpy(&dst->dynamic_buffers[dst_dyn_start],
422                       &src->dynamic_buffers[src_dyn_start],
423                       copy->descriptorCount);
424          break;
425       }
426       default:
427          break;
428       }
429    }
430 }
431 
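/* Added note: applies descriptor writes to a push descriptor set.  A
 * temporary nvk_descriptor_set is constructed on the stack around
 * push_set->data so the same write_*_desc helpers used by
 * nvk_UpdateDescriptorSets can be reused; only non-dynamic descriptor types
 * are handled here.
 */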
432 void
433 nvk_push_descriptor_set_update(struct nvk_device *dev,
434                                struct nvk_push_descriptor_set *push_set,
435                                struct nvk_descriptor_set_layout *layout,
436                                uint32_t write_count,
437                                const VkWriteDescriptorSet *writes)
438 {
439    struct nvk_physical_device *pdev = nvk_device_physical(dev);
440 
441    assert(layout->non_variable_descriptor_buffer_size < sizeof(push_set->data));
442    struct nvk_descriptor_set set = {
443       .layout = layout,
444       .size = sizeof(push_set->data),
445       .mapped_ptr = push_set->data,
446    };
447 
448    for (uint32_t w = 0; w < write_count; w++) {
449       const VkWriteDescriptorSet *write = &writes[w];
450       assert(write->dstSet == VK_NULL_HANDLE);
451 
452       switch (write->descriptorType) {
453       case VK_DESCRIPTOR_TYPE_SAMPLER:
454       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
455       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
456       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
457          for (uint32_t j = 0; j < write->descriptorCount; j++) {
458             write_sampled_image_view_desc(&set, write->pImageInfo + j,
459                                           write->dstBinding,
460                                           write->dstArrayElement + j,
461                                           write->descriptorType);
462          }
463          break;
464 
465       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
466          for (uint32_t j = 0; j < write->descriptorCount; j++) {
467             write_storage_image_view_desc(&set, write->pImageInfo + j,
468                                           write->dstBinding,
469                                           write->dstArrayElement + j);
470          }
471          break;
472 
473       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
474       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
475          for (uint32_t j = 0; j < write->descriptorCount; j++) {
476             write_buffer_view_desc(pdev, &set, write->pTexelBufferView[j],
477                                    write->dstBinding, write->dstArrayElement + j);
478          }
479          break;
480 
481       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
482          for (uint32_t j = 0; j < write->descriptorCount; j++) {
483             write_ubo_desc(pdev, &set, write->pBufferInfo + j,
484                            write->dstBinding,
485                            write->dstArrayElement + j);
486          }
487          break;
488 
489       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
490          for (uint32_t j = 0; j < write->descriptorCount; j++) {
491             write_ssbo_desc(&set, write->pBufferInfo + j,
492                             write->dstBinding,
493                             write->dstArrayElement + j);
494          }
495          break;
496 
497       default:
498          break;
499       }
500    }
501 }
502 
503 static void
504 nvk_descriptor_pool_free(struct nvk_descriptor_pool *pool,
505                          uint64_t addr, uint64_t size);
506 
507 static void
508 nvk_descriptor_set_destroy(struct nvk_device *dev,
509                            struct nvk_descriptor_pool *pool,
510                            struct nvk_descriptor_set *set)
511 {
512    list_del(&set->link);
513    if (set->size > 0)
514       nvk_descriptor_pool_free(pool, set->addr, set->size);
515    vk_descriptor_set_layout_unref(&dev->vk, &set->layout->vk);
516 
517    vk_object_free(&dev->vk, NULL, set);
518 }
519 
520 static void
521 nvk_destroy_descriptor_pool(struct nvk_device *dev,
522                             const VkAllocationCallbacks *pAllocator,
523                             struct nvk_descriptor_pool *pool)
524 {
525    list_for_each_entry_safe(struct nvk_descriptor_set, set, &pool->sets, link)
526       nvk_descriptor_set_destroy(dev, pool, set);
527 
528    util_vma_heap_finish(&pool->heap);
529 
530    if (pool->mem != NULL)
531       nvkmd_mem_unref(pool->mem);
532 
533    vk_object_free(&dev->vk, pAllocator, pool);
534 }
535 
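/* Added note: descriptor pool creation sizes the backing memory in two
 * passes over pPoolSizes.  The first pass finds the maximum descriptor
 * alignment; the second reserves MAX2(stride, max_align) bytes per
 * descriptor so alignment padding is always covered.  Extra slack of one
 * min-cbuf-alignment per set is then added (see the comment below), and a
 * single mapped GPU allocation is created and carved up with a
 * util_vma_heap.
 */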
536 VKAPI_ATTR VkResult VKAPI_CALL
537 nvk_CreateDescriptorPool(VkDevice _device,
538                          const VkDescriptorPoolCreateInfo *pCreateInfo,
539                          const VkAllocationCallbacks *pAllocator,
540                          VkDescriptorPool *pDescriptorPool)
541 {
542    VK_FROM_HANDLE(nvk_device, dev, _device);
543    struct nvk_physical_device *pdev = nvk_device_physical(dev);
544    struct nvk_descriptor_pool *pool;
545    VkResult result;
546 
547    pool = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pool),
548                            VK_OBJECT_TYPE_DESCRIPTOR_POOL);
549    if (!pool)
550       return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
551 
552    list_inithead(&pool->sets);
553 
554    const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
555       vk_find_struct_const(pCreateInfo->pNext,
556                            MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
557 
558    uint32_t max_align = 0;
559    for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
560       const VkMutableDescriptorTypeListEXT *type_list = NULL;
561       if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT &&
562           mutable_info && i < mutable_info->mutableDescriptorTypeListCount)
563             type_list = &mutable_info->pMutableDescriptorTypeLists[i];
564 
565       uint32_t stride, alignment;
566       nvk_descriptor_stride_align_for_type(pdev, 0 /* not DESCRIPTOR_BUFFER */,
567                                            pCreateInfo->pPoolSizes[i].type,
568                                            type_list, &stride, &alignment);
569       max_align = MAX2(max_align, alignment);
570    }
571 
572    uint64_t mem_size = 0;
573    for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
574       const VkMutableDescriptorTypeListEXT *type_list = NULL;
575       if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT &&
576           mutable_info && i < mutable_info->mutableDescriptorTypeListCount)
577             type_list = &mutable_info->pMutableDescriptorTypeLists[i];
578 
579       uint32_t stride, alignment;
580       nvk_descriptor_stride_align_for_type(pdev, 0 /* not DESCRIPTOR_BUFFER */,
581                                            pCreateInfo->pPoolSizes[i].type,
582                                            type_list, &stride, &alignment);
583       mem_size += MAX2(stride, max_align) *
584                  pCreateInfo->pPoolSizes[i].descriptorCount;
585    }
586 
587    /* Individual descriptor sets are aligned to the min UBO alignment to
588     * ensure that we don't end up with unaligned data access in any shaders.
589     * This means that each descriptor buffer allocated may burn up to 16B of
590     * extra space to get the right alignment.  (Technically, it's at most 28B
591     * because we're always going to start at least 4B aligned but we're being
592     * conservative here.)  Allocate enough extra space that we can chop it
593     * into maxSets pieces and align each one of them to 32B.
594     */
595    mem_size += nvk_min_cbuf_alignment(&pdev->info) * pCreateInfo->maxSets;
596 
597    if (mem_size) {
598       result = nvkmd_dev_alloc_mapped_mem(dev->nvkmd, &dev->vk.base,
599                                           mem_size, 0, NVKMD_MEM_LOCAL,
600                                           NVKMD_MEM_MAP_WR, &pool->mem);
601       if (result != VK_SUCCESS) {
602          nvk_destroy_descriptor_pool(dev, pAllocator, pool);
603          return result;
604       }
605 
606       /* The BO may be larger thanks to GPU page alignment.  We may as well
607        * make that extra space available to the client.
608        */
609       assert(pool->mem->size_B >= mem_size);
610       util_vma_heap_init(&pool->heap, pool->mem->va->addr, pool->mem->size_B);
611    } else {
612       util_vma_heap_init(&pool->heap, 0, 0);
613    }
614 
615    *pDescriptorPool = nvk_descriptor_pool_to_handle(pool);
616    return VK_SUCCESS;
617 }
618 
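/* Added note: nvk_descriptor_pool_alloc() sub-allocates 'size' bytes from
 * the pool's VMA heap.  A request larger than the total free space reports
 * VK_ERROR_OUT_OF_POOL_MEMORY, while failure to find a contiguous block
 * reports VK_ERROR_FRAGMENTED_POOL.  Both the GPU address and the CPU
 * mapping of the allocation are returned.
 */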
619 static VkResult
620 nvk_descriptor_pool_alloc(struct nvk_descriptor_pool *pool,
621                           uint64_t size, uint64_t alignment,
622                           uint64_t *addr_out, void **map_out)
623 {
624    assert(size > 0);
625    assert(size % alignment == 0);
626 
627    if (size > pool->heap.free_size)
628       return VK_ERROR_OUT_OF_POOL_MEMORY;
629 
630    uint64_t addr = util_vma_heap_alloc(&pool->heap, size, alignment);
631    if (addr == 0)
632       return VK_ERROR_FRAGMENTED_POOL;
633 
634    assert(addr >= pool->mem->va->addr);
635    assert(addr + size <= pool->mem->va->addr + pool->mem->size_B);
636    uint64_t offset = addr - pool->mem->va->addr;
637 
638    *addr_out = addr;
639    *map_out = pool->mem->map + offset;
640 
641    return VK_SUCCESS;
642 }
643 
644 static void
645 nvk_descriptor_pool_free(struct nvk_descriptor_pool *pool,
646                          uint64_t addr, uint64_t size)
647 {
648    assert(size > 0);
649    assert(addr >= pool->mem->va->addr);
650    assert(addr + size <= pool->mem->va->addr + pool->mem->size_B);
651    util_vma_heap_free(&pool->heap, addr, size);
652 }
653 
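/* Added note: allocates one descriptor set from the pool.  The set size
 * starts at the layout's non-variable descriptor buffer size, grows by
 * stride * variable_count when the last binding has the
 * VARIABLE_DESCRIPTOR_COUNT flag, and is then aligned to the minimum cbuf
 * alignment.  Immutable samplers on SAMPLER and COMBINED_IMAGE_SAMPLER
 * bindings are written into the new set immediately so they need no later
 * vkUpdateDescriptorSets call.
 */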
654 static VkResult
655 nvk_descriptor_set_create(struct nvk_device *dev,
656                           struct nvk_descriptor_pool *pool,
657                           struct nvk_descriptor_set_layout *layout,
658                           uint32_t variable_count,
659                           struct nvk_descriptor_set **out_set)
660 {
661    struct nvk_physical_device *pdev = nvk_device_physical(dev);
662    struct nvk_descriptor_set *set;
663    VkResult result;
664 
665    uint32_t mem_size = sizeof(struct nvk_descriptor_set) +
666       layout->dynamic_buffer_count * sizeof(struct nvk_buffer_address);
667 
668    set = vk_object_zalloc(&dev->vk, NULL, mem_size,
669                           VK_OBJECT_TYPE_DESCRIPTOR_SET);
670    if (!set)
671       return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
672 
673    set->size = layout->non_variable_descriptor_buffer_size;
674 
675    if (layout->binding_count > 0 &&
676        (layout->binding[layout->binding_count - 1].flags &
677         VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
678       uint32_t stride = layout->binding[layout->binding_count-1].stride;
679       set->size += stride * variable_count;
680    }
681 
682    uint32_t alignment = nvk_min_cbuf_alignment(&pdev->info);
683    set->size = align64(set->size, alignment);
684 
685    if (set->size > 0) {
686       result = nvk_descriptor_pool_alloc(pool, set->size, alignment,
687                                          &set->addr, &set->mapped_ptr);
688       if (result != VK_SUCCESS) {
689          vk_object_free(&dev->vk, NULL, set);
690          return result;
691       }
692    }
693 
694    vk_descriptor_set_layout_ref(&layout->vk);
695    set->layout = layout;
696 
697    for (uint32_t b = 0; b < layout->binding_count; b++) {
698       if (layout->binding[b].type != VK_DESCRIPTOR_TYPE_SAMPLER &&
699           layout->binding[b].type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
700          continue;
701 
702       if (layout->binding[b].immutable_samplers == NULL)
703          continue;
704 
705       uint32_t array_size = layout->binding[b].array_size;
706       if (layout->binding[b].flags &
707           VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)
708          array_size = variable_count;
709 
710       const VkDescriptorImageInfo empty = {};
711       for (uint32_t j = 0; j < array_size; j++) {
712          write_sampled_image_view_desc(set, &empty, b, j,
713                                        layout->binding[b].type);
714       }
715    }
716 
717    list_addtail(&set->link, &pool->sets);
718    *out_set = set;
719 
720    return VK_SUCCESS;
721 }
722 
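/* Added note: vkAllocateDescriptorSets creates one set per entry in
 * pAllocateInfo->pSetLayouts.  On failure, the sets allocated so far are
 * freed and every returned handle is reset to VK_NULL_HANDLE, as the spec
 * requires.
 */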
723 VKAPI_ATTR VkResult VKAPI_CALL
724 nvk_AllocateDescriptorSets(VkDevice device,
725                            const VkDescriptorSetAllocateInfo *pAllocateInfo,
726                            VkDescriptorSet *pDescriptorSets)
727 {
728    VK_FROM_HANDLE(nvk_device, dev, device);
729    VK_FROM_HANDLE(nvk_descriptor_pool, pool, pAllocateInfo->descriptorPool);
730 
731    VkResult result = VK_SUCCESS;
732    uint32_t i;
733 
734    struct nvk_descriptor_set *set = NULL;
735 
736    const VkDescriptorSetVariableDescriptorCountAllocateInfo *var_desc_count =
737       vk_find_struct_const(pAllocateInfo->pNext,
738                            DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
739 
740    /* Allocate one descriptor set for each layout in pAllocateInfo->pSetLayouts */
741    for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
742       VK_FROM_HANDLE(nvk_descriptor_set_layout, layout,
743                      pAllocateInfo->pSetLayouts[i]);
744       /* If descriptorSetCount is zero or this structure is not included in
745        * the pNext chain, then the variable lengths are considered to be zero.
746        */
747       const uint32_t variable_count =
748          var_desc_count && var_desc_count->descriptorSetCount > 0 ?
749          var_desc_count->pDescriptorCounts[i] : 0;
750 
751       result = nvk_descriptor_set_create(dev, pool, layout,
752                                          variable_count, &set);
753       if (result != VK_SUCCESS)
754          break;
755 
756       pDescriptorSets[i] = nvk_descriptor_set_to_handle(set);
757    }
758 
759    if (result != VK_SUCCESS) {
760       nvk_FreeDescriptorSets(device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
761       for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
762          pDescriptorSets[i] = VK_NULL_HANDLE;
763       }
764    }
765    return result;
766 }
767 
768 VKAPI_ATTR VkResult VKAPI_CALL
769 nvk_FreeDescriptorSets(VkDevice device,
770                        VkDescriptorPool descriptorPool,
771                        uint32_t descriptorSetCount,
772                        const VkDescriptorSet *pDescriptorSets)
773 {
774    VK_FROM_HANDLE(nvk_device, dev, device);
775    VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);
776 
777    for (uint32_t i = 0; i < descriptorSetCount; i++) {
778       VK_FROM_HANDLE(nvk_descriptor_set, set, pDescriptorSets[i]);
779 
780       if (set)
781          nvk_descriptor_set_destroy(dev, pool, set);
782    }
783    return VK_SUCCESS;
784 }
785 
786 VKAPI_ATTR void VKAPI_CALL
787 nvk_DestroyDescriptorPool(VkDevice device,
788                           VkDescriptorPool _pool,
789                           const VkAllocationCallbacks *pAllocator)
790 {
791    VK_FROM_HANDLE(nvk_device, dev, device);
792    VK_FROM_HANDLE(nvk_descriptor_pool, pool, _pool);
793 
794    if (!_pool)
795       return;
796 
797    nvk_destroy_descriptor_pool(dev, pAllocator, pool);
798 }
799 
800 VKAPI_ATTR VkResult VKAPI_CALL
801 nvk_ResetDescriptorPool(VkDevice device,
802                         VkDescriptorPool descriptorPool,
803                         VkDescriptorPoolResetFlags flags)
804 {
805    VK_FROM_HANDLE(nvk_device, dev, device);
806    VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);
807 
808    list_for_each_entry_safe(struct nvk_descriptor_set, set, &pool->sets, link)
809       nvk_descriptor_set_destroy(dev, pool, set);
810 
811    return VK_SUCCESS;
812 }
813 
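/* Added note: walks a vk_descriptor_update_template and writes each entry
 * into the set, reading the source data at entry->offset + j * entry->stride
 * for array element j.  Shared by vkUpdateDescriptorSetWithTemplate and the
 * push-descriptor template path below.
 */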
814 static void
815 nvk_descriptor_set_write_template(struct nvk_device *dev,
816                                   struct nvk_descriptor_set *set,
817                                   const struct vk_descriptor_update_template *template,
818                                   const void *data)
819 {
820    struct nvk_physical_device *pdev = nvk_device_physical(dev);
821 
822    for (uint32_t i = 0; i < template->entry_count; i++) {
823       const struct vk_descriptor_template_entry *entry =
824          &template->entries[i];
825 
826       switch (entry->type) {
827       case VK_DESCRIPTOR_TYPE_SAMPLER:
828       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
829       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
830       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
831          for (uint32_t j = 0; j < entry->array_count; j++) {
832             const VkDescriptorImageInfo *info =
833                data + entry->offset + j * entry->stride;
834 
835             write_sampled_image_view_desc(set, info,
836                                           entry->binding,
837                                           entry->array_element + j,
838                                           entry->type);
839          }
840          break;
841 
842       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
843          for (uint32_t j = 0; j < entry->array_count; j++) {
844             const VkDescriptorImageInfo *info =
845                data + entry->offset + j * entry->stride;
846 
847             write_storage_image_view_desc(set, info,
848                                           entry->binding,
849                                           entry->array_element + j);
850          }
851          break;
852 
853       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
854       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
855          for (uint32_t j = 0; j < entry->array_count; j++) {
856             const VkBufferView *bview =
857                data + entry->offset + j * entry->stride;
858 
859             write_buffer_view_desc(pdev, set, *bview,
860                                    entry->binding,
861                                    entry->array_element + j);
862          }
863          break;
864 
865       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
866          for (uint32_t j = 0; j < entry->array_count; j++) {
867             const VkDescriptorBufferInfo *info =
868                data + entry->offset + j * entry->stride;
869 
870             write_ubo_desc(pdev, set, info,
871                            entry->binding,
872                            entry->array_element + j);
873          }
874          break;
875 
876       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
877          for (uint32_t j = 0; j < entry->array_count; j++) {
878             const VkDescriptorBufferInfo *info =
879                data + entry->offset + j * entry->stride;
880 
881             write_ssbo_desc(set, info,
882                             entry->binding,
883                             entry->array_element + j);
884          }
885          break;
886 
887       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
888          for (uint32_t j = 0; j < entry->array_count; j++) {
889             const VkDescriptorBufferInfo *info =
890                data + entry->offset + j * entry->stride;
891 
892             write_dynamic_ubo_desc(pdev, set, info,
893                                    entry->binding,
894                                    entry->array_element + j);
895          }
896          break;
897 
898       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
899          for (uint32_t j = 0; j < entry->array_count; j++) {
900             const VkDescriptorBufferInfo *info =
901                data + entry->offset + j * entry->stride;
902 
903             write_dynamic_ssbo_desc(set, info,
904                                     entry->binding,
905                                     entry->array_element + j);
906          }
907          break;
908 
909       case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
910          write_desc(set,
911                     entry->binding,
912                     entry->array_element,
913                     data + entry->offset,
914                     entry->array_count);
915          break;
916 
917       default:
918          break;
919       }
920    }
921 }
922 
923 VKAPI_ATTR void VKAPI_CALL
924 nvk_UpdateDescriptorSetWithTemplate(VkDevice device,
925                                     VkDescriptorSet descriptorSet,
926                                     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
927                                     const void *pData)
928 {
929    VK_FROM_HANDLE(nvk_device, dev, device);
930    VK_FROM_HANDLE(nvk_descriptor_set, set, descriptorSet);
931    VK_FROM_HANDLE(vk_descriptor_update_template, template,
932                   descriptorUpdateTemplate);
933 
934    nvk_descriptor_set_write_template(dev, set, template, pData);
935 }
936 
937 void
938 nvk_push_descriptor_set_update_template(
939    struct nvk_device *dev,
940    struct nvk_push_descriptor_set *push_set,
941    struct nvk_descriptor_set_layout *layout,
942    const struct vk_descriptor_update_template *template,
943    const void *data)
944 {
945    struct nvk_descriptor_set tmp_set = {
946       .layout = layout,
947       .size = sizeof(push_set->data),
948       .mapped_ptr = push_set->data,
949    };
950    nvk_descriptor_set_write_template(dev, &tmp_set, template, data);
951 }
952 
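/* Added note: VK_EXT_descriptor_buffer's vkGetDescriptorEXT writes the raw
 * descriptor bytes for the requested type into pDescriptor.  Image and
 * sampler types reuse the same packing helpers as descriptor sets; texel
 * buffers use EDB buffer view descriptors; UBO/SSBO descriptors are built
 * directly from the client-provided address and range.
 */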
953 VKAPI_ATTR void VKAPI_CALL
954 nvk_GetDescriptorEXT(VkDevice _device,
955                      const VkDescriptorGetInfoEXT *pDescriptorInfo,
956                      size_t dataSize, void *pDescriptor)
957 {
958    VK_FROM_HANDLE(nvk_device, dev, _device);
959    struct nvk_physical_device *pdev = nvk_device_physical(dev);
960 
961    switch (pDescriptorInfo->type) {
962    case VK_DESCRIPTOR_TYPE_SAMPLER: {
963       const VkDescriptorImageInfo info = {
964          .sampler = *pDescriptorInfo->data.pSampler,
965       };
966       get_sampled_image_view_desc(VK_DESCRIPTOR_TYPE_SAMPLER,
967                                   &info, pDescriptor, dataSize);
968       break;
969    }
970 
971    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
972       get_sampled_image_view_desc(pDescriptorInfo->type,
973                                   pDescriptorInfo->data.pCombinedImageSampler,
974                                   pDescriptor, dataSize);
975       break;
976 
977    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
978       get_sampled_image_view_desc(pDescriptorInfo->type,
979                                   pDescriptorInfo->data.pSampledImage,
980                                   pDescriptor, dataSize);
981       break;
982 
983    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
984       get_storage_image_view_desc(pDescriptorInfo->data.pStorageImage,
985                                   pDescriptor, dataSize);
986       break;
987 
988    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
989       get_edb_buffer_view_desc(dev, pDescriptorInfo->data.pUniformTexelBuffer,
990                                pDescriptor, dataSize);
991       break;
992 
993    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
994       get_edb_buffer_view_desc(dev, pDescriptorInfo->data.pStorageTexelBuffer,
995                                pDescriptor, dataSize);
996       break;
997 
998    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
999       struct nvk_addr_range addr_range = { };
1000       if (pDescriptorInfo->data.pUniformBuffer != NULL &&
1001           pDescriptorInfo->data.pUniformBuffer->address != 0) {
1002          addr_range = (const struct nvk_addr_range) {
1003             .addr = pDescriptorInfo->data.pUniformBuffer->address,
1004             .range = pDescriptorInfo->data.pUniformBuffer->range,
1005          };
1006       }
1007       union nvk_buffer_descriptor desc = ubo_desc(pdev, addr_range);
1008       assert(sizeof(desc) <= dataSize);
1009       memcpy(pDescriptor, &desc, sizeof(desc));
1010       break;
1011    }
1012 
1013    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
1014       struct nvk_addr_range addr_range = { };
1015       if (pDescriptorInfo->data.pUniformBuffer != NULL &&
1016           pDescriptorInfo->data.pUniformBuffer->address != 0) {
1017          addr_range = (const struct nvk_addr_range) {
1018             .addr = pDescriptorInfo->data.pUniformBuffer->address,
1019             .range = pDescriptorInfo->data.pUniformBuffer->range,
1020          };
1021       }
1022       union nvk_buffer_descriptor desc = ssbo_desc(addr_range);
1023       assert(sizeof(desc) <= dataSize);
1024       memcpy(pDescriptor, &desc, sizeof(desc));
1025       break;
1026    }
1027 
1028    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1029       get_sampled_image_view_desc(pDescriptorInfo->type,
1030                                   pDescriptorInfo->data.pInputAttachmentImage,
1031                                   pDescriptor, dataSize);
1032       break;
1033 
1034    default:
1035       unreachable("Unknown descriptor type");
1036    }
1037 }
1038