xref: /aosp_15_r20/external/mesa3d/src/nouveau/vulkan/nvk_device_memory.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_device_memory.h"

#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "nvk_image.h"
#include "nvk_physical_device.h"
#include "nvkmd/nvkmd.h"

#include "util/u_atomic.h"

#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h> /* for close() */
/* Supports opaque fd only */
const VkExternalMemoryProperties nvk_opaque_fd_mem_props = {
   .externalMemoryFeatures =
      VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
      VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
   .compatibleHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
};

/* Supports opaque fd and dma_buf. */
const VkExternalMemoryProperties nvk_dma_buf_mem_props = {
   .externalMemoryFeatures =
      VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
      VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   .compatibleHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
};

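/* Map a Vulkan memory type, plus any external handle types the allocation
 * needs to support, onto nvkmd allocation flags.
 */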
static enum nvkmd_mem_flags
nvk_memory_type_flags(const VkMemoryType *type,
                      VkExternalMemoryHandleTypeFlagBits handle_types)
{
   enum nvkmd_mem_flags flags = 0;
   if (type->propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
      flags = NVKMD_MEM_LOCAL;
   else
      flags = NVKMD_MEM_GART;

   if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      flags |= NVKMD_MEM_CAN_MAP;

   if (handle_types != 0)
      flags |= NVKMD_MEM_SHARED;

   return flags;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetMemoryFdPropertiesKHR(VkDevice device,
                             VkExternalMemoryHandleTypeFlagBits handleType,
                             int fd,
                             VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvkmd_mem *mem;
   VkResult result;

   switch (handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      result = nvkmd_dev_import_dma_buf(dev->nvkmd, &dev->vk.base, fd, &mem);
      if (result != VK_SUCCESS)
         return result;
      break;
   default:
      return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   uint32_t type_bits = 0;
   if (handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
      /* We allow a dma-buf to be imported anywhere because there's no way
       * for us to actually know where it came from.
       */
      type_bits = BITFIELD_MASK(pdev->mem_type_count);
   } else {
      for (unsigned t = 0; t < ARRAY_SIZE(pdev->mem_types); t++) {
         const enum nvkmd_mem_flags flags =
            nvk_memory_type_flags(&pdev->mem_types[t], handleType);
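         /* The import is usable with this memory type only if the type
          * doesn't require any flags the imported allocation lacks.
          */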
         if (!(flags & ~mem->flags))
            type_bits |= (1 << t);
      }
   }

   pMemoryFdProperties->memoryTypeBits = type_bits;

   nvkmd_mem_unref(mem);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_AllocateMemory(VkDevice device,
                   const VkMemoryAllocateInfo *pAllocateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkDeviceMemory *pMem)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvk_device_memory *mem;
   VkResult result = VK_SUCCESS;

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   const VkExportMemoryAllocateInfo *export_info =
      vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
   const VkMemoryDedicatedAllocateInfo *dedicated_info =
      vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
   const VkMemoryType *type =
      &pdev->mem_types[pAllocateInfo->memoryTypeIndex];

   VkExternalMemoryHandleTypeFlagBits handle_types = 0;
   if (export_info != NULL)
      handle_types |= export_info->handleTypes;
   if (fd_info != NULL)
      handle_types |= fd_info->handleType;

   const enum nvkmd_mem_flags flags = nvk_memory_type_flags(type, handle_types);

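   /* Align allocations to the page size: 4 KiB for GART, 64 KiB for VRAM
    * (presumably the GPU's large page size).
    */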
   uint32_t alignment = (1ULL << 12);
   if (flags & NVKMD_MEM_LOCAL)
      alignment = (1ULL << 16);

   uint8_t pte_kind = 0, tile_mode = 0;
   if (dedicated_info != NULL) {
      VK_FROM_HANDLE(nvk_image, image, dedicated_info->image);
      if (image != NULL &&
          image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
         /* This image might be shared with GL so we need to set the BO flags
          * such that GL can bind and use it.
          */
         assert(image->plane_count == 1);
         alignment = MAX2(alignment, image->planes[0].nil.align_B);
         pte_kind = image->planes[0].nil.pte_kind;
         tile_mode = image->planes[0].nil.tile_mode;
      }
   }

   const uint64_t aligned_size =
      align64(pAllocateInfo->allocationSize, alignment);

   mem = vk_device_memory_create(&dev->vk, pAllocateInfo,
                                 pAllocator, sizeof(*mem));
   if (!mem)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (fd_info && fd_info->handleType) {
      assert(fd_info->handleType ==
               VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
               VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      result = nvkmd_dev_import_dma_buf(dev->nvkmd, &dev->vk.base,
                                        fd_info->fd, &mem->mem);
      if (result != VK_SUCCESS)
         goto fail_alloc;

      /* We can't really assert anything for dma-bufs because they could come
       * in from some other device.
       */
      if (fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
         assert(!(flags & ~mem->mem->flags));
   } else if (pte_kind != 0 || tile_mode != 0) {
      result = nvkmd_dev_alloc_tiled_mem(dev->nvkmd, &dev->vk.base,
                                         aligned_size, alignment,
                                         pte_kind, tile_mode, flags,
                                         &mem->mem);
      if (result != VK_SUCCESS)
         goto fail_alloc;
   } else {
      result = nvkmd_dev_alloc_mem(dev->nvkmd, &dev->vk.base,
                                   aligned_size, alignment, flags,
                                   &mem->mem);
      if (result != VK_SUCCESS)
         goto fail_alloc;
   }

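   /* With NVK_DEBUG_ZERO_MEMORY set, scrub the new allocation: memset
    * through a CPU mapping when the memory is host-visible, otherwise fill
    * it with zeros on the upload queue.
    */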
   if (pdev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
      if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
         void *map;
         result = nvkmd_mem_map(mem->mem, &dev->vk.base,
                                NVKMD_MEM_MAP_RDWR, NULL, &map);
         if (result != VK_SUCCESS)
            goto fail_mem;

         memset(map, 0, mem->mem->size_B);
         nvkmd_mem_unmap(mem->mem, 0);
      } else {
         result = nvk_upload_queue_fill(dev, &dev->upload,
                                        mem->mem->va->addr,
                                        0, mem->mem->size_B);
         if (result != VK_SUCCESS)
            goto fail_mem;

         /* Since we don't know when the memory will be freed, sync now */
         result = nvk_upload_queue_sync(dev, &dev->upload);
         if (result != VK_SUCCESS)
            goto fail_mem;
      }
   }

   if (fd_info && fd_info->handleType) {
      /* From the Vulkan spec:
       *
       *    "Importing memory from a file descriptor transfers ownership of
       *    the file descriptor from the application to the Vulkan
       *    implementation. The application must not perform any operations on
       *    the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd_info->fd);
   }

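   /* Charge the allocation against its heap so heap usage can be tracked. */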
   struct nvk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, mem->mem->size_B);

   *pMem = nvk_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail_mem:
   nvkmd_mem_unref(mem->mem);
fail_alloc:
   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
   return result;
}

VKAPI_ATTR void VKAPI_CALL
nvk_FreeMemory(VkDevice device,
               VkDeviceMemory _mem,
               const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, _mem);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   if (!mem)
      return;

   const VkMemoryType *type = &pdev->mem_types[mem->vk.memory_type_index];
   struct nvk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, -((int64_t)mem->mem->size_B));

   nvkmd_mem_unref(mem->mem);

   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_MapMemory2KHR(VkDevice device,
                  const VkMemoryMapInfoKHR *pMemoryMapInfo,
                  void **ppData)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, pMemoryMapInfo->memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   const VkDeviceSize offset = pMemoryMapInfo->offset;
   const VkDeviceSize size =
      vk_device_memory_range(&mem->vk, pMemoryMapInfo->offset,
                                       pMemoryMapInfo->size);

   enum nvkmd_mem_map_flags map_flags = NVKMD_MEM_MAP_CLIENT |
                                        NVKMD_MEM_MAP_RDWR;

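   /* VK_EXT_map_memory_placed: the client chooses the CPU address. */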
   void *fixed_addr = NULL;
   if (pMemoryMapInfo->flags & VK_MEMORY_MAP_PLACED_BIT_EXT) {
      const VkMemoryMapPlacedInfoEXT *placed_info =
         vk_find_struct_const(pMemoryMapInfo->pNext, MEMORY_MAP_PLACED_INFO_EXT);
      map_flags |= NVKMD_MEM_MAP_FIXED;
      fixed_addr = placed_info->pPlacedAddress;
   }

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->mem->size_B);

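   /* Reject sizes that would be truncated when cast to size_t
    * (e.g. on 32-bit builds).
    */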
   if (size != (size_t)size) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "requested size 0x%"PRIx64" does not fit in %u bits",
                       size, (unsigned)(sizeof(size_t) * 8));
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "memory must not be currently host mapped"
    */
   if (mem->mem->map != NULL) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "Memory object already mapped.");
   }

   void *mem_map;
   result = nvkmd_mem_map(mem->mem, &mem->vk.base, map_flags,
                          fixed_addr, &mem_map);
   if (result != VK_SUCCESS)
      return result;

   *ppData = mem_map + offset;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_UnmapMemory2KHR(VkDevice device,
                    const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, pMemoryUnmapInfo->memory);

   if (mem == NULL)
      return VK_SUCCESS;

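   /* With VK_MEMORY_UNMAP_RESERVE_BIT_EXT (VK_EXT_map_memory_placed), the
    * address range must remain reserved, so replace the mapping instead of
    * unmapping it.
    */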
   if (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT) {
      return nvkmd_mem_overmap(mem->mem, &mem->vk.base, NVKMD_MEM_MAP_CLIENT);
   } else {
      nvkmd_mem_unmap(mem->mem, NVKMD_MEM_MAP_CLIENT);
      return VK_SUCCESS;
   }
}

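/* NVK's host-visible memory types are all host-coherent, so flushing and
 * invalidating mapped ranges are no-ops.
 */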
VKAPI_ATTR VkResult VKAPI_CALL
nvk_FlushMappedMemoryRanges(VkDevice device,
                            uint32_t memoryRangeCount,
                            const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_InvalidateMappedMemoryRanges(VkDevice device,
                                 uint32_t memoryRangeCount,
                                 const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

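/* NVK doesn't support lazily-allocated memory, so every allocation is fully
 * committed from the start.
 */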
VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceMemoryCommitment(VkDevice device,
                              VkDeviceMemory _mem,
                              VkDeviceSize *pCommittedMemoryInBytes)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, _mem);

   *pCommittedMemoryInBytes = mem->mem->size_B;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetMemoryFdKHR(VkDevice device,
                   const VkMemoryGetFdInfoKHR *pGetFdInfo,
                   int *pFD)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, pGetFdInfo->memory);

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      return nvkmd_mem_export_dma_buf(mem->mem, &mem->vk.base, pFD);
   default:
      assert(!"unsupported handle type");
      return vk_error(dev, VK_ERROR_FEATURE_NOT_PRESENT);
   }
}

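/* The memory object's GPU virtual address serves as its opaque capture
 * address.
 */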
VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetDeviceMemoryOpaqueCaptureAddress(
   UNUSED VkDevice device,
   const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, pInfo->memory);

   return mem->mem->va->addr;
}
399