/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_device_memory.h"

#include "hk_device.h"
#include "hk_entrypoints.h"
#include "hk_image.h"
#include "hk_physical_device.h"

#include "asahi/lib/agx_bo.h"
#include "util/u_atomic.h"

#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

/* Supports opaque fd only */
const VkExternalMemoryProperties hk_opaque_fd_mem_props = {
   .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                             VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
   .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
};

/* Supports opaque fd and dma_buf. */
const VkExternalMemoryProperties hk_dma_buf_mem_props = {
   .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                             VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
};

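/*
 * Translate a Vulkan memory type and the set of requested external handle
 * types into the agx_bo_flags the backing BO must have. Any external handle
 * requirement forces the BO to be shared and shareable.
 */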
static enum agx_bo_flags
hk_memory_type_flags(const VkMemoryType *type,
                     VkExternalMemoryHandleTypeFlagBits handle_types)
{
   unsigned flags = 0;

   if (handle_types)
      flags |= AGX_BO_SHARED | AGX_BO_SHAREABLE;

   return flags;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_GetMemoryFdPropertiesKHR(VkDevice device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_physical_device *pdev = hk_device_physical(dev);
   struct agx_bo *bo;

   switch (handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      bo = agx_bo_import(&dev->dev, fd);
      if (bo == NULL)
         return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
      break;
   default:
      return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

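   /* A memory type is compatible with the imported BO if every flag the
    * type requires is already set on the BO.
    */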
   uint32_t type_bits = 0;
   for (unsigned t = 0; t < ARRAY_SIZE(pdev->mem_types); t++) {
      const unsigned flags =
         hk_memory_type_flags(&pdev->mem_types[t], handleType);
      if (!(flags & ~bo->flags))
         type_bits |= (1 << t);
   }

   pMemoryFdProperties->memoryTypeBits = type_bits;

   agx_bo_unreference(&dev->dev, bo);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMem)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_physical_device *pdev = hk_device_physical(dev);
   struct hk_device_memory *mem;
   VkResult result = VK_SUCCESS;

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   const VkExportMemoryAllocateInfo *export_info =
      vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
   const VkMemoryType *type = &pdev->mem_types[pAllocateInfo->memoryTypeIndex];

   VkExternalMemoryHandleTypeFlagBits handle_types = 0;
   if (export_info != NULL)
      handle_types |= export_info->handleTypes;
   if (fd_info != NULL)
      handle_types |= fd_info->handleType;

   const unsigned flags = hk_memory_type_flags(type, handle_types);

   uint32_t alignment = 16384; /* Apple page size */

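   /* Cheap early-out if the heap budget is already exhausted. The
    * authoritative accounting happens after the BO is allocated below.
    */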
   struct hk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   if (p_atomic_read(&heap->used) > heap->size)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   const uint64_t aligned_size =
      align64(pAllocateInfo->allocationSize, alignment);

   mem = vk_device_memory_create(&dev->vk, pAllocateInfo, pAllocator,
                                 sizeof(*mem));
   if (!mem)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->map = NULL;
   if (fd_info && fd_info->handleType) {
      assert(
         fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
         fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      mem->bo = agx_bo_import(&dev->dev, fd_info->fd);
      if (mem->bo == NULL) {
         result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
         goto fail_alloc;
      }
      assert(!(flags & ~mem->bo->flags));
   } else {
      enum agx_bo_flags flags = 0;
      if (handle_types)
         flags |= AGX_BO_SHAREABLE;

      mem->bo = agx_bo_create(&dev->dev, aligned_size, 0, flags, "App memory");
      if (!mem->bo) {
         result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto fail_alloc;
      }
   }

   if (fd_info && fd_info->handleType) {
      /* From the Vulkan spec:
       *
       *    "Importing memory from a file descriptor transfers ownership of
       *    the file descriptor from the application to the Vulkan
       *    implementation. The application must not perform any operations on
       *    the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd_info->fd);
   }

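   /* Charge the allocation against the heap budget; if that overflows the
    * heap, roll the allocation back and fail.
    */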
   uint64_t heap_used = p_atomic_add_return(&heap->used, mem->bo->size);
   if (heap_used > heap->size) {
      hk_FreeMemory(device, hk_device_memory_to_handle(mem), pAllocator);
      return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Out of heap memory");
   }

   *pMem = hk_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail_alloc:
   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
   return result;
}

VKAPI_ATTR void VKAPI_CALL
hk_FreeMemory(VkDevice device, VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, mem, _mem);
   struct hk_physical_device *pdev = hk_device_physical(dev);

   if (!mem)
      return;

   const VkMemoryType *type = &pdev->mem_types[mem->vk.memory_type_index];
   struct hk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, -((int64_t)mem->bo->size));

   agx_bo_unreference(&dev->dev, mem->bo);

   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_MapMemory2KHR(VkDevice device, const VkMemoryMapInfoKHR *pMemoryMapInfo,
                 void **ppData)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, mem, pMemoryMapInfo->memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   const VkDeviceSize offset = pMemoryMapInfo->offset;
   const VkDeviceSize size = vk_device_memory_range(
      &mem->vk, pMemoryMapInfo->offset, pMemoryMapInfo->size);

   UNUSED void *fixed_addr = NULL;
   if (pMemoryMapInfo->flags & VK_MEMORY_MAP_PLACED_BIT_EXT) {
      const VkMemoryMapPlacedInfoEXT *placed_info = vk_find_struct_const(
         pMemoryMapInfo->pNext, MEMORY_MAP_PLACED_INFO_EXT);
      fixed_addr = placed_info->pPlacedAddress;
   }
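   /* Placed mappings are not honoured yet: fixed_addr is captured above but
    * currently unused, hence the UNUSED annotation.
    */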

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->bo->size);

   if (size != (size_t)size) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "requested size 0x%" PRIx64 " does not fit in %u bits",
                       size, (unsigned)(sizeof(size_t) * 8));
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "memory must not be currently host mapped"
    */
   if (mem->map != NULL) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "Memory object already mapped.");
   }

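   /* The BO is expected to already have a CPU mapping from the backing
    * layer, so mapping device memory just records that pointer and offsets
    * into it.
    */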
   mem->map = mem->bo->map;
   *ppData = mem->map + offset;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_UnmapMemory2KHR(VkDevice device,
                   const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
   VK_FROM_HANDLE(hk_device_memory, mem, pMemoryUnmapInfo->memory);

   if (mem == NULL)
      return VK_SUCCESS;

   if (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT) {
      unreachable("todo");
#if 0
      VK_FROM_HANDLE(hk_device, dev, device);

      int err = agx_bo_overmap(mem->bo, mem->map);
      if (err) {
         return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                          "Failed to map over original mapping");
      }
#endif
   } else {
      /* TODO */
      // agx_bo_unmap(mem->bo, mem->map);
   }

   mem->map = NULL;

   return VK_SUCCESS;
}

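/* Device memory on AGX is unified and host-coherent, so mapped ranges never
 * need explicit flushes or invalidates; both entrypoints are intentional
 * no-ops.
 */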
VKAPI_ATTR VkResult VKAPI_CALL
hk_FlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_InvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

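/* All memory is fully committed at allocation time, so the committed size is
 * always the full BO size.
 */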
VKAPI_ATTR void VKAPI_CALL
hk_GetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory _mem,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   VK_FROM_HANDLE(hk_device_memory, mem, _mem);

   *pCommittedMemoryInBytes = mem->bo->size;
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_GetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFD)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, memory, pGetFdInfo->memory);

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      *pFD = agx_bo_export(&dev->dev, memory->bo);
      return VK_SUCCESS;
   default:
      assert(!"unsupported handle type");
      return vk_error(dev, VK_ERROR_FEATURE_NOT_PRESENT);
   }
}

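/* The opaque capture address is simply the BO's GPU virtual address. */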
VKAPI_ATTR uint64_t VKAPI_CALL
hk_GetDeviceMemoryOpaqueCaptureAddress(
   UNUSED VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   VK_FROM_HANDLE(hk_device_memory, mem, pInfo->memory);

   return mem->bo->va->addr;
}