/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device_memory.h"

#include "venus-protocol/vn_protocol_driver_device_memory.h"
#include "venus-protocol/vn_protocol_driver_transport.h"

#include "vn_android.h"
#include "vn_buffer.h"
#include "vn_device.h"
#include "vn_image.h"
#include "vn_physical_device.h"
#include "vn_renderer.h"
#include "vn_renderer_util.h"

/* device memory commands */

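/* Allocates the renderer-side VkDeviceMemory. Unless async allocation is
 * disabled via VN_PERF, the command is only submitted to the ring, and the
 * ring seqno is recorded so that later bo creation can wait for the
 * allocation to land.
 */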
static inline VkResult
vn_device_memory_alloc_simple(struct vn_device *dev,
                              struct vn_device_memory *mem,
                              const VkMemoryAllocateInfo *alloc_info)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   if (VN_PERF(NO_ASYNC_MEM_ALLOC)) {
      return vn_call_vkAllocateMemory(dev->primary_ring, dev_handle,
                                      alloc_info, NULL, &mem_handle);
   }

   struct vn_ring_submit_command ring_submit;
   vn_submit_vkAllocateMemory(dev->primary_ring, 0, dev_handle, alloc_info,
                              NULL, &mem_handle, &ring_submit);
   if (!ring_submit.ring_seqno_valid)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mem->bo_ring_seqno_valid = true;
   mem->bo_ring_seqno = ring_submit.ring_seqno;
   return VK_SUCCESS;
}

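/* Frees the renderer-side VkDeviceMemory asynchronously on the ring. */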
static inline void
vn_device_memory_free_simple(struct vn_device *dev,
                             struct vn_device_memory *mem)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   vn_async_vkFreeMemory(dev->primary_ring, dev_handle, mem_handle, NULL);
}

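/* Waits until the renderer has processed the async vkAllocateMemory tracked
 * by bo_ring_seqno, so that a bo can safely be created from, or destroyed
 * alongside, the renderer-side memory.
 */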
static VkResult
vn_device_memory_wait_alloc(struct vn_device *dev,
                            struct vn_device_memory *mem)
{
   if (!mem->bo_ring_seqno_valid)
      return VK_SUCCESS;

   /* fine to clear this here since renderer submission failure is fatal */
   mem->bo_ring_seqno_valid = false;

   /* no need to wait for ring if
    * - mem alloc is done upon bo map or export
    * - mem import is done upon bo destroy
    */
   if (vn_ring_get_seqno_status(dev->primary_ring, mem->bo_ring_seqno))
      return VK_SUCCESS;

   const uint64_t ring_id = vn_ring_get_id(dev->primary_ring);
   uint32_t local_data[8];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));
   vn_encode_vkWaitRingSeqnoMESA(&local_enc, 0, ring_id, mem->bo_ring_seqno);
   return vn_renderer_submit_simple(dev->renderer, local_data,
                                    vn_cs_encoder_get_len(&local_enc));
}

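/* Creates mem->base_bo from the renderer-side VkDeviceMemory once the async
 * allocation has completed.
 */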
static inline VkResult
vn_device_memory_bo_init(struct vn_device *dev, struct vn_device_memory *mem)
{
   VkResult result = vn_device_memory_wait_alloc(dev, mem);
   if (result != VK_SUCCESS)
      return result;

   const struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];
   return vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem_vk->size, mem->base.id, mem_type->propertyFlags,
      mem_vk->export_handle_types, &mem->base_bo);
}

static inline void
vn_device_memory_bo_fini(struct vn_device *dev, struct vn_device_memory *mem)
{
   if (mem->base_bo) {
      vn_device_memory_wait_alloc(dev, mem);
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
   }
}

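/* Imports a dma-buf fd: wraps it in a renderer bo, then allocates the
 * renderer-side VkDeviceMemory with the bo's resource id chained in via
 * VkImportMemoryResourceInfoMESA. On success, ownership of the fd is
 * transferred and it is closed here.
 */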
VkResult
vn_device_memory_import_dma_buf(struct vn_device *dev,
                                struct vn_device_memory *mem,
                                const VkMemoryAllocateInfo *alloc_info,
                                bool force_unmappable,
                                int fd)
{
   const VkMemoryType *mem_type =
      &dev->physical_device->memory_properties
          .memoryTypes[alloc_info->memoryTypeIndex];

   struct vn_renderer_bo *bo;
   VkResult result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, alloc_info->allocationSize, fd,
      force_unmappable ? 0 : mem_type->propertyFlags, &bo);
   if (result != VK_SUCCESS)
      return result;

   vn_ring_roundtrip(dev->primary_ring);

   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = bo->res_id,
   };
   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };
   result = vn_device_memory_alloc_simple(dev, mem, &memory_allocate_info);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, bo);
      return result;
   }

   /* need to close import fd on success to avoid fd leak */
   close(fd);
   mem->base_bo = bo;

   return VK_SUCCESS;
}

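/* guest vram path: the bo providing the pages is created first, and the
 * renderer-side VkDeviceMemory then imports that resource instead of
 * exporting one.
 */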
static VkResult
vn_device_memory_alloc_guest_vram(struct vn_device *dev,
                                  struct vn_device_memory *mem,
                                  const VkMemoryAllocateInfo *alloc_info)
{
   const struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];
   VkMemoryPropertyFlags flags = mem_type->propertyFlags;

   /* An external allocation may request a non-mappable memory type. Force
    * VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT so that the virtio-gpu driver sends
    * the address of the allocated blob to the host via the
    * RESOURCE_MAP_BLOB command.
    */
   if (mem_vk->export_handle_types)
      flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

   VkResult result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem_vk->size, mem->base.id, flags,
      mem_vk->export_handle_types, &mem->base_bo);
   if (result != VK_SUCCESS) {
      return result;
   }

   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = mem->base_bo->res_id,
   };

   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };

   vn_ring_roundtrip(dev->primary_ring);

   result = vn_device_memory_alloc_simple(dev, mem, &memory_allocate_info);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      return result;
   }

   return VK_SUCCESS;
}

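/* Exportable allocation against the renderer: allocate the memory, create a
 * bo from it, and track a roundtrip seqno so vn_FreeMemory can wait for the
 * renderer to have seen the bo before the memory is freed.
 */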
static VkResult
vn_device_memory_alloc_export(struct vn_device *dev,
                              struct vn_device_memory *mem,
                              const VkMemoryAllocateInfo *alloc_info)
{
   VkResult result = vn_device_memory_alloc_simple(dev, mem, alloc_info);
   if (result != VK_SUCCESS)
      return result;

   result = vn_device_memory_bo_init(dev, mem);
   if (result != VK_SUCCESS) {
      vn_device_memory_free_simple(dev, mem);
      return result;
   }

   result =
      vn_ring_submit_roundtrip(dev->primary_ring, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      vn_device_memory_free_simple(dev, mem);
      return result;
   }

   mem->bo_roundtrip_seqno_valid = true;

   return VK_SUCCESS;
}

struct vn_device_memory_alloc_info {
   VkMemoryAllocateInfo alloc;
   VkExportMemoryAllocateInfo export;
   VkMemoryAllocateFlagsInfo flags;
   VkMemoryDedicatedAllocateInfo dedicated;
   VkMemoryOpaqueCaptureAddressAllocateInfo capture;
};

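/* Deep-copies the allocation info into local_info, rewriting the export
 * handle types to the renderer's handle type and dropping pNext structs that
 * are not forwarded (or, with guest vram, the export info entirely).
 */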
static const VkMemoryAllocateInfo *
vn_device_memory_fix_alloc_info(
   const VkMemoryAllocateInfo *alloc_info,
   const VkExternalMemoryHandleTypeFlagBits renderer_handle_type,
   bool has_guest_vram,
   struct vn_device_memory_alloc_info *local_info)
{
   local_info->alloc = *alloc_info;
   VkBaseOutStructure *cur = (void *)&local_info->alloc;

   vk_foreach_struct_const(src, alloc_info->pNext) {
      void *next = NULL;
      switch (src->sType) {
      case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         /* guest vram turns export alloc into import, so drop export info */
         if (has_guest_vram)
            break;
         memcpy(&local_info->export, src, sizeof(local_info->export));
         local_info->export.handleTypes = renderer_handle_type;
         next = &local_info->export;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
         memcpy(&local_info->flags, src, sizeof(local_info->flags));
         next = &local_info->flags;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         memcpy(&local_info->dedicated, src, sizeof(local_info->dedicated));
         next = &local_info->dedicated;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
         memcpy(&local_info->capture, src, sizeof(local_info->capture));
         next = &local_info->capture;
         break;
      default:
         break;
      }

      if (next) {
         cur->pNext = next;
         cur = next;
      }
   }

   cur->pNext = NULL;

   return &local_info->alloc;
}

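/* Dispatches to the guest vram, export, or plain allocation path based on
 * renderer capabilities and the requested memory type and handle types.
 */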
static VkResult
vn_device_memory_alloc(struct vn_device *dev,
                       struct vn_device_memory *mem,
                       const VkMemoryAllocateInfo *alloc_info)
{
   struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];

   const bool has_guest_vram = dev->renderer->info.has_guest_vram;
   const bool host_visible =
      mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
   const bool export_alloc = mem_vk->export_handle_types;

   const VkExternalMemoryHandleTypeFlagBits renderer_handle_type =
      dev->physical_device->external_memory.renderer_handle_type;
   struct vn_device_memory_alloc_info local_info;
   if (mem_vk->export_handle_types &&
       mem_vk->export_handle_types != renderer_handle_type) {
      alloc_info = vn_device_memory_fix_alloc_info(
         alloc_info, renderer_handle_type, has_guest_vram, &local_info);

      /* ensure correct blob flags */
      mem_vk->export_handle_types = renderer_handle_type;
   }

   if (has_guest_vram && (host_visible || export_alloc)) {
      return vn_device_memory_alloc_guest_vram(dev, mem, alloc_info);
   } else if (export_alloc) {
      return vn_device_memory_alloc_export(dev, mem, alloc_info);
   } else {
      return vn_device_memory_alloc_simple(dev, mem, alloc_info);
   }
}

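/* Emits a VK_EXT_device_memory_report event if the device has any callbacks
 * registered.
 */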
static void
vn_device_memory_emit_report(struct vn_device *dev,
                             struct vn_device_memory *mem,
                             bool is_alloc,
                             VkResult result)
{
   if (likely(!dev->memory_reports))
      return;

   const struct vk_device_memory *mem_vk = &mem->base.base;
   VkDeviceMemoryReportEventTypeEXT type;
   if (result != VK_SUCCESS) {
      type = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT;
   } else if (is_alloc) {
      type = mem_vk->import_handle_type
                ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
                : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT;
   } else {
      type = mem_vk->import_handle_type
                ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
                : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT;
   }
   const uint64_t mem_obj_id =
      (mem_vk->import_handle_type | mem_vk->export_handle_types)
         ? mem->base_bo->res_id
         : mem->base.id;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];
   vn_device_emit_device_memory_report(dev, type, mem_obj_id, mem_vk->size,
                                       VK_OBJECT_TYPE_DEVICE_MEMORY,
                                       (uintptr_t)mem, mem_type->heapIndex);
}

VkResult
vn_AllocateMemory(VkDevice device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMemory)
{
   struct vn_device *dev = vn_device_from_handle(device);

   const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
   const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
   vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
      switch (pnext->sType) {
      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         import_fd_info = (const void *)pnext;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         dedicated_info = (const void *)pnext;
         break;
      default:
         break;
      }
   }

   struct vn_device_memory *mem = vk_device_memory_create(
      &dev->base.base, pAllocateInfo, pAllocator, sizeof(*mem));
   if (!mem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_set_id(mem, vn_get_next_obj_id(), VK_OBJECT_TYPE_DEVICE_MEMORY);

   VkResult result;
   if (mem->base.base.ahardware_buffer) {
      result = vn_android_device_import_ahb(dev, mem, dedicated_info);
   } else if (import_fd_info) {
      result = vn_device_memory_import_dma_buf(dev, mem, pAllocateInfo, false,
                                               import_fd_info->fd);
   } else {
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo);
   }

   vn_device_memory_emit_report(dev, mem, /* is_alloc */ true, result);

   if (result != VK_SUCCESS) {
      vk_device_memory_destroy(&dev->base.base, pAllocator, &mem->base.base);
      return vn_error(dev->instance, result);
   }

   *pMemory = vn_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

void
vn_FreeMemory(VkDevice device,
              VkDeviceMemory memory,
              const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   if (!mem)
      return;

   vn_device_memory_emit_report(dev, mem, /* is_alloc */ false, VK_SUCCESS);

   /* ensure renderer side import still sees the resource */
   vn_device_memory_bo_fini(dev, mem);

   if (mem->bo_roundtrip_seqno_valid)
      vn_ring_wait_roundtrip(dev->primary_ring, mem->bo_roundtrip_seqno);

   vn_device_memory_free_simple(dev, mem);
   vk_device_memory_destroy(&dev->base.base, pAllocator, &mem->base.base);
}

uint64_t
vn_GetDeviceMemoryOpaqueCaptureAddress(
   VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->primary_ring,
                                                        device, pInfo);
}

VkResult
vn_MapMemory(VkDevice device,
             VkDeviceMemory memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   const struct vk_device_memory *mem_vk = &mem->base.base;
   const bool need_bo = !mem->base_bo;
   void *ptr = NULL;
   VkResult result;

   /* We don't want to blindly create a bo for each HOST_VISIBLE memory as
    * that has a cost. By deferring bo creation until now, we can avoid the
    * cost unless a bo is really needed. However, that means
    * vn_renderer_bo_map will block until the renderer creates the resource
    * and injects the pages into the guest.
    *
    * XXX We also assume that a vn_renderer_bo can be created as long as the
    * renderer VkDeviceMemory has a mappable memory type. That is plain
    * wrong. It cannot be fixed until a new extension is created and
    * supported by the driver, and the renderer switches to that extension.
    */
   if (need_bo) {
      result = vn_device_memory_bo_init(dev, mem);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
   if (!ptr) {
      /* vn_renderer_bo_map implies a roundtrip on success, but not here. */
      if (need_bo) {
         result = vn_ring_submit_roundtrip(dev->primary_ring,
                                           &mem->bo_roundtrip_seqno);
         if (result != VK_SUCCESS)
            return vn_error(dev->instance, result);

         mem->bo_roundtrip_seqno_valid = true;
      }

      return vn_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
   }

   mem->map_end = size == VK_WHOLE_SIZE ? mem_vk->size : offset + size;

   *ppData = ptr + offset;

   return VK_SUCCESS;
}

void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
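   /* Nothing to do: the renderer bo mapping persists and is released when
    * the bo itself is destroyed in vn_device_memory_bo_fini.
    */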
}

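/* Flush/invalidate ranges map directly onto the underlying renderer bo.
 * VK_WHOLE_SIZE resolves against mem->map_end recorded by vn_MapMemory.
 */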
VkResult
vn_FlushMappedMemoryRanges(VkDevice device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_flush(dev->renderer, mem->base_bo, range->offset, size);
   }

   return VK_SUCCESS;
}

VkResult
vn_InvalidateMappedMemoryRanges(VkDevice device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_invalidate(dev->renderer, mem->base_bo, range->offset,
                                size);
   }

   return VK_SUCCESS;
}

void
vn_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   struct vn_device *dev = vn_device_from_handle(device);
   vn_call_vkGetDeviceMemoryCommitment(dev->primary_ring, device, memory,
                                       pCommittedMemoryInBytes);
}

VkResult
vn_GetMemoryFdKHR(VkDevice device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem =
      vn_device_memory_from_handle(pGetFdInfo->memory);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType &
          (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
           VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
   assert(mem->base_bo);
   *pFd = vn_renderer_bo_export_dma_buf(dev->renderer, mem->base_bo);
   if (*pFd < 0)
      return vn_error(dev->instance, VK_ERROR_TOO_MANY_OBJECTS);

   return VK_SUCCESS;
}

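/* Queries the renderer-side allocation size and memory type bits for a
 * dma-buf by importing it into a temporary renderer bo and calling
 * vkGetMemoryResourcePropertiesMESA on the resulting resource id.
 */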
VkResult
vn_get_memory_dma_buf_properties(struct vn_device *dev,
                                 int fd,
                                 uint64_t *out_alloc_size,
                                 uint32_t *out_mem_type_bits)
{
   VkDevice device = vn_device_to_handle(dev);

   struct vn_renderer_bo *bo;
   VkResult result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, 0 /* size */, fd, 0 /* flags */, &bo);
   if (result != VK_SUCCESS) {
      vn_log(dev->instance, "bo_create_from_dma_buf failed");
      return result;
   }

   vn_ring_roundtrip(dev->primary_ring);

   VkMemoryResourceAllocationSizePropertiesMESA alloc_size_props = {
      .sType =
         VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_MESA,
   };
   VkMemoryResourcePropertiesMESA props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
      .pNext = &alloc_size_props,
   };
   result = vn_call_vkGetMemoryResourcePropertiesMESA(
      dev->primary_ring, device, bo->res_id, &props);
   vn_renderer_bo_unref(dev->renderer, bo);
   if (result != VK_SUCCESS) {
      vn_log(dev->instance, "vkGetMemoryResourcePropertiesMESA failed");
      return result;
   }

   *out_alloc_size = alloc_size_props.allocationSize;
   *out_mem_type_bits = props.memoryTypeBits;

   return VK_SUCCESS;
}

VkResult
vn_GetMemoryFdPropertiesKHR(VkDevice device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t alloc_size = 0;
   uint32_t mem_type_bits = 0;
   VkResult result = VK_SUCCESS;

   if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   result =
      vn_get_memory_dma_buf_properties(dev, fd, &alloc_size, &mem_type_bits);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   pMemoryFdProperties->memoryTypeBits = mem_type_bits;

   return VK_SUCCESS;
}