xref: /aosp_15_r20/external/virglrenderer/src/venus/vkr_device_memory.c (revision bbecb9d118dfdb95f99bd754f8fa9be01f189df3)
1 /*
2  * Copyright 2020 Google LLC
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include "vkr_device_memory.h"
7 
8 #include <gbm.h>
9 
10 #include "venus-protocol/vn_protocol_renderer_transport.h"
11 
12 #include "vkr_device_memory_gen.h"
13 #include "vkr_physical_device.h"
14 
15 static bool
vkr_get_fd_handle_type_from_virgl_fd_type(struct vkr_physical_device * dev,enum virgl_resource_fd_type fd_type,VkExternalMemoryHandleTypeFlagBits * out_handle_type)16 vkr_get_fd_handle_type_from_virgl_fd_type(
17    struct vkr_physical_device *dev,
18    enum virgl_resource_fd_type fd_type,
19    VkExternalMemoryHandleTypeFlagBits *out_handle_type)
20 {
21    assert(dev);
22    assert(out_handle_type);
23 
24    switch (fd_type) {
25    case VIRGL_RESOURCE_FD_DMABUF:
26       if (!dev->EXT_external_memory_dma_buf)
27          return false;
28       *out_handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
29       break;
30    case VIRGL_RESOURCE_FD_OPAQUE:
31       if (!dev->KHR_external_memory_fd)
32          return false;
33       *out_handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
34       break;
35    default:
36       return false;
37    }
38 
39    return true;
40 }
41 
42 static bool
vkr_get_fd_info_from_resource_info(struct vkr_context * ctx,struct vkr_physical_device * physical_dev,const VkImportMemoryResourceInfoMESA * res_info,VkImportMemoryFdInfoKHR * out)43 vkr_get_fd_info_from_resource_info(struct vkr_context *ctx,
44                                    struct vkr_physical_device *physical_dev,
45                                    const VkImportMemoryResourceInfoMESA *res_info,
46                                    VkImportMemoryFdInfoKHR *out)
47 {
48    struct vkr_resource_attachment *att = NULL;
49    enum virgl_resource_fd_type fd_type;
50    int fd = -1;
51    VkExternalMemoryHandleTypeFlagBits handle_type;
52 
53    att = vkr_context_get_resource(ctx, res_info->resourceId);
54    if (!att) {
55       vkr_log("failed to import resource: invalid res_id %u", res_info->resourceId);
56       vkr_cs_decoder_set_fatal(&ctx->decoder);
57       return false;
58    }
59 
60    fd_type = virgl_resource_export_fd(att->resource, &fd);
61    if (fd_type == VIRGL_RESOURCE_FD_INVALID)
62       return false;
63 
64    if (!vkr_get_fd_handle_type_from_virgl_fd_type(physical_dev, fd_type, &handle_type)) {
65       close(fd);
66       return false;
67    }
68 
69    *out = (VkImportMemoryFdInfoKHR){
70       .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
71       .pNext = res_info->pNext,
72       .fd = fd,
73       .handleType = handle_type,
74    };
75    return true;
76 }
77 
78 static VkResult
vkr_get_fd_info_from_allocation_info(struct vkr_physical_device * physical_dev,const VkMemoryAllocateInfo * alloc_info,struct gbm_bo ** out_gbm_bo,VkImportMemoryFdInfoKHR * out_fd_info)79 vkr_get_fd_info_from_allocation_info(struct vkr_physical_device *physical_dev,
80                                      const VkMemoryAllocateInfo *alloc_info,
81                                      struct gbm_bo **out_gbm_bo,
82                                      VkImportMemoryFdInfoKHR *out_fd_info)
83 {
84 #ifdef MINIGBM
85    const uint32_t gbm_bo_use_flags =
86       GBM_BO_USE_LINEAR | GBM_BO_USE_SW_READ_RARELY | GBM_BO_USE_SW_WRITE_RARELY;
87 #else
88    const uint32_t gbm_bo_use_flags = GBM_BO_USE_LINEAR;
89 #endif
90 
91    struct gbm_bo *gbm_bo;
92    int fd = -1;
93 
94    assert(physical_dev->gbm_device);
95 
96    /*
97     * Reject here for simplicity. Letting VkPhysicalDeviceVulkan11Properties return
98     * min(maxMemoryAllocationSize, UINT32_MAX) will affect unmappable scenarios.
99     */
100    if (alloc_info->allocationSize > UINT32_MAX)
101       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
102 
103    /* 4K alignment is used on all implementations we support. */
104    gbm_bo =
105       gbm_bo_create(physical_dev->gbm_device, align(alloc_info->allocationSize, 4096), 1,
106                     GBM_FORMAT_R8, gbm_bo_use_flags);
107    if (!gbm_bo)
108       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
109 
110    /* gbm_bo_get_fd returns negative error code on failure */
111    fd = gbm_bo_get_fd(gbm_bo);
112    if (fd < 0) {
113       gbm_bo_destroy(gbm_bo);
114       return fd == -EMFILE ? VK_ERROR_TOO_MANY_OBJECTS : VK_ERROR_OUT_OF_HOST_MEMORY;
115    }
116 
117    *out_gbm_bo = gbm_bo;
118    *out_fd_info = (VkImportMemoryFdInfoKHR){
119       .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
120       .pNext = alloc_info->pNext,
121       .fd = fd,
122       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
123    };
124    return VK_SUCCESS;
125 }
126 
static void
vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
                              struct vn_command_vkAllocateMemory *args)
{
   /* Handle vkAllocateMemory from the guest.  Before forwarding to the
    * driver (via vkr_device_memory_create_and_add), this rewrites the
    * pAllocateInfo pNext chain in place:
    *  - VkImportMemoryResourceInfoMESA is replaced by a real
    *    VkImportMemoryFdInfoKHR built from the referenced virgl resource;
    *  - host-visible, non-imported allocations are forced to be external
    *    (fd export or gbm bo import) so the memory can later be mapped or
    *    exported on behalf of the guest.
    * local_import_info/local_export_info are stack-allocated but stay alive
    * for the duration of the driver call made inside create_and_add.
    */
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);
   struct vkr_physical_device *physical_dev = dev->physical_device;
   VkBaseInStructure *prev_of_res_info = NULL;
   VkImportMemoryResourceInfoMESA *res_info = NULL;
   /* fd == -1 marks "no fd owned yet" for the error-path close below */
   VkImportMemoryFdInfoKHR local_import_info = { .fd = -1 };
   VkExportMemoryAllocateInfo *export_info = vkr_find_struct(
      args->pAllocateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
   const bool no_dma_buf_export =
      !export_info ||
      !(export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   struct vkr_device_memory *mem = NULL;
   const uint32_t mem_type_index = args->pAllocateInfo->memoryTypeIndex;
   const uint32_t property_flags =
      physical_dev->memory_properties.memoryTypes[mem_type_index].propertyFlags;
   uint32_t valid_fd_types = 0;
   struct gbm_bo *gbm_bo = NULL;

   /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR in place */
   prev_of_res_info = vkr_find_prev_struct(
      args->pAllocateInfo, VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA);
   if (prev_of_res_info) {
      res_info = (VkImportMemoryResourceInfoMESA *)prev_of_res_info->pNext;
      if (!vkr_get_fd_info_from_resource_info(ctx, physical_dev, res_info,
                                              &local_import_info)) {
         args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
         return;
      }

      /* splice the fd info into the chain where the resource info was */
      prev_of_res_info->pNext = (const struct VkBaseInStructure *)&local_import_info;
   }

   /* XXX Force dma_buf/opaque fd export or gbm bo import until a new extension that
    * supports direct export from host visible memory
    *
    * Most VkImage and VkBuffer are non-external while most VkDeviceMemory are external
    * if allocated with a host visible memory type. We still violate the spec by binding
    * external memory to non-external image or buffer, which needs spec changes with a
    * new extension.
    *
    * Skip forcing external if a valid VkImportMemoryResourceInfoMESA is provided, since
    * the mapping will be directly set up from the existing virgl resource.
    */
   VkExportMemoryAllocateInfo local_export_info;
   if ((property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) && !res_info) {
      /* An implementation can support dma_buf import along with opaque fd export/import.
       * If the client driver is using external memory and requesting dma_buf, without
       * dma_buf fd export support, we must use gbm bo import path instead of forcing
       * opaque fd export. e.g. the client driver uses external memory for wsi image.
       */
      if (dev->physical_device->is_dma_buf_fd_export_supported ||
          (dev->physical_device->is_opaque_fd_export_supported && no_dma_buf_export)) {
         /* prefer dma_buf export when available, else fall back to opaque fd */
         VkExternalMemoryHandleTypeFlagBits handle_type =
            dev->physical_device->is_dma_buf_fd_export_supported
               ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
               : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
         if (export_info) {
            export_info->handleTypes |= handle_type;
         } else {
            /* no export info in the chain yet: prepend a local one */
            local_export_info = (const VkExportMemoryAllocateInfo){
               .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
               .pNext = args->pAllocateInfo->pNext,
               .handleTypes = handle_type,
            };
            export_info = &local_export_info;
            ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_export_info;
         }
      } else if (dev->physical_device->EXT_external_memory_dma_buf) {
         /* Allocate gbm bo to force dma_buf fd import. */
         VkResult result;

         if (export_info) {
            /* Strip export info since valid_fd_types can only be dma_buf here. */
            VkBaseInStructure *prev_of_export_info = vkr_find_prev_struct(
               args->pAllocateInfo, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);

            prev_of_export_info->pNext = export_info->pNext;
            export_info = NULL;
         }

         result = vkr_get_fd_info_from_allocation_info(physical_dev, args->pAllocateInfo,
                                                       &gbm_bo, &local_import_info);
         if (result != VK_SUCCESS) {
            args->ret = result;
            return;
         }

         /* local_import_info.pNext already points at the old chain head */
         ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_import_info;

         valid_fd_types = 1 << VIRGL_RESOURCE_FD_DMABUF;
      }
   }

   /* record which fd types a later export of this memory may use */
   if (export_info) {
      if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
         valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE;
      if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
         valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF;
   }

   mem = vkr_device_memory_create_and_add(ctx, args);
   if (!mem) {
      /* allocation failed: release the fd and bo we still own */
      if (local_import_info.fd >= 0)
         close(local_import_info.fd);
      if (gbm_bo)
         gbm_bo_destroy(gbm_bo);
      return;
   }

   mem->device = dev;
   mem->property_flags = property_flags;
   mem->valid_fd_types = valid_fd_types;
   mem->gbm_bo = gbm_bo;
   mem->allocation_size = args->pAllocateInfo->allocationSize;
   mem->memory_type_index = mem_type_index;
}
247 
248 static void
vkr_dispatch_vkFreeMemory(struct vn_dispatch_context * dispatch,struct vn_command_vkFreeMemory * args)249 vkr_dispatch_vkFreeMemory(struct vn_dispatch_context *dispatch,
250                           struct vn_command_vkFreeMemory *args)
251 {
252    struct vkr_device_memory *mem = vkr_device_memory_from_handle(args->memory);
253    if (!mem)
254       return;
255 
256    vkr_device_memory_release(mem);
257    vkr_device_memory_destroy_and_remove(dispatch->data, args);
258 }
259 
260 static void
vkr_dispatch_vkGetDeviceMemoryCommitment(UNUSED struct vn_dispatch_context * dispatch,struct vn_command_vkGetDeviceMemoryCommitment * args)261 vkr_dispatch_vkGetDeviceMemoryCommitment(
262    UNUSED struct vn_dispatch_context *dispatch,
263    struct vn_command_vkGetDeviceMemoryCommitment *args)
264 {
265    struct vkr_device *dev = vkr_device_from_handle(args->device);
266    struct vn_device_proc_table *vk = &dev->proc_table;
267 
268    vn_replace_vkGetDeviceMemoryCommitment_args_handle(args);
269    vk->GetDeviceMemoryCommitment(args->device, args->memory,
270                                  args->pCommittedMemoryInBytes);
271 }
272 
273 static void
vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress(UNUSED struct vn_dispatch_context * dispatch,struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress * args)274 vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress(
275    UNUSED struct vn_dispatch_context *dispatch,
276    struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress *args)
277 {
278    struct vkr_device *dev = vkr_device_from_handle(args->device);
279    struct vn_device_proc_table *vk = &dev->proc_table;
280 
281    vn_replace_vkGetDeviceMemoryOpaqueCaptureAddress_args_handle(args);
282    args->ret = vk->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo);
283 }
284 
285 static void
vkr_dispatch_vkGetMemoryResourcePropertiesMESA(struct vn_dispatch_context * dispatch,struct vn_command_vkGetMemoryResourcePropertiesMESA * args)286 vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
287    struct vn_dispatch_context *dispatch,
288    struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
289 {
290    struct vkr_context *ctx = dispatch->data;
291    struct vkr_device *dev = vkr_device_from_handle(args->device);
292    struct vn_device_proc_table *vk = &dev->proc_table;
293 
294    struct vkr_resource_attachment *att = vkr_context_get_resource(ctx, args->resourceId);
295    if (!att) {
296       vkr_log("failed to query resource props: invalid res_id %u", args->resourceId);
297       vkr_cs_decoder_set_fatal(&ctx->decoder);
298       return;
299    }
300 
301    int fd = -1;
302    enum virgl_resource_fd_type fd_type = virgl_resource_export_fd(att->resource, &fd);
303    VkExternalMemoryHandleTypeFlagBits handle_type;
304    if (!vkr_get_fd_handle_type_from_virgl_fd_type(dev->physical_device, fd_type,
305                                                   &handle_type) ||
306        handle_type != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
307       close(fd);
308       args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
309       return;
310    }
311 
312    VkMemoryFdPropertiesKHR mem_fd_props = {
313       .sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR,
314       .pNext = NULL,
315       .memoryTypeBits = 0,
316    };
317    vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(args);
318    args->ret = vk->GetMemoryFdPropertiesKHR(args->device, handle_type, fd, &mem_fd_props);
319    if (args->ret != VK_SUCCESS) {
320       close(fd);
321       return;
322    }
323 
324    args->pMemoryResourceProperties->memoryTypeBits = mem_fd_props.memoryTypeBits;
325 
326    VkMemoryResourceAllocationSizeProperties100000MESA *alloc_size_props = vkr_find_struct(
327       args->pMemoryResourceProperties->pNext,
328       VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA);
329    if (alloc_size_props)
330       alloc_size_props->allocationSize = lseek(fd, 0, SEEK_END);
331 
332    close(fd);
333 }
334 
335 void
vkr_context_init_device_memory_dispatch(struct vkr_context * ctx)336 vkr_context_init_device_memory_dispatch(struct vkr_context *ctx)
337 {
338    struct vn_dispatch_context *dispatch = &ctx->dispatch;
339 
340    dispatch->dispatch_vkAllocateMemory = vkr_dispatch_vkAllocateMemory;
341    dispatch->dispatch_vkFreeMemory = vkr_dispatch_vkFreeMemory;
342    dispatch->dispatch_vkMapMemory = NULL;
343    dispatch->dispatch_vkUnmapMemory = NULL;
344    dispatch->dispatch_vkFlushMappedMemoryRanges = NULL;
345    dispatch->dispatch_vkInvalidateMappedMemoryRanges = NULL;
346    dispatch->dispatch_vkGetDeviceMemoryCommitment =
347       vkr_dispatch_vkGetDeviceMemoryCommitment;
348    dispatch->dispatch_vkGetDeviceMemoryOpaqueCaptureAddress =
349       vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress;
350 
351    dispatch->dispatch_vkGetMemoryResourcePropertiesMESA =
352       vkr_dispatch_vkGetMemoryResourcePropertiesMESA;
353 }
354 
355 void
vkr_device_memory_release(struct vkr_device_memory * mem)356 vkr_device_memory_release(struct vkr_device_memory *mem)
357 {
358    if (mem->gbm_bo)
359       gbm_bo_destroy(mem->gbm_bo);
360 }
361 
362 int
vkr_device_memory_export_fd(struct vkr_device_memory * mem,VkExternalMemoryHandleTypeFlagBits handle_type,int * out_fd)363 vkr_device_memory_export_fd(struct vkr_device_memory *mem,
364                             VkExternalMemoryHandleTypeFlagBits handle_type,
365                             int *out_fd)
366 {
367    struct vn_device_proc_table *vk = &mem->device->proc_table;
368    int fd = -1;
369 
370    if (mem->gbm_bo) {
371       /* mem->gbm_bo is a gbm bo backing non-external mappable memory */
372       assert((handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) &&
373              (mem->valid_fd_types == 1 << VIRGL_RESOURCE_FD_DMABUF));
374 
375       /* gbm_bo_get_fd returns negative error code on failure */
376       fd = gbm_bo_get_fd(mem->gbm_bo);
377       if (fd < 0)
378          return fd;
379    } else {
380       VkDevice dev_handle = mem->device->base.handle.device;
381       VkDeviceMemory mem_handle = mem->base.handle.device_memory;
382       const VkMemoryGetFdInfoKHR fd_info = {
383          .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
384          .memory = mem_handle,
385          .handleType = handle_type,
386       };
387       VkResult result = vk->GetMemoryFdKHR(dev_handle, &fd_info, &fd);
388       if (result != VK_SUCCESS)
389          return result == VK_ERROR_TOO_MANY_OBJECTS ? -EMFILE : -ENOMEM;
390    }
391 
392    *out_fd = fd;
393    return 0;
394 }
395