xref: /aosp_15_r20/external/mesa3d/src/virtio/vdrm/vdrm.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2023 Google, Inc.
3  * SPDX-License-Identifier: MIT
4  */
5 
/* A simple helper layer for virtgpu drm native context, which also
 * abstracts the differences between vtest (communicating via socket
 * with the vtest server) and virtgpu (communicating via the drm/virtio
 * driver in the guest).
 */
11 
12 #ifndef __VDRM_H__
13 #define __VDRM_H__
14 
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "util/simple_mtx.h"

#include "virglrenderer_hw.h"
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 
25 struct vdrm_device;
26 struct vdrm_execbuf_params;
27 
/**
 * Backend dispatch table for a vdrm device.
 *
 * There are two backends: the guest drm/virtio kernel driver, and vtest
 * (socket to a vtest server) — see the file-header comment.  The
 * "_locked" ops presumably require vdrm_device::eb_lock to be held by
 * the caller — TODO confirm against the implementations.
 */
struct vdrm_device_funcs {
   /* Note flush_locked and execbuf_locked are similar, and on top of virtgpu
    * guest kernel driver are basically the same.  But with vtest, only cmds
    * that result in host kernel cmd submission can take and/or return fence
    * and/or syncobj fd's.
    */
   int (*execbuf_locked)(struct vdrm_device *vdev, struct vdrm_execbuf_params *p,
                         void *command, unsigned size);
   int (*flush_locked)(struct vdrm_device *vdev, uintptr_t *fencep);

   /* Wait until the fence returned via flush_locked()'s fencep has
    * signalled.
    */
   void (*wait_fence)(struct vdrm_device *vdev, uintptr_t fence);

   /* Import a dma-buf fd, returning a GEM handle. */
   uint32_t (*dmabuf_to_handle)(struct vdrm_device *vdev, int fd);
   /* Translate a GEM handle to its host resource id. */
   uint32_t (*handle_to_res_id)(struct vdrm_device *vdev, uint32_t handle);

   /* Buffer-object ops; handles are GEM handles as with the wrappers
    * below (vdrm_bo_wait(), vdrm_bo_map(), etc).
    */
   uint32_t (*bo_create)(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
                         uint64_t blob_id, struct vdrm_ccmd_req *req);
   int (*bo_wait)(struct vdrm_device *vdev, uint32_t handle);
   void *(*bo_map)(struct vdrm_device *vdev, uint32_t handle, size_t size, void *placed_addr);
   int (*bo_export_dmabuf)(struct vdrm_device *vdev, uint32_t handle);
   void (*bo_close)(struct vdrm_device *vdev, uint32_t handle);

   /* Tear down the device/connection. */
   void (*close)(struct vdrm_device *vdev);
};
52 
/**
 * An open vdrm connection, created with vdrm_device_connect().  Wraps
 * either the guest virtgpu kernel driver or a vtest socket via the
 * backend-provided funcs table.
 */
struct vdrm_device {
   const struct vdrm_device_funcs *funcs;  /* backend dispatch table */

   struct virgl_renderer_capset_drm caps;
   struct vdrm_shmem *shmem;               /* memory shared with the host */
   uint8_t *rsp_mem;                       /* response buffer, carved up by vdrm_alloc_rsp() */
   uint32_t rsp_mem_len;
   uint32_t next_rsp_off;                  /* presumably the allocation cursor into rsp_mem — confirm in vdrm.c */
   simple_mtx_t rsp_lock;                  /* presumably guards rsp_mem/next_rsp_off — confirm */
   simple_mtx_t eb_lock;                   /* presumably serializes execbuf/flush (the "_locked" ops) — confirm */

   uint32_t next_seqno;

   /*
    * Buffering for requests to host:
    */
   uint32_t reqbuf_len;                    /* bytes currently buffered in reqbuf */
   uint32_t reqbuf_cnt;                    /* number of buffered requests */
   uint8_t reqbuf[0x4000];                 /* 16 KiB request staging buffer */
};
73 
/* Open a vdrm connection on fd, creating a context of the given type.
 * NOTE(review): presumably returns NULL on failure (pointer return) —
 * confirm against the implementation.
 */
struct vdrm_device *vdrm_device_connect(int fd, uint32_t context_type);
void vdrm_device_close(struct vdrm_device *vdev);

/* Allocate sz bytes of response memory for req's reply. */
void * vdrm_alloc_rsp(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, uint32_t sz);
/* Queue req for the host; if sync, wait for it to be processed.
 * NOTE(review): "sync" semantics inferred from the name — confirm.
 */
int vdrm_send_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, bool sync);
/* Flush buffered requests (see vdrm_device::reqbuf) to the host. */
int vdrm_flush(struct vdrm_device *vdev);
80 
/**
 * Parameters for a host cmd submission via vdrm_execbuf().
 */
struct vdrm_execbuf_params {
   int ring_idx;                  /* submission ring index (presumably maps to virtgpu ring_idx — confirm) */

   struct vdrm_ccmd_req *req;     /* Note, must be host kernel cmd submit */

   /* GEM handles referenced by this submission: */
   uint32_t *handles;
   uint32_t num_handles;

   struct drm_virtgpu_execbuffer_syncobj *in_syncobjs;
   struct drm_virtgpu_execbuffer_syncobj *out_syncobjs;

   bool has_in_fence_fd : 1;      /* fence_fd carries an input fence */
   bool needs_out_fence_fd : 1;   /* caller wants an output fence returned in fence_fd */

   int fence_fd;                  /* in/out fence */

   uint32_t num_in_syncobjs;
   uint32_t num_out_syncobjs;
};
100 
/**
 * Note, must be a host cmd submission, which specified in/out fence/syncobj
 * can be passed to.  In the vtest case, we can't get fences/syncobjs for
 * other host cmds.
 */
int vdrm_execbuf(struct vdrm_device *vdev, struct vdrm_execbuf_params *p);

/* Wait for the host to have processed req.  NOTE(review): semantics
 * inferred from the name — confirm against the implementation.
 */
void vdrm_host_sync(struct vdrm_device *vdev, const struct vdrm_ccmd_req *req);
109 
110 /**
111  * Import dmabuf fd returning a GEM handle
112  */
113 static inline uint32_t
vdrm_dmabuf_to_handle(struct vdrm_device * vdev,int fd)114 vdrm_dmabuf_to_handle(struct vdrm_device *vdev, int fd)
115 {
116    return vdev->funcs->dmabuf_to_handle(vdev, fd);
117 }
118 
119 static inline uint32_t
vdrm_handle_to_res_id(struct vdrm_device * vdev,uint32_t handle)120 vdrm_handle_to_res_id(struct vdrm_device *vdev, uint32_t handle)
121 {
122    return vdev->funcs->handle_to_res_id(vdev, handle);
123 }
124 
/* Create a blob buffer-object, returning its GEM handle.  NOTE(review):
 * the role of req (a ccmd presumably associated with the creation) and
 * the failure convention are inferred — confirm against vdrm.c.
 */
uint32_t vdrm_bo_create(struct vdrm_device *vdev, size_t size,
                        uint32_t blob_flags, uint64_t blob_id,
                        struct vdrm_ccmd_req *req);
128 
129 static inline int
vdrm_bo_wait(struct vdrm_device * vdev,uint32_t handle)130 vdrm_bo_wait(struct vdrm_device *vdev, uint32_t handle)
131 {
132    return vdev->funcs->bo_wait(vdev, handle);
133 }
134 
135 static inline void *
vdrm_bo_map(struct vdrm_device * vdev,uint32_t handle,size_t size,void * placed_addr)136 vdrm_bo_map(struct vdrm_device *vdev, uint32_t handle, size_t size, void *placed_addr)
137 {
138    return vdev->funcs->bo_map(vdev, handle, size, placed_addr);
139 }
140 
141 static inline int
vdrm_bo_export_dmabuf(struct vdrm_device * vdev,uint32_t handle)142 vdrm_bo_export_dmabuf(struct vdrm_device *vdev, uint32_t handle)
143 {
144    return vdev->funcs->bo_export_dmabuf(vdev, handle);
145 }
146 
147 static inline void
vdrm_bo_close(struct vdrm_device * vdev,uint32_t handle)148 vdrm_bo_close(struct vdrm_device *vdev, uint32_t handle)
149 {
150    vdev->funcs->bo_close(vdev, handle);
151 }
152 
153 #ifdef __cplusplus
154 } /* end of extern "C" */
155 #endif
156 
157 #endif /* __VDRM_H__ */
158