/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#ifndef TU_DRM_H
#define TU_DRM_H

#include "tu_common.h"

struct tu_u_trace_syncobj;
struct vdrm_bo;

enum tu_bo_alloc_flags {
   TU_BO_ALLOC_NO_FLAGS = 0,
   TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
   TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
   TU_BO_ALLOC_REPLAYABLE = 1 << 2,
   TU_BO_ALLOC_INTERNAL_RESOURCE = 1 << 3,
   TU_BO_ALLOC_DMABUF = 1 << 4,
   TU_BO_ALLOC_SHAREABLE = 1 << 5,
};
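
/* Illustrative sketch, not part of this header: the flags form a bitmask and
 * can be combined when allocating a BO, e.g. through tu_bo_init_new()
 * declared below (the "cmdstream" debug label here is made up):
 *
 *    struct tu_bo *bo;
 *    VkResult result =
 *       tu_bo_init_new(dev, NULL, &bo, 4096,
 *                      (enum tu_bo_alloc_flags)(TU_BO_ALLOC_ALLOW_DUMP |
 *                                               TU_BO_ALLOC_INTERNAL_RESOURCE),
 *                      "cmdstream");
 */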

/* tu_timeline_sync is a timeline point type for vk_sync_timeline built on a
 * DRM syncobj. The handling logic is mostly copied from anv_bo_sync, since
 * it seems usable in much the same way as in anv.
 */
enum tu_timeline_sync_state {
   /** Indicates that this is a new (or newly reset) fence. */
   TU_TIMELINE_SYNC_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   TU_TIMELINE_SYNC_STATE_SUBMITTED,

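   /** Indicates that this fence has signaled and the GPU work it was
    * submitted alongside has completed.
    */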
   TU_TIMELINE_SYNC_STATE_SIGNALED,
};

enum tu_mem_sync_op {
   TU_MEM_SYNC_CACHE_TO_GPU,
   TU_MEM_SYNC_CACHE_FROM_GPU,
};
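
/* Illustrative sketch, assuming the usual non-coherent cache-maintenance
 * pattern (tu_bo_sync_cache() is declared further down in this header):
 *
 *    // flush CPU writes before the GPU reads the buffer
 *    tu_bo_sync_cache(dev, bo, 0, bo->size, TU_MEM_SYNC_CACHE_TO_GPU);
 *    // invalidate CPU caches before reading back GPU writes
 *    tu_bo_sync_cache(dev, bo, 0, bo->size, TU_MEM_SYNC_CACHE_FROM_GPU);
 */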

struct tu_bo {
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t size;
   uint64_t iova;
   void *map;
   const char *name; /* points at the name of the matching device->bo_sizes entry */
   int32_t refcnt;

   uint32_t bo_list_idx;

#ifdef TU_HAS_KGSL
   /* We have to store the fd returned by ion_fd_data
    * in order to be able to mmap this buffer and to
    * export its file descriptor.
    */
   int shared_fd;
#endif

   bool implicit_sync : 1;
   bool never_unmap : 1;

   /* Pointer to the vk_object_base associated with the BO
    * for the purposes of VK_EXT_device_address_binding_report
    */
   struct vk_object_base *base;
};
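
/* Illustrative sketch: map is only valid once the BO has been mapped; NULL is
 * assumed here to request an ordinary (non-placed) mapping via tu_bo_map(),
 * declared below (data/data_size stand in for the caller's payload):
 *
 *    if (tu_bo_map(dev, bo, NULL) == VK_SUCCESS)
 *       memcpy(bo->map, data, data_size);
 */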

struct tu_knl {
   const char *name;

   VkResult (*device_init)(struct tu_device *dev);
   void (*device_finish)(struct tu_device *dev);
   int (*device_get_gpu_timestamp)(struct tu_device *dev, uint64_t *ts);
   int (*device_get_suspend_count)(struct tu_device *dev, uint64_t *suspend_count);
   VkResult (*device_check_status)(struct tu_device *dev);
   int (*submitqueue_new)(struct tu_device *dev, int priority, uint32_t *queue_id);
   void (*submitqueue_close)(struct tu_device *dev, uint32_t queue_id);
   VkResult (*bo_init)(struct tu_device *dev, struct vk_object_base *base,
                       struct tu_bo **out_bo, uint64_t size, uint64_t client_iova,
                       VkMemoryPropertyFlags mem_property,
                       enum tu_bo_alloc_flags flags, const char *name);
   VkResult (*bo_init_dmabuf)(struct tu_device *dev, struct tu_bo **out_bo,
                              uint64_t size, int prime_fd);
   int (*bo_export_dmabuf)(struct tu_device *dev, struct tu_bo *bo);
   VkResult (*bo_map)(struct tu_device *dev, struct tu_bo *bo, void *placed_addr);
   void (*bo_allow_dump)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_finish)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_set_metadata)(struct tu_device *dev, struct tu_bo *bo,
                           void *metadata, uint32_t metadata_size);
   int (*bo_get_metadata)(struct tu_device *dev, struct tu_bo *bo,
                          void *metadata, uint32_t metadata_size);
   VkResult (*device_wait_u_trace)(struct tu_device *dev,
                                   struct tu_u_trace_syncobj *syncobj);
   VkResult (*queue_submit)(struct tu_queue *queue,
                            struct vk_queue_submit *submit);

   const struct vk_device_entrypoint_table *device_entrypoints;
};
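
/* Illustrative sketch: each kernel backend (msm, virtio, kgsl) fills in one
 * of these tables and the common code dispatches through it. The table and
 * function names below are made up for illustration:
 *
 *    static const struct tu_knl msm_knl_funcs = {
 *       .name = "msm",
 *       .device_init = msm_device_init,
 *       .bo_init = msm_bo_init,
 *       .queue_submit = msm_queue_submit,
 *    };
 */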

struct tu_zombie_vma {
   int fence;
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t iova;
   uint64_t size;
};

struct tu_timeline_sync {
   struct vk_sync base;

   enum tu_timeline_sync_state state;
   uint32_t syncobj;
};

VkResult
tu_bo_init_new_explicit_iova(struct tu_device *dev,
                             struct vk_object_base *base,
                             struct tu_bo **out_bo,
                             uint64_t size,
                             uint64_t client_iova,
                             VkMemoryPropertyFlags mem_property,
                             enum tu_bo_alloc_flags flags,
                             const char *name);

static inline VkResult
tu_bo_init_new(struct tu_device *dev, struct vk_object_base *base,
               struct tu_bo **out_bo, uint64_t size,
               enum tu_bo_alloc_flags flags, const char *name)
{
   // TODO don't mark everything with HOST_VISIBLE !!! Anything that
   // never gets CPU access should not have this bit set
   return tu_bo_init_new_explicit_iova(
      dev, base, out_bo, size, 0,
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
      flags, name);
}
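
/* Illustrative sketch, assuming TU_BO_ALLOC_REPLAYABLE pairs with a
 * client-requested iova for capture/replay-style allocations through the
 * explicit-iova entry point (requested_iova is hypothetical):
 *
 *    result = tu_bo_init_new_explicit_iova(
 *       dev, NULL, &bo, size, requested_iova,
 *       VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
 *          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
 *          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
 *       TU_BO_ALLOC_REPLAYABLE, "replayable");
 */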

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd);

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo, void *placed_addr);
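
/* Note (assumption): placed_addr is presumably the fixed CPU address for a
 * placed mapping in the spirit of VK_EXT_map_memory_placed; passing NULL is
 * assumed to request an ordinary mapping at a driver-chosen address.
 */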

VkResult
tu_bo_unmap(struct tu_device *dev, struct tu_bo *bo, bool reserve);

void
tu_bo_sync_cache(struct tu_device *dev,
                 struct tu_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size,
                 enum tu_mem_sync_op op);

uint32_t tu_get_l1_dcache_size(void);

void tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo);

void tu_bo_set_metadata(struct tu_device *dev, struct tu_bo *bo,
                        void *metadata, uint32_t metadata_size);
int tu_bo_get_metadata(struct tu_device *dev, struct tu_bo *bo,
                       void *metadata, uint32_t metadata_size);

static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}
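
/* Illustrative sketch: BOs are refcounted. tu_bo_get_ref() takes an extra
 * reference; tu_bo_finish() is assumed to drop one, destroying the BO once
 * the count reaches zero:
 *
 *    struct tu_bo *ref = tu_bo_get_ref(bo);
 *    // ... use ref on another path ...
 *    tu_bo_finish(dev, ref);
 */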

VkResult tu_knl_kgsl_load(struct tu_instance *instance, int fd);

struct _drmVersion;
VkResult tu_knl_drm_msm_load(struct tu_instance *instance,
                             int fd, struct _drmVersion *version,
                             struct tu_physical_device **out);
VkResult tu_knl_drm_virtio_load(struct tu_instance *instance,
                                int fd, struct _drmVersion *version,
                                struct tu_physical_device **out);

VkResult
tu_enumerate_devices(struct vk_instance *vk_instance);
VkResult
tu_physical_device_try_create(struct vk_instance *vk_instance,
                              struct _drmDevice *drm_device,
                              struct vk_physical_device **out);

VkResult
tu_drm_device_init(struct tu_device *dev);

void
tu_drm_device_finish(struct tu_device *dev);

int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts);

int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count);

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);

VkResult
tu_device_check_status(struct vk_device *vk_device);

int
tu_drm_submitqueue_new(struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(struct tu_device *dev, uint32_t queue_id);

VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit);

#endif /* TU_DRM_H */