/*
 * Copyright © 2012-2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_DRMIF_H_
#define FREEDRENO_DRMIF_H_

#include <stdint.h>

#include "util/bitset.h"
#include "util/list.h"
#include "util/u_debug.h"
#include "util/u_queue.h"

#ifdef __cplusplus
extern "C" {
#endif

struct fd_bo;
struct fd_pipe;
struct fd_device;
enum fd_pipe_id {
   FD_PIPE_3D = 1,
   FD_PIPE_2D = 2,
   /* some devices have two 2d blocks.. not really sure how to
    * use that yet, so just ignoring the 2nd 2d pipe for now
    */
   FD_PIPE_MAX
};
enum fd_param_id {
   FD_DEVICE_ID,
   FD_GMEM_SIZE,
   FD_GMEM_BASE,     /* 64b */
   FD_GPU_ID,
   FD_CHIP_ID,       /* 64b */
   FD_MAX_FREQ,
   FD_TIMESTAMP,
   FD_NR_PRIORITIES, /* # of rings == # of distinct priority levels */
   FD_CTX_FAULTS,    /* # of per context faults */
   FD_GLOBAL_FAULTS, /* # of global (all context) faults */
   FD_SUSPEND_COUNT, /* # of times the GPU has suspended, and potentially lost state */
   FD_SYSPROF,       /* Settable (for CAP_SYS_ADMIN) param for system profiling */
   FD_VA_SIZE,       /* GPU virtual address size */
};
/**
 * Helper for fence/seqno comparisons which deals properly with rollover.
 * Returns true if fence 'a' is before fence 'b'.
 */
static inline bool
fd_fence_before(uint32_t a, uint32_t b)
{
   return (int32_t)(a - b) < 0;
}
static inline bool
fd_fence_after(uint32_t a, uint32_t b)
{
   return (int32_t)(a - b) > 0;
}
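
/* Worked example of the rollover handling: with a = 0xfffffffe (just
 * before rollover) and b = 0x00000001 (just after), the unsigned
 * difference a - b is 0xfffffffd, i.e. -3 as int32_t, so
 * fd_fence_before(a, b) correctly returns true even though a > b when
 * compared as plain unsigned values.
 */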

/**
 * Encapsulates submit out-fence(s), which consist of a 'timestamp' (per-
 * pipe (submitqueue) sequence number) and optionally, if requested, an
 * out-fence-fd
 *
 * Per submit, there are actually two fences:
 *  1) The userspace maintained fence, which is used to optimistically
 *     avoid kernel ioctls to query if specific rendering is completed
 *  2) The kernel maintained fence, which we cannot directly do anything
 *     with, other than pass it back to the kernel
 *
 * The userspace fence is mostly internal to the drm layer, but we want
 * the gallium layer to be able to pass it back to us for things like
 * fd_pipe_wait().  So this struct encapsulates the two.
 */
struct fd_fence {
   /**
    * Note refcnt is *not* atomic, but protected by fence_lock, since the
    * fence_lock is held in fd_bo_add_fence(), which is the hotpath.
    */
   int32_t refcnt;

   struct fd_pipe *pipe;

   /**
    * The ready fence is signaled once the submit is actually flushed down
    * to the kernel, and fence/fence_fd are populated.  You must wait for
    * this fence to be signaled before reading fence/fence_fd.
    */
   struct util_queue_fence ready;

   uint32_t kfence;  /* kernel fence */
   uint32_t ufence;  /* userspace fence */

   /**
    * Optional dma_fence fd, returned by submit if use_fence_fd is true
    */
   int fence_fd;
   bool use_fence_fd;
};

struct fd_fence *fd_fence_new(struct fd_pipe *pipe, bool use_fence_fd);
struct fd_fence *fd_fence_ref(struct fd_fence *f);
struct fd_fence *fd_fence_ref_locked(struct fd_fence *f);
void fd_fence_del(struct fd_fence *f);
void fd_fence_del_locked(struct fd_fence *f);
void fd_fence_flush(struct fd_fence *f);
int fd_fence_wait(struct fd_fence *f);

/*
 * bo flags:
 */

#define FD_BO_CACHED_COHERENT BITSET_BIT(0) /* Default caching is WRITECOMBINE */
#define FD_BO_GPUREADONLY     BITSET_BIT(1)
#define FD_BO_NOMAP           BITSET_BIT(2) /* Hint that the bo will not be mmap'd */

/* Hint that the bo will be exported/shared: */
#define FD_BO_SHARED          BITSET_BIT(4)
#define FD_BO_SCANOUT         BITSET_BIT(5)

/* internal bo flags: */
#define _FD_BO_NOSYNC         BITSET_BIT(7) /* Avoid userspace fencing on control buffers */

/* Additional flags hinting usage, only used for tracing.  Buffers without
 * one of these flags set will be presumed to be driver internal.
 */
#define FD_BO_HINT_BUFFER     BITSET_BIT(8)
#define FD_BO_HINT_IMAGE      BITSET_BIT(9)
#define FD_BO_HINT_COMMAND    BITSET_BIT(10)
#define _FD_BO_HINT_HEAP      BITSET_BIT(11)
#define _FD_BO_HINTS ( \
   FD_BO_HINT_BUFFER | \
   FD_BO_HINT_IMAGE | \
   FD_BO_HINT_COMMAND | \
   _FD_BO_HINT_HEAP | \
   0)

/*
 * bo access flags: (keep aligned to MSM_PREP_x)
 */
#define FD_BO_PREP_READ   BITSET_BIT(0)
#define FD_BO_PREP_WRITE  BITSET_BIT(1)
#define FD_BO_PREP_NOSYNC BITSET_BIT(2)
#define FD_BO_PREP_FLUSH  BITSET_BIT(3)

/* device functions:
 */

struct fd_device *fd_device_new(int fd);
struct fd_device *fd_device_new_dup(int fd);
struct fd_device *fd_device_open(void);
struct fd_device *fd_device_ref(struct fd_device *dev);
void fd_device_purge(struct fd_device *dev);
void fd_device_del(struct fd_device *dev);
int fd_device_fd(struct fd_device *dev);
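
/* Illustrative device setup/teardown (sketch; fd_device_open() probes for
 * a supported device, while fd_device_new() wraps a drm fd the caller
 * already owns):
 *
 *    struct fd_device *dev = fd_device_open();
 *    if (!dev)
 *       ...no supported device found...
 *    ...
 *    fd_device_del(dev);
 */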

enum fd_version {
   FD_VERSION_MADVISE = 1,         /* kernel supports madvise */
   FD_VERSION_UNLIMITED_CMDS = 1,  /* submits w/ >4 cmd buffers (growable ringbuffer) */
   FD_VERSION_FENCE_FD = 2,        /* submit command supports in/out fences */
   FD_VERSION_GMEM_BASE = 3,       /* supports querying GMEM base address */
   FD_VERSION_SUBMIT_QUEUES = 3,   /* submit queues and multiple priority levels */
   FD_VERSION_BO_IOVA = 3,         /* supports fd_bo_get/put_iova() */
   FD_VERSION_SOFTPIN = 4,         /* adds softpin, bo name, and dump flag */
   FD_VERSION_ROBUSTNESS = 5,      /* adds FD_NR_FAULTS and FD_PP_PGTABLE */
   FD_VERSION_MEMORY_FD = 2,       /* supports shared memory objects */
   FD_VERSION_SUSPENDS = 7,        /* Adds MSM_PARAM_SUSPENDS to detect device suspend */
   FD_VERSION_CACHED_COHERENT = 8, /* Adds cached-coherent support (a6xx+) */
   FD_VERSION_VA_SIZE = 9,
};
enum fd_version fd_device_version(struct fd_device *dev);

bool fd_has_syncobj(struct fd_device *dev);
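
/* Illustrative feature check (sketch): gate optional functionality on the
 * kernel interface version, e.g. only rely on fd_bo_get_iova() when the
 * kernel reports FD_VERSION_BO_IOVA or newer:
 *
 *    if (fd_device_version(dev) >= FD_VERSION_BO_IOVA) {
 *       uint64_t iova = fd_bo_get_iova(bo);
 *       ...
 *    }
 */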

/* pipe functions:
 */

struct fd_pipe *fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
struct fd_pipe *fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id,
                             uint32_t prio);
struct fd_pipe *fd_pipe_ref(struct fd_pipe *pipe);
struct fd_pipe *fd_pipe_ref_locked(struct fd_pipe *pipe);
void fd_pipe_del(struct fd_pipe *pipe);
void fd_pipe_purge(struct fd_pipe *pipe);
const struct fd_dev_id *fd_pipe_dev_id(struct fd_pipe *pipe);
int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
                      uint64_t *value);
int fd_pipe_set_param(struct fd_pipe *pipe, enum fd_param_id param,
                      uint64_t value);
int fd_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence);
/* timeout in nanosec */
int fd_pipe_wait_timeout(struct fd_pipe *pipe, const struct fd_fence *fence,
                         uint64_t timeout);
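
/* Illustrative pipe usage (sketch; error handling elided).  Create a 3d
 * pipe, wait at most 1ms for a fence, then drop the reference:
 *
 *    struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
 *    ...
 *    fd_pipe_wait_timeout(pipe, fence, 1000000);   // timeout in ns
 *    fd_pipe_del(pipe);
 */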

/* buffer-object functions:
 */

struct fd_bo {
   struct fd_device *dev;
   uint32_t size;
   uint32_t handle;
   uint32_t name;
   int32_t refcnt;
   uint32_t reloc_flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
   uint32_t alloc_flags; /* flags that control allocation/mapping, ie. FD_BO_x */
   uint64_t iova;
   void *map;
   const struct fd_bo_funcs *funcs;

   enum {
      NO_CACHE = 0,
      BO_CACHE = 1,
      RING_CACHE = 2,
   } bo_reuse : 2;

   /* Most recent index in submit's bo table, used to optimize the common
    * case where a bo is used many times in the same submit.
    */
   uint32_t idx;

   struct list_head node; /* bucket-list entry */
   time_t free_time;      /* time when added to bucket-list */

   unsigned short nr_fences, max_fences;
   struct fd_fence **fences;

   /* In the common case, there is no more than one fence attached.
    * This provides storage for the fences table until it grows to
    * be larger than a single element.
    */
   struct fd_fence *_inline_fence;
};

struct fd_bo *_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
void _fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap);

static inline void fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
   _util_printf_format(2, 3);

static inline void
fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
{
#ifndef NDEBUG
   va_list ap;
   va_start(ap, fmt);
   _fd_bo_set_name(bo, fmt, ap);
   va_end(ap);
#endif
}

static inline struct fd_bo *fd_bo_new(struct fd_device *dev, uint32_t size,
                                      uint32_t flags, const char *fmt, ...)
   _util_printf_format(4, 5);

static inline struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags, const char *fmt,
          ...)
{
   struct fd_bo *bo = _fd_bo_new(dev, size, flags);
#ifndef NDEBUG
   if (fmt) {
      va_list ap;
      va_start(ap, fmt);
      _fd_bo_set_name(bo, fmt, ap);
      va_end(ap);
   }
#endif
   return bo;
}
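
/* Illustrative allocation (sketch): a 4KiB GPU-read-only buffer with a
 * debug name; the name is a no-op in NDEBUG builds:
 *
 *    struct fd_bo *bo = fd_bo_new(dev, 4096, FD_BO_GPUREADONLY,
 *                                 "const-state[%u]", idx);
 */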

struct fd_bo *fd_bo_from_handle(struct fd_device *dev, uint32_t handle,
                                uint32_t size);
struct fd_bo *fd_bo_from_name(struct fd_device *dev, uint32_t name);
struct fd_bo *fd_bo_from_dmabuf(struct fd_device *dev, int fd);
void fd_bo_mark_for_dump(struct fd_bo *bo);

static inline uint64_t
fd_bo_get_iova(struct fd_bo *bo)
{
   /* ancient kernels did not support this */
   assert(bo->iova != 0);
   return bo->iova;
}

struct fd_bo *fd_bo_ref(struct fd_bo *bo);
void fd_bo_del(struct fd_bo *bo);
void fd_bo_del_array(struct fd_bo **bos, int count);
void fd_bo_del_list_nocache(struct list_head *list);
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
uint32_t fd_bo_handle(struct fd_bo *bo);
int fd_bo_dmabuf_drm(struct fd_bo *bo);
int fd_bo_dmabuf(struct fd_bo *bo);
uint32_t fd_bo_size(struct fd_bo *bo);
void *fd_bo_map(struct fd_bo *bo);
void fd_bo_upload(struct fd_bo *bo, void *src, unsigned off, unsigned len);
bool fd_bo_prefer_upload(struct fd_bo *bo, unsigned len);
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
bool fd_bo_is_cached(struct fd_bo *bo);
void fd_bo_set_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size);
int fd_bo_get_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size);
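
/* Illustrative CPU readback (sketch; assumes the bo was last written by
 * the GPU via 'pipe', and ignores errors):
 *
 *    fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_READ);   // wait for GPU access
 *    uint32_t *vaddr = fd_bo_map(bo);
 *    ...read from vaddr...
 */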

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* FREEDRENO_DRMIF_H_ */