/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

#include <libsync.h>

#include "util/u_memory.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "util/u_thread.h"

#include "pipebuffer/pb_buffer_fenced.h"

#include "vmw_screen.h"
#include "vmw_fence.h"

struct vmw_fence_ops
{
   /*
    * Immutable members.
    */
   struct pb_fence_ops base;
   struct vmw_winsys_screen *vws;

   mtx_t mutex;

   /*
    * Protected by mutex.
    */
   struct list_head not_signaled;
   uint32_t last_signaled;
   uint32_t last_emitted;
};

struct vmw_fence
{
   struct list_head ops_list;
   int32_t refcount;
   uint32_t handle;
   uint32_t mask;
   int32_t signalled;
   uint32_t seqno;
   int32_t fence_fd;
   bool imported; /* TRUE if imported from another process */
};

/**
 * vmw_fence_seq_is_signaled - Check whether a fence seqno is
 * signaled.
 *
 * @seq: The fence sequence number to check.
 * @last: The last seqno known to have signaled.
 * @cur: The most recently emitted seqno, used as the reference point
 * for the wrap-around-safe comparison.
 *
 */
static inline bool
vmw_fence_seq_is_signaled(uint32_t seq, uint32_t last, uint32_t cur)
{
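   /*
    * Seqnos are 32-bit and wrap around, so compare distances measured
    * backwards from @cur rather than raw values: @seq has signaled iff
    * it lies no further behind @cur than @last does. For example, with
    * cur = 5, last = 0xfffffffe and seq = 0xffffffff, we get
    * 5 - 0xfffffffe = 7 and 5 - 0xffffffff = 6; 7 <= 6 is false, so
    * seq, which was emitted after last, is correctly reported as not
    * yet signaled.
    */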
   return (cur - last <= cur - seq);
}


/**
 * vmw_fence_ops - Return the vmw_fence_ops structure backing a
 * struct pb_fence_ops pointer.
 *
 * @ops: Pointer to a struct pb_fence_ops.
 *
 */
static inline struct vmw_fence_ops *
vmw_fence_ops(struct pb_fence_ops *ops)
{
   assert(ops);
   return (struct vmw_fence_ops *)ops;
}


/**
 * vmw_fences_release - Release all fences from the not_signaled
 * list.
 *
 * @ops: Pointer to a struct vmw_fence_ops.
 *
 */
static void
vmw_fences_release(struct vmw_fence_ops *ops)
{
   struct vmw_fence *fence, *n;

   mtx_lock(&ops->mutex);
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
      list_delinit(&fence->ops_list);
   mtx_unlock(&ops->mutex);
}

/**
 * vmw_fences_signal - Traverse the not_signaled list and try to
 * signal unsignaled fences.
 *
 * @fence_ops: Pointer to a struct pb_fence_ops.
 * @signaled: Seqno that has signaled.
 * @emitted: Last seqno emitted by the kernel.
 * @has_emitted: Whether we provide the emitted value.
 *
 */
void
vmw_fences_signal(struct pb_fence_ops *fence_ops,
                  uint32_t signaled,
                  uint32_t emitted,
                  bool has_emitted)
{
   struct vmw_fence_ops *ops = NULL;
   struct vmw_fence *fence, *n;

   if (fence_ops == NULL)
      return;

   ops = vmw_fence_ops(fence_ops);
   mtx_lock(&ops->mutex);

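   /*
    * If the caller didn't supply an emitted value, fall back to the
    * cached one. A cached value that is more than 2^30 seqnos ahead of
    * @signaled is almost certainly stale (e.g. from before a seqno
    * wrap), so clamp it to @signaled rather than trusting it.
    */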
   if (!has_emitted) {
      emitted = ops->last_emitted;
      if (emitted - signaled > (1 << 30))
         emitted = signaled;
   }

   if (signaled == ops->last_signaled && emitted == ops->last_emitted)
      goto out_unlock;

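   /*
    * Fences are appended to not_signaled in emission order, so the
    * list is sorted by seqno; we can stop scanning at the first fence
    * that hasn't signaled yet.
    */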
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) {
      if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
         break;

      p_atomic_set(&fence->signalled, 1);
      list_delinit(&fence->ops_list);
   }
   ops->last_signaled = signaled;
   ops->last_emitted = emitted;

out_unlock:
   mtx_unlock(&ops->mutex);
}


/**
 * vmw_fence - return the vmw_fence object identified by a
 * struct pipe_fence_handle *
 *
 * @fence: The opaque pipe fence handle.
 */
static inline struct vmw_fence *
vmw_fence(struct pipe_fence_handle *fence)
{
   return (struct vmw_fence *) fence;
}


/**
 * vmw_fence_create - Create a user-space fence object.
 *
 * @fence_ops: The fence_ops manager to register with.
 * @handle: Handle identifying the kernel fence object.
 * @seqno: The fence object's sequence number.
 * @mask: Mask of flags that this fence object may signal.
 * @fd: File descriptor to associate with the fence.
 *
 * Returns NULL on failure.
 */
struct pipe_fence_handle *
vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
                 uint32_t seqno, uint32_t mask, int32_t fd)
{
   struct vmw_fence *fence = CALLOC_STRUCT(vmw_fence);
   struct vmw_fence_ops *ops = NULL;

   if (!fence)
      return NULL;

   p_atomic_set(&fence->refcount, 1);
   fence->handle = handle;
   fence->mask = mask;
   fence->seqno = seqno;
   fence->fence_fd = fd;
   p_atomic_set(&fence->signalled, 0);

   /*
    * If the fence was not created by our device, then we won't
    * manage it with our ops.
    */
   if (!fence_ops) {
      fence->imported = true;
      return (struct pipe_fence_handle *) fence;
   }

   ops = vmw_fence_ops(fence_ops);

   mtx_lock(&ops->mutex);

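   /*
    * Passing @seqno itself as the reference point makes the wrap-safe
    * check degenerate to seqno == ops->last_signaled: a new fence only
    * starts out signaled if the kernel has already signaled this exact
    * seqno. Everything else goes onto the not_signaled list.
    */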
   if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
      p_atomic_set(&fence->signalled, 1);
      list_inithead(&fence->ops_list);
   } else {
      p_atomic_set(&fence->signalled, 0);
      list_addtail(&fence->ops_list, &ops->not_signaled);
   }

   mtx_unlock(&ops->mutex);

   return (struct pipe_fence_handle *) fence;
}


/**
 * vmw_fence_destroy - Frees a vmw fence object.
 *
 * Also closes the file handle associated with the object, if any.
 */
static
void vmw_fence_destroy(struct vmw_fence *vfence)
{
   if (vfence->fence_fd != -1)
      close(vfence->fence_fd);

   FREE(vfence);
}


/**
 * vmw_fence_reference - Reference / unreference a vmw fence object.
 *
 * @vws: Pointer to the winsys screen.
 * @ptr: Pointer to reference transfer destination.
 * @fence: Pointer to object to reference. May be NULL.
 */
void
vmw_fence_reference(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle **ptr,
                    struct pipe_fence_handle *fence)
{
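   /*
    * Standard reference-transfer helper: drop the reference currently
    * held in *ptr, destroying the fence if it was the last one, then
    * take a new reference on @fence and store it in *ptr.
    */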
   if (*ptr) {
      struct vmw_fence *vfence = vmw_fence(*ptr);

      if (p_atomic_dec_zero(&vfence->refcount)) {
         struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);

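         /*
          * Imported fences were created without a fence_ops manager:
          * they were never added to the not_signaled list and their
          * handle doesn't come from our kernel fence ioctls, so only
          * locally created fences need the unref and list removal.
          */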
         if (!vfence->imported) {
            vmw_ioctl_fence_unref(vws, vfence->handle);

            mtx_lock(&ops->mutex);
            list_delinit(&vfence->ops_list);
            mtx_unlock(&ops->mutex);
         }

         vmw_fence_destroy(vfence);
      }
   }

   if (fence) {
      struct vmw_fence *vfence = vmw_fence(fence);

      p_atomic_inc(&vfence->refcount);
   }

   *ptr = fence;
}


/**
 * vmw_fence_signalled - Check whether a fence object is signalled.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @flag: Fence flags to check. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the fence object was signaled, nonzero otherwise.
 */
int
vmw_fence_signalled(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle *fence,
                    unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);
   old = p_atomic_read(&vfence->signalled);

   vflags &= ~vfence->mask;

   if ((old & vflags) == vflags)
      return 0;

   /*
    * Currently we update signaled fences on each execbuf call.
    * That should really be sufficient, and we can avoid
    * a lot of kernel calls this way.
    */
#if 1
   ret = vmw_ioctl_fence_signalled(vws, vfence->handle, vflags);

   if (ret == 0)
      p_atomic_set(&vfence->signalled, 1);
   return ret;
#else
   (void) ret;
   return -1;
#endif
}

/**
 * vmw_fence_finish - Wait for a fence object to signal.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @timeout: How long to wait before timing out.
 * @flag: Fence flags to wait for. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the wait succeeded. Nonzero otherwise.
 */
int
vmw_fence_finish(struct vmw_winsys_screen *vws,
                 struct pipe_fence_handle *fence,
                 uint64_t timeout,
                 unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);

   if (vfence->imported) {
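      /*
       * Imported fences are waited on through their sync-file fd.
       * sync_wait() takes its timeout in milliseconds while @timeout
       * is in nanoseconds, hence the division.
       */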
      ret = sync_wait(vfence->fence_fd, timeout / 1000000);

      if (!ret)
         p_atomic_set(&vfence->signalled, 1);

      return !!ret;
   }

   old = p_atomic_read(&vfence->signalled);
   vflags &= ~vfence->mask;

   if ((old & vflags) == vflags)
      return 0;

   ret = vmw_ioctl_fence_finish(vws, vfence->handle, vflags);

   if (ret == 0) {
      int32_t prev = old;

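      /*
       * OR the newly signaled flags into vfence->signalled with a
       * compare-and-swap loop, so that bits set concurrently by other
       * threads are never lost: retry until the exchange succeeds.
       */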
      do {
         old = prev;
         prev = p_atomic_cmpxchg(&vfence->signalled, old, old | vflags);
      } while (prev != old);
   }

   return ret;
}

/**
 * vmw_fence_get_fd
 *
 * Returns the file descriptor associated with the fence.
 */
int
vmw_fence_get_fd(struct pipe_fence_handle *fence)
{
   struct vmw_fence *vfence;

   if (!fence)
      return -1;

   vfence = vmw_fence(fence);
   return vfence->fence_fd;
}


/**
 * vmw_fence_ops_fence_reference - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_reference.
 */
static void
vmw_fence_ops_fence_reference(struct pb_fence_ops *ops,
                              struct pipe_fence_handle **ptr,
                              struct pipe_fence_handle *fence)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   vmw_fence_reference(vws, ptr, fence);
}

/**
 * vmw_fence_ops_fence_signalled - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_signalled.
 */
static int
vmw_fence_ops_fence_signalled(struct pb_fence_ops *ops,
                              struct pipe_fence_handle *fence,
                              unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_signalled(vws, fence, flag);
}


/**
 * vmw_fence_ops_fence_finish - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_finish.
 */
static int
vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
                           struct pipe_fence_handle *fence,
                           unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_finish(vws, fence, OS_TIMEOUT_INFINITE, flag);
}


/**
 * vmw_fence_ops_destroy - Destroy a pb_fence_ops function table.
 *
 * @ops: The function table to destroy.
 *
 * Part of the pb_fence_ops API.
 */
static void
vmw_fence_ops_destroy(struct pb_fence_ops *ops)
{
   vmw_fences_release(vmw_fence_ops(ops));
   FREE(ops);
}


/**
 * vmw_fence_ops_create - Create a pb_fence_ops function table.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 *
 * Returns a pointer to a pb_fence_ops function table to interface
 * with pipe_buffer. This function is typically called on driver setup.
 *
 * Returns NULL on failure.
 */
struct pb_fence_ops *
vmw_fence_ops_create(struct vmw_winsys_screen *vws)
{
   struct vmw_fence_ops *ops;

   ops = CALLOC_STRUCT(vmw_fence_ops);
   if (!ops)
      return NULL;

   (void) mtx_init(&ops->mutex, mtx_plain);
   list_inithead(&ops->not_signaled);
   ops->base.destroy = &vmw_fence_ops_destroy;
   ops->base.fence_reference = &vmw_fence_ops_fence_reference;
   ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
   ops->base.fence_finish = &vmw_fence_ops_fence_finish;

   ops->vws = vws;

   return &ops->base;
}