/*
 * Copyright (c) 2007-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "util/detect.h"

#if DETECT_OS_LINUX || DETECT_OS_BSD || DETECT_OS_SOLARIS
#include <unistd.h>
#include <sched.h>
#endif
#include <inttypes.h>

#include "util/compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"
#include "pipebuffer/pb_buffer_fenced.h"
#include "vmw_screen.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * The following members are mutable and protected by this mutex.
    */
   mtx_t mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

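   /**
    * Unfenced buffer list.
    *
    * All buffers not currently referenced by a fence are placed in this
    * list.
    */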
   struct list_head unfenced;
   pb_size num_unfenced;

};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * The following members are mutable and protected by
    * fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

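   /**
    * Number of outstanding CPU mappings of the underlying buffer.
    */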
   unsigned mapcount;

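   /**
    * Validation list this buffer is currently queued on, and the
    * PB_USAGE_GPU_READ/WRITE flags it was validated with.
    */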
   struct pb_validate *vl;
   unsigned validation_flags;

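   /**
    * Fence associated with the last GPU usage of this buffer; NULL while
    * the buffer sits on the unfenced list.
    */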
   struct pipe_fence_handle *fence;
};


static inline struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static inline struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        bool wait);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#if MESA_DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = list_entry(curr, struct fenced_buffer, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : "none");
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = list_entry(curr, struct fenced_buffer, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


static inline void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static inline void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns true if the buffer was destroyed.
 */
static inline bool
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return true;
   }

   return false;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static inline enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if (fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      bool proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      mtx_unlock(&fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      mtx_lock(&fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? true : false;

      ops->fence_reference(ops, &fence, NULL);

      if (proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         bool destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);
         (void) destroyed;

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns true if at least one buffer was removed.
 */
static bool
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      bool wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   bool ret = false;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      fenced_buf = list_entry(curr, struct fenced_buffer, head);

      if (fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further
             * waits.
             */
            wait = false;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = true;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static inline bool
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf,
                                            const struct pb_desc *desc)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size, desc);
   return fenced_buf->buffer ? true : false;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        bool wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, false);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf, desc);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while (!fenced_buf->buffer &&
          (fenced_manager_check_signalled_locked(fenced_mgr, false))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                  desc);
   }

   if (!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while (!fenced_buf->buffer &&
             (fenced_manager_check_signalled_locked(fenced_mgr, true))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                     desc);
      }
   }

   if (!fenced_buf->buffer) {
      if (0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


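/**
 * Destroy callback (see fenced_buffer_vtbl below): called once the wrapper's
 * reference count has dropped to zero; releases the GPU storage and frees
 * the wrapper.
 */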
static void
fenced_buffer_destroy(void *winsys, struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   mtx_lock(&fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   mtx_unlock(&fenced_mgr->mutex);
}


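/**
 * Map callback (see fenced_buffer_vtbl).
 *
 * CPU mappings are serialized against pending GPU access: unless
 * PB_USAGE_UNSYNCHRONIZED is given, the call waits for GPU writes (or for
 * GPU reads when mapping for CPU write) to finish, and returns NULL early
 * when PB_USAGE_DONTBLOCK is set and the fence has not signalled yet.
 */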
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   mtx_lock(&fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
          ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
           (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it,
       * if blocking is forbidden.
       */
      if ((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   map = pb_map(fenced_buf->buffer, flags, flush_ctx);

   if (map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

 done:
   mtx_unlock(&fenced_mgr->mutex);

   return map;
}


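/**
 * Unmap callback (see fenced_buffer_vtbl): drops one CPU mapping and clears
 * the CPU usage flags once the last mapping goes away.
 */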
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if (fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if (!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


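/**
 * Validate callback (see fenced_buffer_vtbl): queue the underlying buffer on
 * a validation list with the requested GPU usage flags. Passing a NULL
 * validation list invalidates any previous validation; a buffer already
 * queued on a different list yields PIPE_ERROR_RETRY.
 */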
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   mtx_lock(&fenced_mgr->mutex);

   if (!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if (fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if (fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

 done:
   mtx_unlock(&fenced_mgr->mutex);

   return ret;
}


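/**
 * Fence callback (see fenced_buffer_vtbl): associate a fence with the
 * validated buffer, moving it onto the fenced list (and dropping any
 * previous fence when a different one, or NULL, is supplied).
 */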
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   mtx_lock(&fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if (fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         bool destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
         (void) destroyed;
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


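/**
 * Get-base-buffer callback (see fenced_buffer_vtbl): forward to the
 * underlying storage buffer.
 */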
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   assert(fenced_buf->buffer);

   if (fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if (!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment_log2 = util_logbase2(desc->alignment);
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   mtx_lock(&fenced_mgr->mutex);

   /*
    * Try to create GPU storage, waiting for buffers to be freed if necessary.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc, true);

   /*
    * Give up.
    */
   if (ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer);

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   mtx_unlock(&fenced_mgr->mutex);

   return &fenced_buf->base;

 no_storage:
   mtx_unlock(&fenced_mgr->mutex);
   FREE(fenced_buf);
 no_buffer:
   return NULL;
}


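/**
 * pb_manager::flush callback: wait for all outstanding fences to expire,
 * then flush the provider.
 */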
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);
   while (fenced_manager_check_signalled_locked(fenced_mgr, true))
      ;
   mtx_unlock(&fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


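/**
 * pb_manager::destroy callback: wait for all fenced buffers to become idle,
 * then tear down the mutex and free the manager. The provider is left
 * untouched.
 */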
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      mtx_unlock(&fenced_mgr->mutex);
#if DETECT_OS_LINUX || DETECT_OS_BSD || DETECT_OS_SOLARIS
      sched_yield();
#endif
      mtx_lock(&fenced_mgr->mutex);
      while (fenced_manager_check_signalled_locked(fenced_mgr, true))
         ;
   }

#if MESA_DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   mtx_unlock(&fenced_mgr->mutex);
   mtx_destroy(&fenced_mgr->mutex);

   FREE(fenced_mgr);
}


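/**
 * Create a fenced buffer manager on top of an existing provider.
 *
 * Every buffer created through the returned manager is wrapped in a
 * struct fenced_buffer so that its GPU usage can be tracked with the given
 * fence ops.
 *
 * Illustrative sketch only (the names below are placeholders; a real winsys
 * obtains the provider and fence ops from its own screen setup):
 *
 *    struct pb_manager *mgr =
 *       simple_fenced_bufmgr_create(kernel_mgr, fence_ops);
 *    if (!mgr)
 *       return false;
 *
 * Note that fenced_bufmgr_destroy() does not destroy the provider, so the
 * caller keeps managing the provider's lifetime separately.
 */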
struct pb_manager *
simple_fenced_bufmgr_create(struct pb_manager *provider,
                            struct pb_fence_ops *ops)
{
   struct fenced_manager *fenced_mgr;

   if (!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;

   list_inithead(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   list_inithead(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   (void) mtx_init(&fenced_mgr->mutex, mtx_plain);

   return &fenced_mgr->base;
}