/*
 * Copyright © 2012-2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "util/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_drm_perfetto.h"
#include "freedreno_priv.h"

simple_mtx_t table_lock = SIMPLE_MTX_INITIALIZER;
simple_mtx_t fence_lock = SIMPLE_MTX_INITIALIZER;

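/* Locking overview: table_lock protects the device-wide handle_table and
 * name_table (lookup, insert, remove), while fence_lock protects each
 * bo's fences[]/nr_fences state.
 */
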
/* set buffer name, and add to table, call w/ table_lock held: */
static void
set_name(struct fd_bo *bo, uint32_t name)
{
   bo->name = name;
   /* add ourself into the name table: */
   _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

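/* Sentinel returned by lookup_bo() when a table lookup races with the final
 * unref of the same bo in another thread: the entry is still in the table
 * but is about to be freed.  Callers must not dereference it; they either
 * retry the import or treat the handle as no longer valid.
 */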
static struct fd_bo zombie;

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo *
lookup_bo(struct hash_table *tbl, uint32_t key)
{
   struct fd_bo *bo = NULL;
   struct hash_entry *entry;

   simple_mtx_assert_locked(&table_lock);

   entry = _mesa_hash_table_search(tbl, &key);
   if (entry) {
      bo = entry->data;

      /* We could be racing with final unref in another thread, and won
       * the table_lock preventing the other thread from being able to
       * remove an object it is about to free.  Fortunately since table
       * lookup and removal are protected by the same lock (and table
       * removal happens before obj free) we can easily detect this by
       * checking for refcnt==0 (ie. 1 after p_atomic_inc_return).
       */
      if (p_atomic_inc_return(&bo->refcnt) == 1) {
         /* Restore the zombified reference count, so if another thread
          * that ends up calling lookup_bo() gets the table_lock before
          * the thread deleting the bo does, it doesn't mistakenly see
          * that the BO is live.
          *
          * We are holding the table_lock here so we can't be racing
          * with another caller of lookup_bo()
          */
         p_atomic_dec(&bo->refcnt);
         return &zombie;
      }

      if (!list_is_empty(&bo->node)) {
         mesa_logw("bo was in cache, size=%u, alloc_flags=0x%x\n",
                   bo->size, bo->alloc_flags);
      }

      /* don't break the bucket if this bo was found in one */
      list_delinit(&bo->node);
   }
   return bo;
}

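/* Common init for a freshly allocated or imported bo, called by the backend
 * after it has filled in size, handle, and funcs.
 */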
void
fd_bo_init_common(struct fd_bo *bo, struct fd_device *dev)
{
   /* Backend should have initialized these: */
   assert(bo->size);
   assert(bo->handle);
   assert(bo->funcs);

   bo->dev = dev;
   bo->iova = bo->funcs->iova(bo);
   bo->reloc_flags = FD_RELOC_FLAGS_INIT;

   p_atomic_set(&bo->refcnt, 1);
   list_inithead(&bo->node);

   bo->max_fences = 1;
   bo->fences = &bo->_inline_fence;

   if (!bo->map)
      VG_BO_ALLOC(bo);
}

/* import a buffer object from a GEM handle, call w/ table_lock held */
static struct fd_bo *
import_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo;

   simple_mtx_assert_locked(&table_lock);

   bo = dev->funcs->bo_from_handle(dev, size, handle);
   if (!bo) {
      struct drm_gem_close req = {
         .handle = handle,
      };
      drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
      return NULL;
   }

   bo->alloc_flags |= FD_BO_SHARED;

   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);

   return bo;
}

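/* Core allocation path: small allocations without special hints are
 * suballocated from the default (or ring) heap when available; otherwise
 * try to reuse a buffer from the given BO cache, and finally fall back to
 * asking the backend for a fresh allocation.
 */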
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
       struct fd_bo_cache *cache)
{
   struct fd_bo *bo = NULL;

   if (size < FD_BO_HEAP_BLOCK_SIZE) {
      uint32_t alloc_flags = flags & ~_FD_BO_HINTS;
      if ((alloc_flags == 0) && dev->default_heap)
         bo = fd_bo_heap_alloc(dev->default_heap, size, flags);
      else if ((flags == RING_FLAGS) && dev->ring_heap)
         bo = fd_bo_heap_alloc(dev->ring_heap, size, flags);
      if (bo)
         return bo;
   }

   /* demote cached-coherent to WC if not supported: */
   if ((flags & FD_BO_CACHED_COHERENT) && !dev->has_cached_coherent)
      flags &= ~FD_BO_CACHED_COHERENT;

   bo = fd_bo_cache_alloc(cache, &size, flags);
   if (bo)
      return bo;

   bo = dev->funcs->bo_new(dev, size, flags);
   if (!bo)
      return NULL;

   simple_mtx_lock(&table_lock);
   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
   simple_mtx_unlock(&table_lock);

   bo->alloc_flags = flags;

   fd_alloc_log(bo, FD_ALLOC_NONE, FD_ALLOC_ACTIVE);

   return bo;
}

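/* Note: callers normally reach this via the fd_bo_new() wrapper in
 * freedreno_drmif.h (which also assigns a debug name), rather than
 * calling it directly.
 */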
struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
   if (bo)
      bo->bo_reuse = BO_CACHE;
   return bo;
}

void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   bo->funcs->set_name(bo, fmt, ap);
}

/* Internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Because cmdstream bo's get vmap'd on
 * the kernel side, which is expensive, we want to re-use cmdstream bo's
 * for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
   struct fd_bo *bo = bo_new(dev, size, RING_FLAGS, &dev->ring_cache);
   if (bo) {
      bo->bo_reuse = RING_CACHE;
      bo->reloc_flags |= FD_RELOC_DUMP;
      fd_bo_set_name(bo, "cmdstream");
   }
   return bo;
}

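/* Import a bo from a GEM handle.  If the handle is already known, the
 * existing bo is returned with an extra reference.  Returns NULL if the
 * lookup raced with the handle being closed, since in that case the
 * handle itself is no longer valid.
 */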
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
   struct fd_bo *bo = NULL;

   simple_mtx_lock(&table_lock);

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   bo = import_bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   /* We've raced with the handle being closed, so the handle is no longer
    * valid.  Friends don't let friends share handles.
    */
   if (bo == &zombie)
      return NULL;

   return bo;
}

uint32_t
fd_handle_from_dmabuf_drm(struct fd_device *dev, int fd)
{
   uint32_t handle;
   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret)
      return 0;
   return handle;
}

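/* Import a bo from a dma-buf fd.  The bo size is taken from lseek(SEEK_END)
 * on the fd.  Unlike the raw GEM handle case, the dma-buf fd stays valid, so
 * if the lookup races with the final unref of an existing import we simply
 * restart the import.
 */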
struct fd_bo *
fd_bo_from_dmabuf_drm(struct fd_device *dev, int fd)
{
   int size;
   uint32_t handle;
   struct fd_bo *bo;

restart:
   simple_mtx_lock(&table_lock);
   handle = dev->funcs->handle_from_dmabuf(dev, fd);
   if (!handle) {
      simple_mtx_unlock(&table_lock);
      return NULL;
   }

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   /* lseek() to get bo size */
   size = lseek(fd, 0, SEEK_END);
   lseek(fd, 0, SEEK_CUR);

   bo = import_bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   if (bo == &zombie)
      goto restart;

   return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
   return dev->funcs->bo_from_dmabuf(dev, fd);
}

struct fd_bo *
fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
   struct drm_gem_open req = {
      .name = name,
   };
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);

   /* check name table first, to see if bo is already open: */
   bo = lookup_bo(dev->name_table, name);
   if (bo)
      goto out_unlock;

restart:
   if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
      ERROR_MSG("gem-open failed: %s", strerror(errno));
      goto out_unlock;
   }

   bo = lookup_bo(dev->handle_table, req.handle);
   if (bo)
      goto out_unlock;

   bo = import_bo_from_handle(dev, req.size, req.handle);
   if (bo) {
      set_name(bo, name);
      VG_BO_ALLOC(bo);
   }

out_unlock:
   simple_mtx_unlock(&table_lock);

   if (bo == &zombie) {
      /* The retry path re-enters at 'restart', where lookup_bo() expects
       * table_lock to be held, so re-take it first:
       */
      simple_mtx_lock(&table_lock);
      goto restart;
   }

   return bo;
}

void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
   bo->reloc_flags |= FD_RELOC_DUMP;
}

struct fd_bo *
fd_bo_ref(struct fd_bo *bo)
{
   ref(&bo->refcnt);
   return bo;
}

static void
bo_finalize(struct fd_bo *bo)
{
   if (bo->funcs->finalize)
      bo->funcs->finalize(bo);
}

static void
dev_flush(struct fd_device *dev)
{
   if (dev->funcs->flush)
      dev->funcs->flush(dev);
}

static void
bo_del(struct fd_bo *bo)
{
   bo->funcs->destroy(bo);
}

static bool
try_recycle(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;

   /* No point in BO cache for suballocated buffers: */
   if (suballoc_bo(bo))
      return false;

   if (bo->bo_reuse == BO_CACHE)
      return fd_bo_cache_free(&dev->bo_cache, bo) == 0;

   if (bo->bo_reuse == RING_CACHE)
      return fd_bo_cache_free(&dev->ring_cache, bo) == 0;

   return false;
}

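/* Drop a reference.  On the final unref the bo is either returned to its
 * BO cache, or finalized, flushed from any deferred submits, and destroyed.
 */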
void
fd_bo_del(struct fd_bo *bo)
{
   if (!unref(&bo->refcnt))
      return;

   if (try_recycle(bo))
      return;

   struct fd_device *dev = bo->dev;

   bo_finalize(bo);
   dev_flush(dev);
   fd_alloc_log(bo, FD_ALLOC_ACTIVE, FD_ALLOC_NONE);
   bo_del(bo);
}

void
fd_bo_del_array(struct fd_bo **bos, int count)
{
   if (!count)
      return;

   struct fd_device *dev = bos[0]->dev;

   /*
    * First pass, drop from the array the objects that either (a) still have
    * a live reference, or (b) no longer have a reference but are released
    * to the BO cache:
    */

   for (int i = 0; i < count; i++) {
      if (!unref(&bos[i]->refcnt) || try_recycle(bos[i])) {
         bos[i--] = bos[--count];
      } else {
         /* We are going to delete this one, so finalize it first: */
         bo_finalize(bos[i]);
      }
   }

   dev_flush(dev);

   /*
    * Second pass, delete all of the objects remaining after first pass.
    */

   for (int i = 0; i < count; i++) {
      fd_alloc_log(bos[i], FD_ALLOC_ACTIVE, FD_ALLOC_NONE);
      bo_del(bos[i]);
   }
}

/**
 * Special interface for fd_bo_cache to batch delete a list of handles.
 * Similar to fd_bo_del_array() but bypasses the BO cache (since it is
 * called from the BO cache to expire a list of BOs).
 */
void
fd_bo_del_list_nocache(struct list_head *list)
{
   if (list_is_empty(list))
      return;

   struct fd_device *dev = first_bo(list)->dev;

   foreach_bo (bo, list) {
      bo_finalize(bo);
   }

   dev_flush(dev);

   foreach_bo_safe (bo, list) {
      assert(bo->refcnt == 0);
      bo_del(bo);
   }
}

void
fd_bo_fini_fences(struct fd_bo *bo)
{
   for (int i = 0; i < bo->nr_fences; i++)
      fd_fence_del(bo->fences[i]);

   if (bo->fences != &bo->_inline_fence)
      free(bo->fences);
}

void
fd_bo_close_handle_drm(struct fd_bo *bo)
{
   struct drm_gem_close req = {
      .handle = bo->handle,
   };
   drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}

/**
 * Helper called by the backend's bo->funcs->destroy() to tear down state
 * common to all backends: fences, the userspace mapping, and the
 * handle/name table entries (taking table_lock internally), before
 * freeing the bo itself.
 */
void
fd_bo_fini_common(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;
   uint32_t handle = bo->handle;

   VG_BO_FREE(bo);

   fd_bo_fini_fences(bo);

   if (bo->map)
      os_munmap(bo->map, bo->size);

   if (handle) {
      simple_mtx_lock(&table_lock);
      dev->funcs->bo_close_handle(bo);
      _mesa_hash_table_remove_key(dev->handle_table, &handle);
      if (bo->name)
         _mesa_hash_table_remove_key(dev->name_table, &bo->name);
      simple_mtx_unlock(&table_lock);
   }

   free(bo);
}

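/* Flush any deferred submits that reference this bo, so that the kernel
 * has seen the work that will signal the bo's fences.  A snapshot of the
 * fence list is taken under fence_lock and each fence is flushed outside
 * the lock.
 */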
static void
bo_flush(struct fd_bo *bo)
{
   MESA_TRACE_FUNC();

   simple_mtx_lock(&fence_lock);
   unsigned nr = bo->nr_fences;
   struct fd_fence *fences[nr];
   for (unsigned i = 0; i < nr; i++)
      fences[i] = fd_fence_ref_locked(bo->fences[i]);
   simple_mtx_unlock(&fence_lock);

   for (unsigned i = 0; i < nr; i++) {
      /* Use the snapshot taken under fence_lock, since bo->fences[] can be
       * modified concurrently once the lock is dropped:
       */
      fd_fence_flush(fences[i]);
      fd_fence_del(fences[i]);
   }
}

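/* Export a flink name for the bo (creating one on first use).  Exporting
 * makes the bo effectively shared: it is excluded from the BO cache and
 * any pending work referencing it is flushed.  Not supported for
 * suballocated bo's.
 */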
int
fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
   if (suballoc_bo(bo))
      return -1;

   if (!bo->name) {
      struct drm_gem_flink req = {
         .handle = bo->handle,
      };
      int ret;

      ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
      if (ret) {
         return ret;
      }

      simple_mtx_lock(&table_lock);
      set_name(bo, req.name);
      simple_mtx_unlock(&table_lock);
      bo->bo_reuse = NO_CACHE;
      bo->alloc_flags |= FD_BO_SHARED;
      bo_flush(bo);
   }

   *name = bo->name;

   return 0;
}

uint32_t
fd_bo_handle(struct fd_bo *bo)
{
   if (suballoc_bo(bo))
      return 0;
   bo->bo_reuse = NO_CACHE;
   bo->alloc_flags |= FD_BO_SHARED;
   bo_flush(bo);
   return bo->handle;
}

int
fd_bo_dmabuf_drm(struct fd_bo *bo)
{
   int ret, prime_fd;

   ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC | DRM_RDWR,
                            &prime_fd);
   if (ret < 0)
      return ret;

   return prime_fd;
}

int
fd_bo_dmabuf(struct fd_bo *bo)
{
   int ret;

   if (suballoc_bo(bo))
      return -1;

   ret = bo->funcs->dmabuf(bo);
   if (ret < 0) {
      ERROR_MSG("failed to get dmabuf fd: %d", ret);
      return ret;
   }

   bo->bo_reuse = NO_CACHE;
   bo->alloc_flags |= FD_BO_SHARED;
   bo_flush(bo);

   return ret;
}

uint32_t
fd_bo_size(struct fd_bo *bo)
{
   return bo->size;
}

bool
fd_bo_is_cached(struct fd_bo *bo)
{
   return !!(bo->alloc_flags & FD_BO_CACHED_COHERENT);
}

void
fd_bo_set_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size)
{
   if (!bo->funcs->set_metadata)
      return;
   bo->funcs->set_metadata(bo, metadata, metadata_size);
}

int
fd_bo_get_metadata(struct fd_bo *bo, void *metadata, uint32_t metadata_size)
{
   if (!bo->funcs->get_metadata)
      return -ENOSYS;
   return bo->funcs->get_metadata(bo, metadata, metadata_size);
}

void *
fd_bo_map_os_mmap(struct fd_bo *bo)
{
   uint64_t offset;
   int ret;
   ret = bo->funcs->offset(bo, &offset);
   if (ret) {
      return NULL;
   }
   return os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  bo->dev->fd, offset);
}

/* For internal use only, does not check FD_BO_NOMAP: */
void *
__fd_bo_map(struct fd_bo *bo)
{
   if (!bo->map) {
      bo->map = bo->funcs->map(bo);
      if (bo->map == MAP_FAILED) {
         ERROR_MSG("mmap failed: %s", strerror(errno));
         bo->map = NULL;
      }
   }

   return bo->map;
}

void *
fd_bo_map(struct fd_bo *bo)
{
   /* don't allow mmap'ing something allocated with FD_BO_NOMAP
    * for sanity
    */
   if (bo->alloc_flags & FD_BO_NOMAP)
      return NULL;

   return __fd_bo_map(bo);
}

static void *
fd_bo_map_for_upload(struct fd_bo *bo)
{
   void *addr = __fd_bo_map(bo);
   if (bo->alloc_flags & FD_BO_NOMAP)
      VG_BO_MAPPED(bo);

   return addr;
}

void
fd_bo_upload(struct fd_bo *bo, void *src, unsigned off, unsigned len)
{
   if (bo->funcs->upload) {
      bo->funcs->upload(bo, src, off, len);
      return;
   }

   memcpy((uint8_t *)fd_bo_map_for_upload(bo) + off, src, len);
}

bool
fd_bo_prefer_upload(struct fd_bo *bo, unsigned len)
{
   if (bo->funcs->prefer_upload)
      return bo->funcs->prefer_upload(bo, len);

   return false;
}

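/* Wait (or check, with FD_BO_PREP_NOSYNC) for CPU access to a bo, flushing
 * any deferred submits that must be kicked off first.  Returns zero once the
 * bo is idle, or an error such as -EBUSY for a NOSYNC/FLUSH-only check of a
 * busy bo.
 *
 * Illustrative usage (caller-side setup assumed; prep flags are defined in
 * freedreno_drmif.h):
 *
 *    if (!fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_READ | FD_BO_PREP_NOSYNC)) {
 *       ... safe to read the mapping without stalling ...
 *    }
 */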
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int
fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   enum fd_bo_state state = fd_bo_state(bo);

   if (state == FD_BO_STATE_IDLE)
      return 0;

   MESA_TRACE_FUNC();

   if (op & (FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH)) {
      if (op & FD_BO_PREP_FLUSH)
         bo_flush(bo);

      /* If we have *only* been asked to flush, then we aren't really
       * interested in whether shared buffers are busy, so avoid
       * the kernel ioctl.
       */
      if ((state == FD_BO_STATE_BUSY) ||
          (op == FD_BO_PREP_FLUSH))
         return -EBUSY;
   }

   /* In case the bo is referenced by a deferred submit, flush up to the
    * required fence now:
    */
   bo_flush(bo);

   /* FD_BO_PREP_FLUSH is purely a frontend flag, and is not seen/handled
    * by backend or kernel:
    */
   op &= ~FD_BO_PREP_FLUSH;

   if (!op)
      return 0;

   /* Wait on fences.. first grab a reference under the fence lock, and then
    * wait and drop ref.
    */
   simple_mtx_lock(&fence_lock);
   unsigned nr = bo->nr_fences;
   struct fd_fence *fences[nr];
   for (unsigned i = 0; i < nr; i++)
      fences[i] = fd_fence_ref_locked(bo->fences[i]);
   simple_mtx_unlock(&fence_lock);

   for (unsigned i = 0; i < nr; i++) {
      fd_fence_wait(fences[i]);
      fd_fence_del(fences[i]);
   }

   /* expire completed fences */
   fd_bo_state(bo);

   /* Non-shared buffers will not have any external usage (ie. fences
    * that we are not aware of) so nothing more to do.
    */
   if (!(bo->alloc_flags & FD_BO_SHARED))
      return 0;

   /* If buffer is shared, but we are using explicit sync, no need to
    * fallback to implicit sync:
    */
   if (pipe && pipe->no_implicit_sync)
      return 0;

   return bo->funcs->cpu_prep(bo, pipe, op);
}

/**
 * Cleanup completed fences, dropping pipe references.
 *
 * Normally we expect at most a single fence, the exception being bo's
 * shared between contexts
 */
static void
cleanup_fences(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&fence_lock);

   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_fence *f = bo->fences[i];

      if (fd_fence_before(f->pipe->control->fence, f->ufence))
         continue;

      bo->nr_fences--;

      if (bo->nr_fences > 0) {
         /* Shuffle up the last entry to replace the current slot: */
         bo->fences[i] = bo->fences[bo->nr_fences];
         i--;
      }

      fd_fence_del_locked(f);
   }
}

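/* Track a fence against the bo (called with fence_lock held).  A fence from
 * a pipe that already has one recorded simply replaces the older fence; the
 * fence array grows past the embedded _inline_fence slot only when the bo is
 * busy on multiple pipes at once.
 */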
void
fd_bo_add_fence(struct fd_bo *bo, struct fd_fence *fence)
{
   simple_mtx_assert_locked(&fence_lock);

   if (bo->alloc_flags & _FD_BO_NOSYNC)
      return;

   /* The common case is bo re-used on the same pipe it had previously
    * been used on, so just replace the previous fence.
    */
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_fence *f = bo->fences[i];
      if (f == fence)
         return;
      if (f->pipe == fence->pipe) {
         assert(fd_fence_before(f->ufence, fence->ufence));
         fd_fence_del_locked(f);
         bo->fences[i] = fd_fence_ref_locked(fence);
         return;
      }
   }

   cleanup_fences(bo);

   /* The first time we grow past a single fence, we need some special
    * handling, as we've been using the embedded _inline_fence to avoid
    * a separate allocation:
    */
   if (unlikely((bo->nr_fences == 1) &&
                (bo->fences == &bo->_inline_fence))) {
      bo->nr_fences = bo->max_fences = 0;
      bo->fences = NULL;
      APPEND(bo, fences, bo->_inline_fence);
   }

   APPEND(bo, fences, fd_fence_ref_locked(fence));
}

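/* Classify the bo's current state: FD_BO_STATE_UNKNOWN for shared/nosync
 * buffers (fences we can't see may exist), FD_BO_STATE_IDLE when no
 * unsignaled fences remain, otherwise FD_BO_STATE_BUSY.  Also expires
 * completed fences as a side effect.
 */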
enum fd_bo_state
fd_bo_state(struct fd_bo *bo)
{
   /* NOTE: check the nosync case before touching fence_lock in case we end
    * up here recursively from dropping pipe reference in cleanup_fences().
    * The pipe's control buffer is specifically nosync to avoid recursive
    * lock problems here.
    */
   if (bo->alloc_flags & (FD_BO_SHARED | _FD_BO_NOSYNC))
      return FD_BO_STATE_UNKNOWN;

   /* Speculatively check, if we already know we're idle, no need to acquire
    * lock and do the cleanup_fences() dance:
    */
   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   simple_mtx_lock(&fence_lock);
   cleanup_fences(bo);
   simple_mtx_unlock(&fence_lock);

   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   return FD_BO_STATE_BUSY;
}