xref: /aosp_15_r20/external/mesa3d/src/panfrost/lib/kmod/pan_kmod.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 */

#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/log.h"
#include "util/macros.h"
#include "util/u_memory.h"
#include "pan_kmod.h"
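/* Entry points of the per-driver backends; the definitions live in the
 * panfrost and panthor kmod backends.
 */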
extern const struct pan_kmod_ops panfrost_kmod_ops;
extern const struct pan_kmod_ops panthor_kmod_ops;
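/* Registry of supported kernel drivers. pan_kmod_dev_create() matches the
 * DRM driver name reported by the kernel against this table to pick the
 * right backend.
 */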
static const struct {
   const char *name;
   const struct pan_kmod_ops *ops;
} drivers[] = {
   {
      "panfrost",
      &panfrost_kmod_ops,
   },
   {
      "panthor",
      &panthor_kmod_ops,
   },
};
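/* Default allocator, used when the caller passes a NULL allocator to
 * pan_kmod_dev_create(): plain zeroed heap allocations, with the transient
 * hint ignored.
 */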
static void *
default_zalloc(const struct pan_kmod_allocator *allocator, size_t size,
               UNUSED bool transient)
{
   return os_calloc(1, size);
}

static void
default_free(const struct pan_kmod_allocator *allocator, void *data)
{
   os_free(data);
}

static const struct pan_kmod_allocator default_allocator = {
   .zalloc = default_zalloc,
   .free = default_free,
};
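/* Create a device object from a DRM file descriptor. The kernel driver name
 * returned by drmGetVersion() selects the backend. Returns NULL if the DRM
 * version can't be queried or if the driver is not one we support.
 */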
struct pan_kmod_dev *
pan_kmod_dev_create(int fd, uint32_t flags,
                    const struct pan_kmod_allocator *allocator)
{
   drmVersionPtr version = drmGetVersion(fd);
   struct pan_kmod_dev *dev = NULL;

   if (!version)
      return NULL;

   if (!allocator)
      allocator = &default_allocator;

   for (unsigned i = 0; i < ARRAY_SIZE(drivers); i++) {
      if (!strcmp(drivers[i].name, version->name)) {
         const struct pan_kmod_ops *ops = drivers[i].ops;

         dev = ops->dev_create(fd, flags, version, allocator);
         break;
      }
   }

   drmFreeVersion(version);
   return dev;
}
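/* Destroy a device object created with pan_kmod_dev_create(). */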
void
pan_kmod_dev_destroy(struct pan_kmod_dev *dev)
{
   dev->ops->dev_destroy(dev);
}
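/* Allocate a buffer object and register it in the device's handle_to_bo
 * sparse array, so that a later dma-buf import resolving to the same handle
 * finds the existing object instead of creating a duplicate.
 */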
struct pan_kmod_bo *
pan_kmod_bo_alloc(struct pan_kmod_dev *dev, struct pan_kmod_vm *exclusive_vm,
                  size_t size, uint32_t flags)
{
   struct pan_kmod_bo *bo;

   bo = dev->ops->bo_alloc(dev, exclusive_vm, size, flags);
   if (!bo)
      return NULL;

   /* We intentionally don't take the lock when filling the sparse array,
    * because we just created the BO, and haven't exported it yet, so
    * there's no risk of imports racing with our BO insertion.
    */
   struct pan_kmod_bo **slot =
      util_sparse_array_get(&dev->handle_to_bo.array, bo->handle);

   if (!slot) {
      mesa_loge("failed to allocate slot in the handle_to_bo array");
      bo->dev->ops->bo_free(bo);
      return NULL;
   }

   assert(*slot == NULL);
   *slot = bo;
   return bo;
}
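/* Release a buffer object reference. When the refcount drops to zero, the
 * handle_to_bo lock is taken and the refcount re-checked before freeing,
 * since a concurrent import may have revived the BO in the meantime.
 */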
void
pan_kmod_bo_put(struct pan_kmod_bo *bo)
{
   if (!bo)
      return;

   int32_t refcnt = p_atomic_dec_return(&bo->refcnt);

   assert(refcnt >= 0);

   if (refcnt)
      return;

   struct pan_kmod_dev *dev = bo->dev;

   simple_mtx_lock(&dev->handle_to_bo.lock);

   /* If some import took a ref on this BO while we were trying to acquire the
    * lock, skip the destruction.
    */
   if (!p_atomic_read(&bo->refcnt)) {
      struct pan_kmod_bo **slot = (struct pan_kmod_bo **)util_sparse_array_get(
         &dev->handle_to_bo.array, bo->handle);

      assert(slot);
      *slot = NULL;
      bo->dev->ops->bo_free(bo);
   }

   simple_mtx_unlock(&dev->handle_to_bo.lock);
}
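/* An import that resolves to a BO we already track must agree with the
 * original allocation on the flags covered by the mask below.
 */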
static bool
pan_kmod_bo_check_import_flags(struct pan_kmod_bo *bo, uint32_t flags)
{
   uint32_t mask = PAN_KMOD_BO_FLAG_EXECUTABLE |
                   PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT | PAN_KMOD_BO_FLAG_NO_MMAP |
                   PAN_KMOD_BO_FLAG_GPU_UNCACHED;

   /* If the BO exists, make sure the import flags match the original flags. */
   return (bo->flags & mask) == (flags & mask);
}
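/* Import a dma-buf as a buffer object. The handle returned by
 * drmPrimeFDToHandle() is looked up in handle_to_bo: if the BO is already
 * known, we validate the flags and take a reference; otherwise a new BO is
 * created around the handle. The whole operation runs under the
 * handle_to_bo lock so it can't race with pan_kmod_bo_put().
 */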
struct pan_kmod_bo *
pan_kmod_bo_import(struct pan_kmod_dev *dev, int fd, uint32_t flags)
{
   struct pan_kmod_bo *bo = NULL;
   struct pan_kmod_bo **slot;

   simple_mtx_lock(&dev->handle_to_bo.lock);

   uint32_t handle;
   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret)
      goto err_unlock;

   slot = util_sparse_array_get(&dev->handle_to_bo.array, handle);
   if (!slot)
      goto err_close_handle;

   if (*slot) {
      if (!pan_kmod_bo_check_import_flags(*slot, flags)) {
         mesa_loge("invalid import flags");
         goto err_unlock;
      }

      bo = *slot;

      p_atomic_inc(&bo->refcnt);
   } else {
      size_t size = lseek(fd, 0, SEEK_END);
      if (size == 0 || size == (size_t)-1) {
         mesa_loge("invalid dmabuf size");
         goto err_close_handle;
      }

      bo = dev->ops->bo_import(dev, handle, size, flags);
      if (!bo)
         goto err_close_handle;

      *slot = bo;
   }

   assert(p_atomic_read(&bo->refcnt) > 0);

   simple_mtx_unlock(&dev->handle_to_bo.lock);

   return bo;

err_close_handle:
   drmCloseBufferHandle(dev->fd, handle);

err_unlock:
   simple_mtx_unlock(&dev->handle_to_bo.lock);

   return NULL;
}