xref: /aosp_15_r20/external/mesa3d/src/panfrost/lib/kmod/panthor_kmod.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2023 Collabora, Ltd.
3  *
4  * SPDX-License-Identifier: MIT
5  */
6 
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <string.h>
10 #include <xf86drm.h>
11 
12 #include "util/hash_table.h"
13 #include "util/libsync.h"
14 #include "util/macros.h"
15 #include "util/os_time.h"
16 #include "util/simple_mtx.h"
17 #include "util/u_debug.h"
18 #include "util/vma.h"
19 
20 #include "drm-uapi/dma-buf.h"
21 #include "drm-uapi/panthor_drm.h"
22 
23 #include "pan_kmod_backend.h"
24 
25 const struct pan_kmod_ops panthor_kmod_ops;
26 
27 /* Objects used to track VAs returned through async unmaps. */
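/* A collection node is queued on the VM's auto_va.gc_list when an async unmap
 * is submitted, and its VA range is only returned to the VA heap once the VM
 * timeline reaches sync_point (see panthor_kmod_vm_collect_freed_vas()).
 */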
28 struct panthor_kmod_va_collect {
29    struct list_head node;
30 
31    /* VM sync point at which the VA range should be released. */
32    uint64_t sync_point;
33 
34    /* Start of the VA range to release. */
35    uint64_t va;
36 
37    /* Size of the VA range to release. */
38    size_t size;
39 };
40 
41 struct panthor_kmod_vm {
42    struct pan_kmod_vm base;
43 
44    /* Fields used for auto-VA management. Since the kernel doesn't do it for
45     * us, we need to deal with the VA allocation ourselves.
46     */
47    struct {
48       /* Lock protecting VA allocation/freeing. */
49       simple_mtx_t lock;
50 
51       /* VA heap used to automatically assign a VA. */
52       struct util_vma_heap heap;
53 
54       /* VA ranges to garbage collect. */
55       struct list_head gc_list;
56    } auto_va;
57 
58    /* Fields used for VM activity tracking (TRACK_ACTIVITY flag). */
59    struct {
60       /* VM sync handle. */
61       uint32_t handle;
62 
63       /* Current VM sync point. Incremented every time a GPU job or VM
64        * operation is issued.
65        */
66       uint64_t point;
67 
68       /* Lock protecting insertion of sync points to the timeline syncobj. */
69       simple_mtx_t lock;
70    } sync;
71 };
72 
73 struct panthor_kmod_dev {
74    struct pan_kmod_dev base;
75 
76    /* Userspace mapping of the LATEST_FLUSH_ID register page. */
77    uint32_t *flush_id;
78 
79    /* Cached device properties. Filled at device creation time. */
80    struct {
81       struct drm_panthor_gpu_info gpu;
82       struct drm_panthor_csif_info csif;
83       struct drm_panthor_timestamp_info timestamp;
84    } props;
85 };
86 
87 struct panthor_kmod_bo {
88    struct pan_kmod_bo base;
89    struct {
90       /* BO sync handle. Points to the VM syncobj if the BO is private to a VM. */
91       uint32_t handle;
92 
93       /* BO read sync point. Zero when the object is shared. */
94       uint64_t read_point;
95 
96       /* BO write sync point. Zero when the object is shared. */
97       uint64_t write_point;
98    } sync;
99 };
100 
101 static struct pan_kmod_dev *
102 panthor_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
103                         const struct pan_kmod_allocator *allocator)
104 {
105    struct panthor_kmod_dev *panthor_dev =
106       pan_kmod_alloc(allocator, sizeof(*panthor_dev));
107    if (!panthor_dev) {
108       mesa_loge("failed to allocate a panthor_kmod_dev object");
109       return NULL;
110    }
111 
112    /* Cache GPU and CSIF information. */
113    struct drm_panthor_dev_query query = {
114       .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
115       .size = sizeof(panthor_dev->props.gpu),
116       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.gpu,
117    };
118 
119    int ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
120    if (ret) {
121       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
122       goto err_free_dev;
123    }
124 
125    query = (struct drm_panthor_dev_query){
126       .type = DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
127       .size = sizeof(panthor_dev->props.csif),
128       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.csif,
129    };
130 
131    ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
132    if (ret) {
133       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
134       goto err_free_dev;
135    }
136 
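   /* DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO only exists from panthor uAPI
    * version 1.1 onwards, so skip the query on older kernels (see the
    * matching version check in panthor_kmod_query_timestamp()).
    */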
137    if (version->version_major > 1 || version->version_minor >= 1) {
138       query = (struct drm_panthor_dev_query){
139          .type = DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,
140          .size = sizeof(panthor_dev->props.timestamp),
141          .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.timestamp,
142       };
143 
144       ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
145       if (ret) {
146          mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
147          goto err_free_dev;
148       }
149    }
150 
151    /* Map the LATEST_FLUSH_ID register at device creation time. */
152    panthor_dev->flush_id = os_mmap(0, getpagesize(), PROT_READ, MAP_SHARED, fd,
153                                    DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
154    if (panthor_dev->flush_id == MAP_FAILED) {
155       mesa_loge("failed to mmap the LATEST_FLUSH_ID register (err=%d)", errno);
156       goto err_free_dev;
157    }
158 
159    assert(!ret);
160    pan_kmod_dev_init(&panthor_dev->base, fd, flags, version, &panthor_kmod_ops,
161                      allocator);
162    return &panthor_dev->base;
163 
164 err_free_dev:
165    pan_kmod_free(allocator, panthor_dev);
166    return NULL;
167 }
168 
169 static void
170 panthor_kmod_dev_destroy(struct pan_kmod_dev *dev)
171 {
172    struct panthor_kmod_dev *panthor_dev =
173       container_of(dev, struct panthor_kmod_dev, base);
174 
175    os_munmap(panthor_dev->flush_id, getpagesize());
176    pan_kmod_dev_cleanup(dev);
177    pan_kmod_free(dev->allocator, panthor_dev);
178 }
179 
180 static void
181 panthor_dev_query_thread_props(const struct panthor_kmod_dev *panthor_dev,
182                                struct pan_kmod_dev_props *props)
183 {
184    props->max_threads_per_wg = panthor_dev->props.gpu.thread_max_workgroup_size;
185    props->max_threads_per_core = panthor_dev->props.gpu.max_threads;
186    props->num_registers_per_core =
187       panthor_dev->props.gpu.thread_features & 0x3fffff;
188 
189    /* We assume that all thread properties are populated. If we ever have a GPU
190     * where one of the THREAD_xxx registers is zero, we can always add a
191     * quirk here.
192     */
193    assert(props->max_threads_per_wg && props->max_threads_per_core &&
194           props->num_registers_per_core);
195 
196    /* There is no THREAD_TLS_ALLOC register on v10+, and the maximum number
197     * of TLS instances per core is assumed to be the maximum number of threads
198     * per core.
199     */
200    props->max_tls_instance_per_core = props->max_threads_per_core;
201 }
202 
203 static void
204 panthor_dev_query_props(const struct pan_kmod_dev *dev,
205                         struct pan_kmod_dev_props *props)
206 {
207    struct panthor_kmod_dev *panthor_dev =
208       container_of(dev, struct panthor_kmod_dev, base);
209 
210    *props = (struct pan_kmod_dev_props){
211       .gpu_prod_id = panthor_dev->props.gpu.gpu_id >> 16,
212       .gpu_revision = panthor_dev->props.gpu.gpu_id & 0xffff,
213       .gpu_variant = panthor_dev->props.gpu.core_features & 0xff,
214       .shader_present = panthor_dev->props.gpu.shader_present,
215       .tiler_features = panthor_dev->props.gpu.tiler_features,
216       .mem_features = panthor_dev->props.gpu.mem_features,
217       .mmu_features = panthor_dev->props.gpu.mmu_features,
218 
219       /* This register does not exist because AFBC is no longer optional. */
220       .afbc_features = 0,
221 
222       /* Access to the timestamp from the GPU is always supported on Panthor. */
223       .gpu_can_query_timestamp = true,
224 
225       .timestamp_frequency = panthor_dev->props.timestamp.timestamp_frequency,
226    };
227 
228    static_assert(sizeof(props->texture_features) ==
229                     sizeof(panthor_dev->props.gpu.texture_features),
230                  "Mismatch in texture_features array size");
231 
232    memcpy(props->texture_features, panthor_dev->props.gpu.texture_features,
233           sizeof(props->texture_features));
234 
235    panthor_dev_query_thread_props(panthor_dev, props);
236 }
237 
238 static struct pan_kmod_va_range
239 panthor_kmod_dev_query_user_va_range(const struct pan_kmod_dev *dev)
240 {
241    struct panthor_kmod_dev *panthor_dev =
242       container_of(dev, struct panthor_kmod_dev, base);
243    uint8_t va_bits = MMU_FEATURES_VA_BITS(panthor_dev->props.gpu.mmu_features);
244 
245    /* If we have less than 32-bit VA space it starts to be tricky, so let's
246     * assume we always have at least that.
247     */
248    assert(va_bits >= 32);
249 
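   /* Worked example: a 48-bit VA space yields a 1ull << 47 = 128 TiB user
    * range, while a 32-bit VA space yields 3 * (1ull << 30) = 3 GiB,
    * matching the 3G/1G split described below.
    */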
250    return (struct pan_kmod_va_range){
251       .start = 0,
252 
253       /* 3G/1G user/kernel VA split for 32-bit VA space. Otherwise, we reserve
254        * half of the VA space for kernel objects.
255        */
256       .size =
257          va_bits == 32 ? (1ull << (va_bits - 2)) * 3 : 1ull << (va_bits - 1),
258    };
259 }
260 
261 static uint32_t
262 to_panthor_bo_flags(uint32_t flags)
263 {
264    uint32_t panthor_flags = 0;
265 
266    if (flags & PAN_KMOD_BO_FLAG_NO_MMAP)
267       panthor_flags |= DRM_PANTHOR_BO_NO_MMAP;
268 
269    return panthor_flags;
270 }
271 
272 static struct pan_kmod_bo *
273 panthor_kmod_bo_alloc(struct pan_kmod_dev *dev,
274                       struct pan_kmod_vm *exclusive_vm, size_t size,
275                       uint32_t flags)
276 {
277    /* We don't support allocating on-fault. */
278    if (flags & PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT) {
279       mesa_loge("panthor_kmod doesn't support PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT");
280       return NULL;
281    }
282 
283    struct panthor_kmod_vm *panthor_vm =
284       exclusive_vm ? container_of(exclusive_vm, struct panthor_kmod_vm, base)
285                    : NULL;
286    struct panthor_kmod_bo *bo = pan_kmod_dev_alloc(dev, sizeof(*bo));
287    if (!bo) {
288       mesa_loge("failed to allocate a panthor_kmod_bo object");
289       return NULL;
290    }
291 
292    struct drm_panthor_bo_create req = {
293       .size = size,
294       .flags = to_panthor_bo_flags(flags),
295       .exclusive_vm_id = panthor_vm ? panthor_vm->base.handle : 0,
296    };
297 
298    int ret = drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_BO_CREATE, &req);
299    if (ret) {
300       mesa_loge("DRM_IOCTL_PANTHOR_BO_CREATE failed (err=%d)", errno);
301       goto err_free_bo;
302    }
303 
304    if (!exclusive_vm) {
305       /* For buffers we know will be shared, create our own syncobj. */
306       int ret = drmSyncobjCreate(dev->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
307                                  &bo->sync.handle);
308       if (ret) {
309          mesa_loge("drmSyncobjCreate() failed (err=%d)", errno);
310          goto err_destroy_bo;
311       }
312    } else {
313       /* If the buffer is private to the VM, we just use the VM syncobj. */
314       bo->sync.handle = panthor_vm->sync.handle;
315    }
316 
317    bo->sync.read_point = bo->sync.write_point = 0;
318 
319    pan_kmod_bo_init(&bo->base, dev, exclusive_vm, req.size, flags, req.handle);
320    return &bo->base;
321 
322 err_destroy_bo:
323    drmCloseBufferHandle(dev->fd, bo->base.handle);
324 err_free_bo:
325    pan_kmod_dev_free(dev, bo);
326    return NULL;
327 }
328 
329 static void
330 panthor_kmod_bo_free(struct pan_kmod_bo *bo)
331 {
332    struct panthor_kmod_bo *panthor_bo =
333       container_of(bo, struct panthor_kmod_bo, base);
334 
335    if (!bo->exclusive_vm)
336       drmSyncobjDestroy(bo->dev->fd, panthor_bo->sync.handle);
337 
338    drmCloseBufferHandle(bo->dev->fd, bo->handle);
339    pan_kmod_dev_free(bo->dev, bo);
340 }
341 
342 static struct pan_kmod_bo *
343 panthor_kmod_bo_import(struct pan_kmod_dev *dev, uint32_t handle, size_t size,
344                        uint32_t flags)
345 {
346    struct panthor_kmod_bo *panthor_bo =
347       pan_kmod_dev_alloc(dev, sizeof(*panthor_bo));
348    if (!panthor_bo) {
349       mesa_loge("failed to allocate a panthor_kmod_bo object");
350       return NULL;
351    }
352 
353    /* Create an unsignalled syncobj on import. It will serve as a
354     * temporary container for the exported dmabuf sync file.
355     */
356    int ret = drmSyncobjCreate(dev->fd, 0, &panthor_bo->sync.handle);
357    if (ret) {
358       mesa_loge("drmSyncobjCreate() failed (err=%d)", errno);
359       goto err_free_bo;
360    }
361 
362    pan_kmod_bo_init(&panthor_bo->base, dev, NULL, size,
363                     flags | PAN_KMOD_BO_FLAG_IMPORTED, handle);
364    return &panthor_bo->base;
365 
366 err_free_bo:
367    pan_kmod_dev_free(dev, panthor_bo);
368    return NULL;
369 }
370 
371 static int
372 panthor_kmod_bo_export(struct pan_kmod_bo *bo, int dmabuf_fd)
373 {
374    struct panthor_kmod_bo *panthor_bo =
375       container_of(bo, struct panthor_kmod_bo, base);
376 
377    bool shared =
378       bo->flags & (PAN_KMOD_BO_FLAG_EXPORTED | PAN_KMOD_BO_FLAG_IMPORTED);
379 
380    /* If the BO wasn't already shared, we migrate our internal sync points to
381     * the dmabuf itself, so implicit sync can work correctly after this point.
382     */
383    if (!shared) {
384       if (panthor_bo->sync.read_point || panthor_bo->sync.write_point) {
385          struct dma_buf_import_sync_file isync = {
386             .flags = DMA_BUF_SYNC_RW,
387          };
388          int ret = drmSyncobjExportSyncFile(bo->dev->fd,
389                                             panthor_bo->sync.handle, &isync.fd);
390          if (ret) {
391             mesa_loge("drmSyncobjExportSyncFile() failed (err=%d)", errno);
392             return -1;
393          }
394 
395          ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &isync);
396          close(isync.fd);
397          if (ret) {
398             mesa_loge("DMA_BUF_IOCTL_IMPORT_SYNC_FILE failed (err=%d)", errno);
399             return -1;
400          }
401       }
402 
403       /* Make sure we reset the syncobj on export. We will use it as a
404        * temporary binary syncobj to import sync_file FDs from now on.
405        */
406       int ret = drmSyncobjReset(bo->dev->fd, &panthor_bo->sync.handle, 1);
407       if (ret) {
408          mesa_loge("drmSyncobjReset() failed (err=%d)", errno);
409          return -1;
410       }
411 
412       panthor_bo->sync.read_point = 0;
413       panthor_bo->sync.write_point = 0;
414    }
415 
416    bo->flags |= PAN_KMOD_BO_FLAG_EXPORTED;
417    return 0;
418 }
419 
420 static off_t
421 panthor_kmod_bo_get_mmap_offset(struct pan_kmod_bo *bo)
422 {
423    struct drm_panthor_bo_mmap_offset req = {.handle = bo->handle};
424    int ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &req);
425 
426    if (ret) {
427       mesa_loge("DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET failed (err=%d)", errno);
428       return -1;
429    }
430 
431    return req.offset;
432 }
433 
434 static bool
435 panthor_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
436                      bool for_read_only_access)
437 {
438    struct panthor_kmod_bo *panthor_bo =
439       container_of(bo, struct panthor_kmod_bo, base);
440    bool shared =
441       bo->flags & (PAN_KMOD_BO_FLAG_EXPORTED | PAN_KMOD_BO_FLAG_IMPORTED);
442 
443    if (shared) {
444       /* If the object is shared, we have to do this export sync-file dance
445        * to reconcile with the implicit sync model. This implies exporting
446        * our GEM object as a dma-buf and closing it right after the
447        * EXPORT_SYNC_FILE, unfortunately.
448        */
449       int dmabuf_fd;
450       int ret =
451          drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &dmabuf_fd);
452 
453       if (ret) {
454          mesa_loge("drmPrimeHandleToFD() failed (err=%d)", errno);
455          return false;
456       }
457 
458       struct dma_buf_export_sync_file esync = {
459          .flags = for_read_only_access ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW,
460       };
461 
462       ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
463       close(dmabuf_fd);
464 
465       if (ret) {
466          mesa_loge("DMA_BUF_IOCTL_EXPORT_SYNC_FILE failed (err=%d)", errno);
467          return false;
468       }
469 
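      /* sync_wait() takes its timeout in milliseconds, hence the conversion
       * from nanoseconds; sub-millisecond timeouts degrade to a poll.
       */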
470       ret = sync_wait(esync.fd, timeout_ns / 1000000);
471       close(esync.fd);
472       return ret == 0;
473    } else {
474       /* Waiting on a non-shared object is much simpler. We just pick the
475        * right sync point based on for_read_only_access and call
476        * drmSyncobjTimelineWait().
477        */
478       uint64_t sync_point =
479          for_read_only_access
480             ? panthor_bo->sync.write_point
481             : MAX2(panthor_bo->sync.write_point, panthor_bo->sync.read_point);
482 
483       if (!sync_point)
484          return true;
485 
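      /* drmSyncobjTimelineWait() takes an absolute timeout, so convert the
       * relative timeout here, clamping to INT64_MAX to avoid overflow.
       */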
486       int64_t abs_timeout_ns = timeout_ns < INT64_MAX - os_time_get_nano()
487                                   ? timeout_ns + os_time_get_nano()
488                                   : INT64_MAX;
489       int ret = drmSyncobjTimelineWait(bo->dev->fd, &panthor_bo->sync.handle,
490                                        &sync_point, 1, abs_timeout_ns,
491                                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);
492       if (ret >= 0)
493          return true;
494 
495       if (ret != -ETIME)
496          mesa_loge("DMA_BUF_IOCTL_EXPORT_SYNC_FILE failed (err=%d)", ret);
497 
498       return false;
499    }
500 }
501 
502 /* Attach a sync to a buffer object. */
503 int
504 panthor_kmod_bo_attach_sync_point(struct pan_kmod_bo *bo, uint32_t sync_handle,
505                                   uint64_t sync_point, bool written)
506 {
507    struct panthor_kmod_bo *panthor_bo =
508       container_of(bo, struct panthor_kmod_bo, base);
509    struct panthor_kmod_vm *panthor_vm =
510       bo->exclusive_vm
511          ? container_of(bo->exclusive_vm, struct panthor_kmod_vm, base)
512          : NULL;
513    bool shared =
514       bo->flags & (PAN_KMOD_BO_FLAG_EXPORTED | PAN_KMOD_BO_FLAG_IMPORTED);
515 
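   /* Three cases below: shared BOs get the new sync point imported into the
    * dma-buf so implicit sync keeps working, VM-private BOs just record the
    * point on the VM timeline they already use, and non-shared standalone BOs
    * get the point transferred onto their own timeline syncobj.
    */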
516    if (shared) {
517       /* Reconciling explicit/implicit sync again: we need to import the
518        * new sync point in the dma-buf, so other parties can rely on
519        * implicit deps.
520        */
521       struct dma_buf_import_sync_file isync = {
522          .flags = written ? DMA_BUF_SYNC_RW : DMA_BUF_SYNC_READ,
523       };
524       int dmabuf_fd;
525       int ret = drmSyncobjExportSyncFile(bo->dev->fd, sync_handle, &isync.fd);
526       if (ret) {
527          mesa_loge("drmSyncobjExportSyncFile() failed (err=%d)", errno);
528          return -1;
529       }
530 
531       ret =
532          drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &dmabuf_fd);
533       if (ret) {
534          mesa_loge("drmPrimeHandleToFD() failed (err=%d)", errno);
535          close(isync.fd);
536          return -1;
537       }
538 
539       ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &isync);
540       close(dmabuf_fd);
541       close(isync.fd);
542       if (ret) {
543          mesa_loge("DMA_BUF_IOCTL_IMPORT_SYNC_FILE failed (err=%d)", errno);
544          return -1;
545       }
546    } else if (panthor_vm) {
547       /* Private BOs should be passed the VM syncobj. */
548       assert(sync_handle == panthor_vm->sync.handle);
549 
550       panthor_bo->sync.read_point =
551          MAX2(sync_point, panthor_bo->sync.read_point);
552       if (written) {
553          panthor_bo->sync.write_point =
554             MAX2(sync_point, panthor_bo->sync.write_point);
555       }
556    } else {
557       /* For non-private BOs that are not shared yet, we add a new sync point
558        * to our timeline syncobj, and push the sync there.
559        */
560       uint32_t new_sync_point =
561          MAX2(panthor_bo->sync.write_point, panthor_bo->sync.read_point) + 1;
562 
563       int ret = drmSyncobjTransfer(bo->dev->fd, panthor_bo->sync.handle,
564                                    new_sync_point, sync_handle, sync_point, 0);
565       if (ret) {
566          mesa_loge("drmSyncobjTransfer() failed (err=%d)", errno);
567          return -1;
568       }
569 
570       panthor_bo->sync.read_point = new_sync_point;
571       if (written)
572          panthor_bo->sync.write_point = new_sync_point;
573    }
574 
575    return 0;
576 }
577 
578 /* Get the sync point for a read or write operation on a buffer object. */
579 int
580 panthor_kmod_bo_get_sync_point(struct pan_kmod_bo *bo, uint32_t *sync_handle,
581                                uint64_t *sync_point, bool for_read_only_access)
582 {
583    struct panthor_kmod_bo *panthor_bo =
584       container_of(bo, struct panthor_kmod_bo, base);
585    bool shared =
586       bo->flags & (PAN_KMOD_BO_FLAG_EXPORTED | PAN_KMOD_BO_FLAG_IMPORTED);
587 
588    if (shared) {
589       /* Explicit/implicit sync reconciliation point. We need to export
590        * a sync-file from the dmabuf and make it a syncobj.
591        */
592       int dmabuf_fd;
593       int ret =
594          drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &dmabuf_fd);
595       if (ret) {
596          mesa_loge("drmPrimeHandleToFD() failed (err=%d)\n", errno);
597          return -1;
598       }
599 
600       struct dma_buf_export_sync_file esync = {
601          .flags = for_read_only_access ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW,
602       };
603 
604       ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
605       close(dmabuf_fd);
606       if (ret) {
607          mesa_loge("DMA_BUF_IOCTL_EXPORT_SYNC_FILE failed (err=%d)", errno);
608          return -1;
609       }
610 
611       /* We store the resulting sync in our BO syncobj, which will be assigned
612        * a new sync next time we enter this function.
613        */
614       ret = drmSyncobjImportSyncFile(bo->dev->fd, panthor_bo->sync.handle,
615                                      esync.fd);
616       close(esync.fd);
617       if (ret) {
618          mesa_loge("drmSyncobjImportSyncFile() failed (err=%d)", errno);
619          return -1;
620       }
621 
622       /* The syncobj is a binary syncobj in that case. */
623       *sync_handle = panthor_bo->sync.handle;
624       *sync_point = 0;
625    } else {
626       /* Fortunately, the non-shared path is much simpler: we just return
627        * the read/write sync point depending on the access type. The syncobj
628        * is a timeline syncobj in that case.
629        */
630       *sync_handle = panthor_bo->sync.handle;
631       *sync_point = for_read_only_access ? panthor_bo->sync.write_point
632                                          : MAX2(panthor_bo->sync.read_point,
633                                                 panthor_bo->sync.write_point);
634    }
635    return 0;
636 }
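/* Illustrative pairing of the two helpers above, as a hypothetical submit
 * path might use them (everything except the two functions is made up for
 * the example):
 *
 *    uint32_t bo_sync;
 *    uint64_t bo_point;
 *
 *    panthor_kmod_bo_get_sync_point(bo, &bo_sync, &bo_point, false);
 *    ... make the job wait on (bo_sync, bo_point), submit it so it signals
 *    (job_sync, job_point), then record that point on the BO ...
 *    panthor_kmod_bo_attach_sync_point(bo, job_sync, job_point, true);
 */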
637 
638 static struct pan_kmod_vm *
639 panthor_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags,
640                        uint64_t user_va_start, uint64_t user_va_range)
641 {
642    struct pan_kmod_dev_props props;
643 
644    panthor_dev_query_props(dev, &props);
645 
646    struct panthor_kmod_vm *panthor_vm =
647       pan_kmod_dev_alloc(dev, sizeof(*panthor_vm));
648    if (!panthor_vm) {
649       mesa_loge("failed to allocate a panthor_kmod_vm object");
650       return NULL;
651    }
652 
653    if (flags & PAN_KMOD_VM_FLAG_AUTO_VA) {
654       simple_mtx_init(&panthor_vm->auto_va.lock, mtx_plain);
655       list_inithead(&panthor_vm->auto_va.gc_list);
656       util_vma_heap_init(&panthor_vm->auto_va.heap, user_va_start,
657                          user_va_range);
658    }
659 
660    if (flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY) {
661       simple_mtx_init(&panthor_vm->sync.lock, mtx_plain);
662       panthor_vm->sync.point = 0;
663       if (drmSyncobjCreate(dev->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
664                            &panthor_vm->sync.handle)) {
665          mesa_loge("drmSyncobjCreate() failed (err=%d)", errno);
666          goto err_free_vm;
667       }
668    }
669 
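   /* The kernel takes the size of the VA region reserved for userspace,
    * counted from address zero, hence user_va_start + user_va_range rather
    * than just the range.
    */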
670    struct drm_panthor_vm_create req = {
671       .user_va_range = user_va_start + user_va_range,
672    };
673 
674    if (drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_VM_CREATE, &req)) {
675       mesa_loge("DRM_IOCTL_PANTHOR_VM_CREATE failed (err=%d)", errno);
676       goto err_destroy_sync;
677    }
678 
679    pan_kmod_vm_init(&panthor_vm->base, dev, req.id, flags);
680    return &panthor_vm->base;
681 
682 err_destroy_sync:
683    if (flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY) {
684       drmSyncobjDestroy(dev->fd, panthor_vm->sync.handle);
685       simple_mtx_destroy(&panthor_vm->sync.lock);
686    }
687 
688 err_free_vm:
689    if (flags & PAN_KMOD_VM_FLAG_AUTO_VA) {
690       util_vma_heap_finish(&panthor_vm->auto_va.heap);
691       simple_mtx_destroy(&panthor_vm->auto_va.lock);
692    }
693 
694    pan_kmod_dev_free(dev, panthor_vm);
695    return NULL;
696 }
697 
698 static void
699 panthor_kmod_vm_collect_freed_vas(struct panthor_kmod_vm *vm)
700 {
701    if (!(vm->base.flags & PAN_KMOD_VM_FLAG_AUTO_VA))
702       return;
703 
704    bool done = false;
705 
706    simple_mtx_assert_locked(&vm->auto_va.lock);
707    list_for_each_entry_safe_rev(struct panthor_kmod_va_collect, req,
708                                 &vm->auto_va.gc_list, node)
709    {
710       /* Unmaps are queued in execution order, so entries older than the first signaled one are also safe to collect. */
711       if (!done) {
712          int ret = drmSyncobjTimelineWait(
713             vm->base.dev->fd, &vm->sync.handle, &req->sync_point, 1, 0,
714             DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);
715          if (ret >= 0)
716             done = true;
717          else
718             continue;
719       }
720 
721       list_del(&req->node);
722       util_vma_heap_free(&vm->auto_va.heap, req->va, req->size);
723       pan_kmod_dev_free(vm->base.dev, req);
724    }
725 }
726 
727 static void
728 panthor_kmod_vm_destroy(struct pan_kmod_vm *vm)
729 {
730    struct panthor_kmod_vm *panthor_vm =
731       container_of(vm, struct panthor_kmod_vm, base);
732    struct drm_panthor_vm_destroy req = {.id = vm->handle};
733    int ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_DESTROY, &req);
734    if (ret)
735       mesa_loge("DRM_IOCTL_PANTHOR_VM_DESTROY failed (err=%d)", errno);
736 
737    assert(!ret);
738 
739    if (panthor_vm->base.flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY) {
740       drmSyncobjDestroy(vm->dev->fd, panthor_vm->sync.handle);
741       simple_mtx_destroy(&panthor_vm->sync.lock);
742    }
743 
744    if (panthor_vm->base.flags & PAN_KMOD_VM_FLAG_AUTO_VA) {
745       simple_mtx_lock(&panthor_vm->auto_va.lock);
746       list_for_each_entry_safe(struct panthor_kmod_va_collect, req,
747                                &panthor_vm->auto_va.gc_list, node) {
748          list_del(&req->node);
749          util_vma_heap_free(&panthor_vm->auto_va.heap, req->va, req->size);
750          pan_kmod_dev_free(vm->dev, req);
751       }
752       util_vma_heap_finish(&panthor_vm->auto_va.heap);
753       simple_mtx_unlock(&panthor_vm->auto_va.lock);
754       simple_mtx_destroy(&panthor_vm->auto_va.lock);
755    }
756 
757    pan_kmod_dev_free(vm->dev, panthor_vm);
758 }
759 
760 static uint64_t
761 panthor_kmod_vm_alloc_va(struct panthor_kmod_vm *panthor_vm, size_t size)
762 {
763    uint64_t va;
764 
765    assert(panthor_vm->base.flags & PAN_KMOD_VM_FLAG_AUTO_VA);
766 
767    simple_mtx_lock(&panthor_vm->auto_va.lock);
768    panthor_kmod_vm_collect_freed_vas(panthor_vm);
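   /* Align big allocations on 2 MiB (presumably allowing larger page
    * mappings) and everything else on the 4 KiB GPU page size.
    */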
769    va = util_vma_heap_alloc(&panthor_vm->auto_va.heap, size,
770                             size > 0x200000 ? 0x200000 : 0x1000);
771    simple_mtx_unlock(&panthor_vm->auto_va.lock);
772 
773    return va;
774 }
775 
776 static void
777 panthor_kmod_vm_free_va(struct panthor_kmod_vm *panthor_vm, uint64_t va,
778                         size_t size)
779 {
780    assert(panthor_vm->base.flags & PAN_KMOD_VM_FLAG_AUTO_VA);
781 
782    simple_mtx_lock(&panthor_vm->auto_va.lock);
783    util_vma_heap_free(&panthor_vm->auto_va.heap, va, size);
784    simple_mtx_unlock(&panthor_vm->auto_va.lock);
785 }
786 
787 static int
788 panthor_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
789                      struct pan_kmod_vm_op *ops, uint32_t op_count)
790 {
791    struct panthor_kmod_vm *panthor_vm =
792       container_of(vm, struct panthor_kmod_vm, base);
793    struct drm_panthor_vm_bind_op *bind_ops = NULL;
794    struct drm_panthor_sync_op *sync_ops = NULL;
795    uint32_t syncop_cnt = 0, syncop_ptr = 0;
796    bool async = mode == PAN_KMOD_VM_OP_MODE_ASYNC ||
797                 mode == PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT;
798    bool auto_va = vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA;
799    bool track_activity = vm->flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY;
800    struct panthor_kmod_va_collect *cur_va_collect = NULL;
801    struct list_head va_collect_list;
802    uint32_t va_collect_cnt = 0;
803    int ret = -1;
804 
805    /* For any asynchronous VM bind, we assume the user is managing the VM
806     * address space, so we don't have to collect VMAs in that case.
807     */
808    if (mode == PAN_KMOD_VM_OP_MODE_ASYNC && auto_va) {
809       mesa_loge(
810          "auto-VA allocation is incompatible with PAN_KMOD_VM_OP_MODE_ASYNC");
811       return -1;
812    }
813 
814    if (mode == PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT &&
815        !track_activity) {
816       mesa_loge(
817          "PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT requires PAN_KMOD_VM_FLAG_TRACK_ACTIVITY");
818       return -1;
819    }
820 
821    if (op_count == 0)
822       return 0;
823 
824    /* If this is an async operation and VM activity tracking is enabled, we
825     * reserve one syncop per VM operation for the signaling of our VM timeline
826     * slot.
827     */
828    if (async && track_activity)
829       syncop_cnt += op_count;
830 
831    /* With PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT, we need to push our
832     * wait VM syncobj in all of the submissions, hence the extra syncop per
833     * operation.
834     */
835    if (mode == PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT)
836       syncop_cnt += op_count;
837 
838    for (uint32_t i = 0; i < op_count; i++) {
839       if (pan_kmod_vm_op_check(vm, mode, &ops[i]))
840          return -1;
841 
842       /* If auto-VA is used, for any asynchronous unmap operation, we need
843        * to register a VA collection node and add it to the GC list.
844        */
845       if (auto_va && async && ops[i].type == PAN_KMOD_VM_OP_TYPE_UNMAP &&
846           ops[i].va.size)
847          va_collect_cnt++;
848 
849       syncop_cnt += ops[i].syncs.count;
850    }
851 
852    /* Pre-allocate the VA collection nodes. */
853    list_inithead(&va_collect_list);
854    for (uint32_t i = 0; i < va_collect_cnt; i++) {
855       struct panthor_kmod_va_collect *va_collect =
856          pan_kmod_dev_alloc(vm->dev, sizeof(*va_collect));
857       if (!va_collect) {
858          mesa_loge("panthor_kmod_va_collect allocation failed");
859          goto out_free_va_collect;
860       }
861 
862       if (!i)
863          cur_va_collect = va_collect;
864 
865       list_addtail(&va_collect->node, &va_collect_list);
866    }
867 
868    if (syncop_cnt) {
869       sync_ops =
870          pan_kmod_dev_alloc_transient(vm->dev, sizeof(*sync_ops) * syncop_cnt);
871       if (!sync_ops) {
872          mesa_loge("drm_panthor_sync_op[%d] array allocation failed",
873                    syncop_cnt);
874          goto out_free_va_collect;
875       }
876    }
877 
878    bind_ops =
879       pan_kmod_dev_alloc_transient(vm->dev, sizeof(*bind_ops) * op_count);
880    if (!bind_ops) {
881       mesa_loge("drm_panthor_vm_bind_op[%d] array allocation failed", op_count);
882       goto out_free_sync_ops;
883    }
884 
885    struct drm_panthor_vm_bind req = {
886       .vm_id = vm->handle,
887       .flags =
888          mode != PAN_KMOD_VM_OP_MODE_IMMEDIATE ? DRM_PANTHOR_VM_BIND_ASYNC : 0,
889       .ops = DRM_PANTHOR_OBJ_ARRAY(op_count, bind_ops),
890    };
891 
892    uint64_t vm_orig_sync_point = 0, vm_new_sync_point = 0;
893 
894    if (track_activity)
895       vm_orig_sync_point = vm_new_sync_point = panthor_kmod_vm_sync_lock(vm);
896 
897    for (uint32_t i = 0; i < op_count; i++) {
898       uint32_t op_sync_cnt = 0;
899       uint64_t signal_vm_point = 0;
900 
901       if (async && track_activity) {
902          signal_vm_point = ++vm_new_sync_point;
903          op_sync_cnt++;
904          sync_ops[syncop_ptr++] = (struct drm_panthor_sync_op){
905             .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
906                      DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ,
907             .handle = panthor_vm->sync.handle,
908             .timeline_value = signal_vm_point,
909          };
910       }
911 
912       if (mode == PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT) {
913          op_sync_cnt++;
914          sync_ops[syncop_ptr++] = (struct drm_panthor_sync_op){
915             .flags = DRM_PANTHOR_SYNC_OP_WAIT |
916                      DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ,
917             .handle = panthor_vm->sync.handle,
918             .timeline_value = vm_orig_sync_point,
919          };
920 
921          if (auto_va && ops[i].type == PAN_KMOD_VM_OP_TYPE_UNMAP &&
922              ops[i].va.size) {
923             struct panthor_kmod_va_collect *va_collect = cur_va_collect;
924 
925             assert(&va_collect->node != &va_collect_list);
926             assert(signal_vm_point);
927             va_collect->sync_point = signal_vm_point;
928             va_collect->va = ops[i].va.start;
929             va_collect->size = ops[i].va.size;
930 
931             cur_va_collect = list_entry(cur_va_collect->node.next,
932                                         struct panthor_kmod_va_collect, node);
933          }
934       }
935 
936       for (uint32_t j = 0; j < ops[i].syncs.count; j++) {
937          sync_ops[syncop_ptr++] = (struct drm_panthor_sync_op){
938             .flags = (ops[i].syncs.array[j].type == PAN_KMOD_SYNC_TYPE_WAIT
939                          ? DRM_PANTHOR_SYNC_OP_WAIT
940                          : DRM_PANTHOR_SYNC_OP_SIGNAL) |
941                      DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ,
942             .handle = ops[i].syncs.array[j].handle,
943             .timeline_value = ops[i].syncs.array[j].point,
944          };
945       }
946       op_sync_cnt += ops[i].syncs.count;
947 
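      /* All sync operations of this bind operation (optional VM signal/wait
       * plus the caller's syncs) were appended contiguously, so point the op
       * at the last op_sync_cnt entries of sync_ops[].
       */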
948       bind_ops[i].syncs = (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(
949          op_sync_cnt, op_sync_cnt ? &sync_ops[syncop_ptr - op_sync_cnt] : NULL);
950 
951       if (ops[i].type == PAN_KMOD_VM_OP_TYPE_MAP) {
952          bind_ops[i].flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP;
953          bind_ops[i].size = ops[i].va.size;
954          bind_ops[i].bo_handle = ops[i].map.bo->handle;
955          bind_ops[i].bo_offset = ops[i].map.bo_offset;
956 
957          if (ops[i].va.start == PAN_KMOD_VM_MAP_AUTO_VA) {
958             bind_ops[i].va =
959                panthor_kmod_vm_alloc_va(panthor_vm, bind_ops[i].size);
960             if (!bind_ops[i].va) {
961                mesa_loge("VA allocation failed");
962                ret = -1;
963                goto out_update_vas;
964             }
965          } else {
966             bind_ops[i].va = ops[i].va.start;
967          }
968 
969          if (ops[i].map.bo->flags & PAN_KMOD_BO_FLAG_EXECUTABLE)
970             bind_ops[i].flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
971          else
972             bind_ops[i].flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
973 
974          if (ops[i].map.bo->flags & PAN_KMOD_BO_FLAG_GPU_UNCACHED)
975             bind_ops[i].flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
976 
977       } else if (ops[i].type == PAN_KMOD_VM_OP_TYPE_UNMAP) {
978          bind_ops[i].flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
979          bind_ops[i].va = ops[i].va.start;
980          bind_ops[i].size = ops[i].va.size;
981       } else {
982          assert(ops[i].type == PAN_KMOD_VM_OP_TYPE_SYNC_ONLY);
983          bind_ops[i].flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
984       }
985    }
986 
987    ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);
988    if (ret)
989       mesa_loge("DRM_IOCTL_PANTHOR_VM_BIND failed (err=%d)", errno);
990 
991    if (!ret && va_collect_cnt) {
992       assert(&cur_va_collect->node == &va_collect_list);
993       simple_mtx_lock(&panthor_vm->auto_va.lock);
994       list_splicetail(&va_collect_list, &panthor_vm->auto_va.gc_list);
995       list_inithead(&va_collect_list);
996       simple_mtx_unlock(&panthor_vm->auto_va.lock);
997    }
998 
999 out_update_vas:
1000    if (track_activity) {
1001       panthor_kmod_vm_sync_unlock(vm,
1002                                   ret ? vm_orig_sync_point : vm_new_sync_point);
1003    }
1004 
1005    for (uint32_t i = 0; i < op_count; i++) {
1006       if (ops[i].type == PAN_KMOD_VM_OP_TYPE_MAP &&
1007           ops[i].va.start == PAN_KMOD_VM_MAP_AUTO_VA) {
1008          if (!ret) {
1009             ops[i].va.start = bind_ops[i].va;
1010          } else if (bind_ops[i].va != 0) {
1011             panthor_kmod_vm_free_va(panthor_vm, bind_ops[i].va,
1012                                     bind_ops[i].size);
1013          }
1014       }
1015 
1016       if (ops[i].type == PAN_KMOD_VM_OP_TYPE_UNMAP && auto_va && !async &&
1017           !ret) {
1018          panthor_kmod_vm_free_va(panthor_vm, bind_ops[i].va, bind_ops[i].size);
1019       }
1020    }
1021 
1022    pan_kmod_dev_free(vm->dev, bind_ops);
1023 
1024 out_free_sync_ops:
1025    pan_kmod_dev_free(vm->dev, sync_ops);
1026 
1027 out_free_va_collect:
1028    list_for_each_entry_safe(struct panthor_kmod_va_collect, va_collect,
1029                             &va_collect_list, node) {
1030       list_del(&va_collect->node);
1031       pan_kmod_dev_free(vm->dev, va_collect);
1032    }
1033 
1034    return ret;
1035 }
1036 
1037 static enum pan_kmod_vm_state
1038 panthor_kmod_vm_query_state(struct pan_kmod_vm *vm)
1039 {
1040    struct drm_panthor_vm_get_state query = {.vm_id = vm->handle};
1041    int ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_GET_STATE, &query);
1042 
1043    if (ret || query.state == DRM_PANTHOR_VM_STATE_UNUSABLE)
1044       return PAN_KMOD_VM_FAULTY;
1045 
1046    return PAN_KMOD_VM_USABLE;
1047 }
1048 
1049 uint32_t
1050 panthor_kmod_vm_sync_handle(struct pan_kmod_vm *vm)
1051 {
1052    struct panthor_kmod_vm *panthor_vm =
1053       container_of(vm, struct panthor_kmod_vm, base);
1054 
1055    assert(vm->flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY);
1056    return panthor_vm->sync.handle;
1057 }
1058 
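/* Lock the VM sync timeline and return the current sync point. Callers are
 * expected to reserve points past the returned one, submit work signaling
 * them, and then call panthor_kmod_vm_sync_unlock() with the last reserved
 * point (or the original one on failure), as panthor_kmod_vm_bind() does.
 */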
1059 uint64_t
1060 panthor_kmod_vm_sync_lock(struct pan_kmod_vm *vm)
1061 {
1062    struct panthor_kmod_vm *panthor_vm =
1063       container_of(vm, struct panthor_kmod_vm, base);
1064 
1065    assert(vm->flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY);
1066 
1067    simple_mtx_lock(&panthor_vm->sync.lock);
1068    return panthor_vm->sync.point;
1069 }
1070 
1071 void
1072 panthor_kmod_vm_sync_unlock(struct pan_kmod_vm *vm, uint64_t new_sync_point)
1073 {
1074    struct panthor_kmod_vm *panthor_vm =
1075       container_of(vm, struct panthor_kmod_vm, base);
1076 
1077    assert(vm->flags & PAN_KMOD_VM_FLAG_TRACK_ACTIVITY);
1078    assert(new_sync_point >= panthor_vm->sync.point);
1079 
1080    /* Check that the new syncpoint has a fence attached to it. */
1081    assert(new_sync_point == panthor_vm->sync.point ||
1082           drmSyncobjTimelineWait(
1083              vm->dev->fd, &panthor_vm->sync.handle, &new_sync_point, 1, 0,
1084              DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, NULL) >= 0);
1085 
1086    panthor_vm->sync.point = new_sync_point;
1087    simple_mtx_unlock(&panthor_vm->sync.lock);
1088 }
1089 
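/* Read the current LATEST_FLUSH_ID value from the mapped register page.
 * Callers typically use it to avoid emitting redundant cache flushes, but
 * the exact use is up to them.
 */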
1090 uint32_t
1091 panthor_kmod_get_flush_id(const struct pan_kmod_dev *dev)
1092 {
1093    struct panthor_kmod_dev *panthor_dev =
1094       container_of(dev, struct panthor_kmod_dev, base);
1095 
1096    return *(panthor_dev->flush_id);
1097 }
1098 
1099 const struct drm_panthor_csif_info *
1100 panthor_kmod_get_csif_props(const struct pan_kmod_dev *dev)
1101 {
1102    struct panthor_kmod_dev *panthor_dev =
1103       container_of(dev, struct panthor_kmod_dev, base);
1104 
1105    return &panthor_dev->props.csif;
1106 }
1107 
1108 static uint64_t
1109 panthor_kmod_query_timestamp(const struct pan_kmod_dev *dev)
1110 {
1111    if (dev->driver.version.major <= 1 && dev->driver.version.minor < 1)
1112       return 0;
1113 
1114    struct drm_panthor_timestamp_info timestamp_info;
1115 
1116    struct drm_panthor_dev_query query = (struct drm_panthor_dev_query){
1117       .type = DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,
1118       .size = sizeof(timestamp_info),
1119       .pointer = (uint64_t)(uintptr_t)&timestamp_info,
1120    };
1121 
1122    int ret = drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
1123    if (ret) {
1124       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
1125       return 0;
1126    }
1127 
1128    return timestamp_info.current_timestamp;
1129 }
1130 
1131 const struct pan_kmod_ops panthor_kmod_ops = {
1132    .dev_create = panthor_kmod_dev_create,
1133    .dev_destroy = panthor_kmod_dev_destroy,
1134    .dev_query_props = panthor_dev_query_props,
1135    .dev_query_user_va_range = panthor_kmod_dev_query_user_va_range,
1136    .bo_alloc = panthor_kmod_bo_alloc,
1137    .bo_free = panthor_kmod_bo_free,
1138    .bo_import = panthor_kmod_bo_import,
1139    .bo_export = panthor_kmod_bo_export,
1140    .bo_get_mmap_offset = panthor_kmod_bo_get_mmap_offset,
1141    .bo_wait = panthor_kmod_bo_wait,
1142    .vm_create = panthor_kmod_vm_create,
1143    .vm_destroy = panthor_kmod_vm_destroy,
1144    .vm_bind = panthor_kmod_vm_bind,
1145    .vm_query_state = panthor_kmod_vm_query_state,
1146    .query_timestamp = panthor_kmod_query_timestamp,
1147 };
1148