/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 *
 * This file exposes some core KMD functionalities in a driver-agnostic way.
 * The drivers are still assumed to be regular DRM drivers, such that some
 * operations can be handled generically.
 *
 * Any operation that's too specific to be abstracted can either have a
 * backend-specific helper exposed through pan_kmod_<backend>.h, or no helper
 * at all (in the latter case, users are expected to call the ioctl directly).
 *
 * If some operations are not natively supported by a KMD, the kmod backend
 * should fail or emulate the functionality (if deemed necessary).
 */

#pragma once

#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/drm.h"

#include "util/log.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_mman.h"
#include "util/ralloc.h"
#include "util/simple_mtx.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"

#include "kmod/panthor_kmod.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct pan_kmod_dev;

/* GPU VM creation flags. */
enum pan_kmod_vm_flags {
   /* Set if you want the VM to automatically assign virtual addresses when
    * pan_kmod_vm_map() is called. If this flag is set, all pan_kmod_vm_map()
    * calls must have va=PAN_KMOD_VM_MAP_AUTO_VA.
    */
   PAN_KMOD_VM_FLAG_AUTO_VA = BITFIELD_BIT(0),

   /* Let the backend know whether it should track the VM activity or not.
    * Needed if PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT is used.
    */
   PAN_KMOD_VM_FLAG_TRACK_ACTIVITY = BITFIELD_BIT(1),
};
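
/* A usage sketch (illustrative, not part of the original header): create a
 * VM that picks virtual addresses itself. With PAN_KMOD_VM_FLAG_AUTO_VA set,
 * map operations must pass PAN_KMOD_VM_MAP_AUTO_VA as the VA start, and the
 * backend writes the chosen address back to pan_kmod_vm_op::va.start.
 *
 *    struct pan_kmod_va_range range = pan_kmod_dev_query_user_va_range(dev);
 *    struct pan_kmod_vm *vm =
 *       pan_kmod_vm_create(dev, PAN_KMOD_VM_FLAG_AUTO_VA, range.start,
 *                          range.size);
 */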

/* Object representing a GPU VM. */
struct pan_kmod_vm {
   /* Combination of pan_kmod_vm_flags flags. */
   uint32_t flags;

   /* The VM handle returned by the KMD. If the KMD supports only one VM per
    * context, this should be zero.
    */
   uint32_t handle;

   /* Device this VM was created from. */
   struct pan_kmod_dev *dev;
};

/* Buffer object flags. */
enum pan_kmod_bo_flags {
   /* Allow GPU execution on this buffer. */
   PAN_KMOD_BO_FLAG_EXECUTABLE = BITFIELD_BIT(0),

   /* Allocate memory when a GPU fault occurs instead of allocating
    * up-front.
    */
   PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT = BITFIELD_BIT(1),

   /* If set, the buffer object will never be CPU-mapped in userspace. */
   PAN_KMOD_BO_FLAG_NO_MMAP = BITFIELD_BIT(2),

   /* Set when the buffer object has been exported. Users don't directly
    * control this flag: it's set when pan_kmod_bo_export() is called.
    */
   PAN_KMOD_BO_FLAG_EXPORTED = BITFIELD_BIT(3),

   /* Set when the buffer object has been imported. Users don't directly
    * control this flag: it's set when pan_kmod_bo_import() is called.
    */
   PAN_KMOD_BO_FLAG_IMPORTED = BITFIELD_BIT(4),

   /* If set, the buffer is mapped GPU-uncached when pan_kmod_vm_map()
    * is called.
    */
   PAN_KMOD_BO_FLAG_GPU_UNCACHED = BITFIELD_BIT(5),
};

/* Buffer object. */
struct pan_kmod_bo {
   /* Atomic reference count. The only reason we need to refcount BOs at this
    * level is because of how DRM prime import works: the import logic
    * returns the handle of an existing object if the object was previously
    * imported or was created by the driver.
    * In order to avoid calling GEM_CLOSE on an object that's still supposed
    * to be active, we need to count the number of users left.
    */
   int32_t refcnt;

   /* Size of the buffer object. */
   size_t size;

   /* Handle attached to the buffer object. */
   uint32_t handle;

   /* Combination of pan_kmod_bo_flags flags. */
   uint32_t flags;

   /* If non-NULL, the buffer object can only be mapped on this VM. This is
    * typically the case for all internal/non-shareable buffers. The backend
    * can optimize things based on this information. Calling
    * pan_kmod_bo_export() on such buffer objects is forbidden.
    */
   struct pan_kmod_vm *exclusive_vm;

   /* The device this buffer object was created from. */
   struct pan_kmod_dev *dev;

   /* User private data. Use pan_kmod_bo_{set,get}_user_priv() to access it. */
   void *user_priv;
};

/* List of GPU properties needed by the UMD. */
struct pan_kmod_dev_props {
   /* GPU product ID. */
   uint32_t gpu_prod_id;

   /* GPU revision. */
   uint32_t gpu_revision;

   /* GPU variant. */
   uint32_t gpu_variant;

   /* Bitmask encoding the number of shader cores exposed by the GPU. */
   uint64_t shader_present;

   /* Tiler feature bits. */
   uint32_t tiler_features;

   /* Memory-related feature bits. */
   uint32_t mem_features;

   /* MMU feature bits. */
   uint32_t mmu_features;
#define MMU_FEATURES_VA_BITS(mmu_features) (mmu_features & 0xff)

   /* Texture feature bits. */
   uint32_t texture_features[4];

   /* Maximum number of threads per core. */
   uint32_t max_threads_per_core;

   /* Maximum number of threads per workgroup. */
   uint32_t max_threads_per_wg;

   /* Number of registers per core. Can be used to determine the maximum
    * number of threads that can be allocated for a specific shader based on
    * the number of registers assigned to this shader.
    */
   uint32_t num_registers_per_core;

   /* Maximum number of thread-local storage instances per core.
    * If the GPU doesn't have a THREAD_TLS_ALLOC register, or the register
    * value is zero, the backend should assign the value of
    * max_threads_per_core here.
    */
   uint32_t max_tls_instance_per_core;

   /* AFBC feature bits. */
   uint32_t afbc_features;

   /* Set when the GPU supports cycle count and timestamp propagation as a
    * job requirement.
    */
   bool gpu_can_query_timestamp;

   /* GPU timestamp frequency. */
   uint64_t timestamp_frequency;
};

/* Memory allocator for kmod internal allocations. */
struct pan_kmod_allocator {
   /* Allocate and set to zero. */
   void *(*zalloc)(const struct pan_kmod_allocator *allocator, size_t size,
                   bool transient);

   /* Free. */
   void (*free)(const struct pan_kmod_allocator *allocator, void *data);

   /* Allocator private data. Can be NULL if unused. */
   void *priv;
};
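
/* A minimal allocator sketch (illustrative; my_zalloc/my_free are made-up
 * names): any implementation returning zero-initialized memory from zalloc()
 * works, the transient flag being only a hint that the allocation is
 * short-lived.
 *
 *    static void *
 *    my_zalloc(const struct pan_kmod_allocator *allocator, size_t size,
 *              bool transient)
 *    {
 *       return calloc(1, size);
 *    }
 *
 *    static void
 *    my_free(const struct pan_kmod_allocator *allocator, void *data)
 *    {
 *       free(data);
 *    }
 *
 *    static const struct pan_kmod_allocator my_allocator = {
 *       .zalloc = my_zalloc,
 *       .free = my_free,
 *    };
 */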

/* Synchronization type. */
enum pan_kmod_sync_type {
   PAN_KMOD_SYNC_TYPE_WAIT = 0,
   PAN_KMOD_SYNC_TYPE_SIGNAL,
};

/* Synchronization operation. */
struct pan_kmod_sync_op {
   /* Type of operation. */
   enum pan_kmod_sync_type type;

   /* Syncobj handle. */
   uint32_t handle;

   /* Syncobj point. Zero for binary syncobjs. */
   uint64_t point;
};

/* Special value passed to pan_kmod_vm_map() to signal that the VM should
 * automatically allocate a VA. Only valid if the VM was created with
 * PAN_KMOD_VM_FLAG_AUTO_VA.
 */
#define PAN_KMOD_VM_MAP_AUTO_VA ~0ull

/* Special value returned when the vm_map() operation failed. */
#define PAN_KMOD_VM_MAP_FAILED ~0ull

/* VM operations can be executed in different modes. */
enum pan_kmod_vm_op_mode {
   /* The map/unmap operation is executed immediately, which might cause
    * GPU faults if the GPU was still accessing buffers when we unmap or
    * remap.
    */
   PAN_KMOD_VM_OP_MODE_IMMEDIATE,

   /* The map/unmap operation is executed asynchronously, and the user
    * provides explicit wait/signal sync operations.
    */
   PAN_KMOD_VM_OP_MODE_ASYNC,

   /* The map/unmap operation is executed when the next GPU/VM idle-point
    * is reached. This guarantees fault-free unmap/remap operations when the
    * kmod user doesn't want to deal with synchronization explicitly.
    */
   PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT,
};

/* VM operation type. */
enum pan_kmod_vm_op_type {
   /* Map a buffer object. */
   PAN_KMOD_VM_OP_TYPE_MAP,

   /* Unmap a VA range. */
   PAN_KMOD_VM_OP_TYPE_UNMAP,

   /* Do nothing. Used as a way to execute sync operations on a VM queue,
    * without touching the VM.
    */
   PAN_KMOD_VM_OP_TYPE_SYNC_ONLY,
};

/* VM operation data. */
struct pan_kmod_vm_op {
   /* The type of operation being requested. */
   enum pan_kmod_vm_op_type type;

   /* VA range. */
   struct {
      /* Start of the VA range.
       * Must be PAN_KMOD_VM_MAP_AUTO_VA if PAN_KMOD_VM_FLAG_AUTO_VA was set
       * at VM creation time. In that case, the allocated VA is returned
       * in this field.
       */
      uint64_t start;

      /* Size of the VA range. */
      size_t size;
   } va;

   union {
      /* Arguments specific to map operations. */
      struct {
         /* Buffer object to map. */
         struct pan_kmod_bo *bo;

         /* Offset in the buffer object. */
         off_t bo_offset;
      } map;
   };

   /* Synchronization operations attached to the VM operation. */
   struct {
      /* Number of synchronization operations. Must be zero if mode is
       * PAN_KMOD_VM_OP_MODE_IMMEDIATE or
       * PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT.
       */
      uint32_t count;

      /* Array of synchronization operation descriptors. NULL if count is
       * zero.
       */
      const struct pan_kmod_sync_op *array;
   } syncs;
};
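
/* Example (a sketch; vm, bo, va, signal_handle and point are assumed to
 * exist): an asynchronous map that signals a timeline syncobj once the
 * mapping is effective.
 *
 *    struct pan_kmod_sync_op sync = {
 *       .type = PAN_KMOD_SYNC_TYPE_SIGNAL,
 *       .handle = signal_handle,
 *       .point = point,
 *    };
 *    struct pan_kmod_vm_op op = {
 *       .type = PAN_KMOD_VM_OP_TYPE_MAP,
 *       .va = { .start = va, .size = pan_kmod_bo_size(bo) },
 *       .map = { .bo = bo, .bo_offset = 0 },
 *       .syncs = { .count = 1, .array = &sync },
 *    };
 *    pan_kmod_vm_bind(vm, PAN_KMOD_VM_OP_MODE_ASYNC, &op, 1);
 */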

/* VM state. */
enum pan_kmod_vm_state {
   PAN_KMOD_VM_USABLE,
   PAN_KMOD_VM_FAULTY,
};

/* Device flags. */
enum pan_kmod_dev_flags {
   /* Set when the fd passed to pan_kmod_dev_create() is expected to be
    * owned by the device, iff the device creation succeeded.
    */
   PAN_KMOD_DEV_FLAG_OWNS_FD = (1 << 0),
};

/* Encode a virtual address range. */
struct pan_kmod_va_range {
   /* Start of the VA range. */
   uint64_t start;

   /* Size of the VA range. */
   uint64_t size;
};

/* KMD backend vtable.
 *
 * All methods described here are mandatory, unless explicitly flagged as
 * optional.
 */
struct pan_kmod_ops {
   /* Create a pan_kmod_dev object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_dev *(*dev_create)(
      int fd, uint32_t flags, const drmVersionPtr version,
      const struct pan_kmod_allocator *allocator);

   /* Destroy a pan_kmod_dev object. */
   void (*dev_destroy)(struct pan_kmod_dev *dev);

   /* Query device properties. */
   void (*dev_query_props)(const struct pan_kmod_dev *dev,
                           struct pan_kmod_dev_props *props);

   /* Query the maximum user VA range.
    * Users are free to use a subset of this range if they need less VA space.
    * This method is optional; when not specified, kmod assumes the whole VA
    * space (extracted from MMU_FEATURES.VA_BITS) is usable.
    */
   struct pan_kmod_va_range (*dev_query_user_va_range)(
      const struct pan_kmod_dev *dev);

   /* Allocate a buffer object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_bo *(*bo_alloc)(struct pan_kmod_dev *dev,
                                   struct pan_kmod_vm *exclusive_vm,
                                   size_t size, uint32_t flags);

   /* Free a buffer object. */
   void (*bo_free)(struct pan_kmod_bo *bo);

   /* Import a buffer object.
    * Return NULL if the import fails for any reason.
    */
   struct pan_kmod_bo *(*bo_import)(struct pan_kmod_dev *dev, uint32_t handle,
                                    size_t size, uint32_t flags);

   /* Post-export operations.
    * Return 0 on success, -1 otherwise.
    * This method is optional.
    */
   int (*bo_export)(struct pan_kmod_bo *bo, int dmabuf_fd);

   /* Get the file offset to use to mmap() a buffer object. */
   off_t (*bo_get_mmap_offset)(struct pan_kmod_bo *bo);

   /* Wait for a buffer object to be ready for read or read/write accesses. */
   bool (*bo_wait)(struct pan_kmod_bo *bo, int64_t timeout_ns,
                   bool for_read_only_access);

   /* Make a buffer object evictable. This method is optional. */
   void (*bo_make_evictable)(struct pan_kmod_bo *bo);

   /* Make the buffer object unevictable. This method is optional. */
   bool (*bo_make_unevictable)(struct pan_kmod_bo *bo);

   /* Create a VM object. */
   struct pan_kmod_vm *(*vm_create)(struct pan_kmod_dev *dev, uint32_t flags,
                                    uint64_t va_start, uint64_t va_range);

   /* Destroy a VM object. */
   void (*vm_destroy)(struct pan_kmod_vm *vm);

   /* Execute VM operations.
    * Return 0 if the submission succeeds, -1 otherwise.
    * For PAN_KMOD_VM_OP_MODE_IMMEDIATE submissions, the return value also
    * reflects whether the VM operations succeeded. For other modes, if any
    * of the VM operations fails, the VM might be flagged as unusable and
    * users should create a new VM to recover.
    */
   int (*vm_bind)(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                  struct pan_kmod_vm_op *ops, uint32_t op_count);

   /* Query the VM state.
    * This method is optional. When missing, the VM is assumed to always be
    * usable.
    */
   enum pan_kmod_vm_state (*vm_query_state)(struct pan_kmod_vm *vm);

   /* Query the current GPU timestamp. */
   uint64_t (*query_timestamp)(const struct pan_kmod_dev *dev);
};

/* KMD information. */
struct pan_kmod_driver {
   /* KMD version. */
   struct {
      uint32_t major;
      uint32_t minor;
   } version;
};

/* Device object. */
struct pan_kmod_dev {
   /* FD attached to the device. */
   int fd;

   /* Device flags. */
   uint32_t flags;

   /* KMD backing this device. */
   struct pan_kmod_driver driver;

   /* kmod backend ops assigned at device creation. */
   const struct pan_kmod_ops *ops;

   /* DRM prime import returns the handle of a pre-existing GEM if we are
    * importing an object that was created by us or previously imported.
    * We need to make sure we return the same pan_kmod_bo in that case,
    * otherwise freeing one pan_kmod_bo will make all other BOs sharing
    * the same handle invalid.
    */
   struct {
      struct util_sparse_array array;
      simple_mtx_t lock;
   } handle_to_bo;

   /* Allocator attached to the device. */
   const struct pan_kmod_allocator *allocator;

   /* User private data. Use pan_kmod_dev_{set,get}_user_priv() to access it. */
   void *user_priv;
};

struct pan_kmod_dev *
pan_kmod_dev_create(int fd, uint32_t flags,
                    const struct pan_kmod_allocator *allocator);

void pan_kmod_dev_destroy(struct pan_kmod_dev *dev);
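
/* Typical bring-up sketch (illustrative; the render node path and
 * my_allocator are assumptions): open a DRM node, hand the fd over to kmod,
 * and let the device own it so it is closed by pan_kmod_dev_destroy(). If
 * creation fails, the fd stays owned by the caller.
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct pan_kmod_dev *dev =
 *       pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD, &my_allocator);
 *    if (!dev)
 *       close(fd);
 */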

static inline void
pan_kmod_dev_query_props(const struct pan_kmod_dev *dev,
                         struct pan_kmod_dev_props *props)
{
   dev->ops->dev_query_props(dev, props);
}

static inline struct pan_kmod_va_range
pan_kmod_dev_query_user_va_range(const struct pan_kmod_dev *dev)
{
   if (dev->ops->dev_query_user_va_range)
      return dev->ops->dev_query_user_va_range(dev);

   struct pan_kmod_dev_props props;

   pan_kmod_dev_query_props(dev, &props);
   return (struct pan_kmod_va_range){
      .start = 0,
      .size = 1ull << MMU_FEATURES_VA_BITS(props.mmu_features),
   };
}

static inline void
pan_kmod_dev_set_user_priv(struct pan_kmod_dev *dev, void *data)
{
   dev->user_priv = data;
}

static inline void *
pan_kmod_dev_get_user_priv(struct pan_kmod_dev *dev)
{
   return dev->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_alloc(struct pan_kmod_dev *dev,
                                      struct pan_kmod_vm *exclusive_vm,
                                      size_t size, uint32_t flags);

static inline struct pan_kmod_bo *
pan_kmod_bo_get(struct pan_kmod_bo *bo)
{
   if (!bo)
      return NULL;

   ASSERTED int32_t refcnt = p_atomic_inc_return(&bo->refcnt);

   /* If refcnt was zero before our increment, we're in trouble. */
   assert(refcnt > 1);

   return bo;
}

void pan_kmod_bo_put(struct pan_kmod_bo *bo);

static inline void *
pan_kmod_bo_cmdxchg_user_priv(struct pan_kmod_bo *bo, void *old_data,
                              void *new_data)
{
   return (void *)p_atomic_cmpxchg((uintptr_t *)&bo->user_priv,
                                   (uintptr_t)old_data, (uintptr_t)new_data);
}

static inline void
pan_kmod_bo_set_user_priv(struct pan_kmod_bo *bo, void *data)
{
   bo->user_priv = data;
}

static inline void *
pan_kmod_bo_get_user_priv(const struct pan_kmod_bo *bo)
{
   return bo->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_import(struct pan_kmod_dev *dev, int fd,
                                       uint32_t flags);

static inline int
pan_kmod_bo_export(struct pan_kmod_bo *bo)
{
   int fd;

   if (drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &fd)) {
      mesa_loge("drmPrimeHandleToFD() failed (err=%d)", errno);
      return -1;
   }

   if (bo->dev->ops->bo_export && bo->dev->ops->bo_export(bo, fd)) {
      close(fd);
      return -1;
   }

   bo->flags |= PAN_KMOD_BO_FLAG_EXPORTED;
   return fd;
}
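
/* Sharing sketch (illustrative; other_dev is an assumption): export a BO as
 * a dma-buf fd and import it on another device. BOs created with an
 * exclusive_vm cannot be exported.
 *
 *    int dmabuf = pan_kmod_bo_export(bo);
 *    if (dmabuf >= 0) {
 *       struct pan_kmod_bo *shared = pan_kmod_bo_import(other_dev, dmabuf, 0);
 *       close(dmabuf);
 *    }
 */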

static inline bool
pan_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
                 bool for_read_only_access)
{
   return bo->dev->ops->bo_wait(bo, timeout_ns, for_read_only_access);
}

static inline void
pan_kmod_bo_make_evictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_evictable)
      bo->dev->ops->bo_make_evictable(bo);
}

static inline bool
pan_kmod_bo_make_unevictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_unevictable)
      return bo->dev->ops->bo_make_unevictable(bo);

   return true;
}

static inline void *
pan_kmod_bo_mmap(struct pan_kmod_bo *bo, off_t bo_offset, size_t size, int prot,
                 int flags, void *host_addr)
{
   off_t mmap_offset;

   if (bo_offset + size > bo->size)
      return MAP_FAILED;

   mmap_offset = bo->dev->ops->bo_get_mmap_offset(bo);
   if (mmap_offset < 0)
      return MAP_FAILED;

   host_addr = os_mmap(host_addr, size, prot, flags, bo->dev->fd,
                       mmap_offset + bo_offset);
   if (host_addr == MAP_FAILED)
      mesa_loge("mmap() failed (err=%d)", errno);

   return host_addr;
}
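
/* CPU-mapping sketch (illustrative): allocate a 4 KiB BO and map it for
 * writes. BOs created with PAN_KMOD_BO_FLAG_NO_MMAP must not be mapped this
 * way.
 *
 *    struct pan_kmod_bo *bo = pan_kmod_bo_alloc(dev, NULL, 4096, 0);
 *    void *cpu = pan_kmod_bo_mmap(bo, 0, pan_kmod_bo_size(bo),
 *                                 PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
 *    if (cpu != MAP_FAILED) {
 *       memset(cpu, 0, pan_kmod_bo_size(bo));
 *       os_munmap(cpu, pan_kmod_bo_size(bo));
 *    }
 */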

static inline size_t
pan_kmod_bo_size(struct pan_kmod_bo *bo)
{
   return bo->size;
}

static inline uint32_t
pan_kmod_bo_handle(struct pan_kmod_bo *bo)
{
   return bo->handle;
}

static inline struct pan_kmod_vm *
pan_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags, uint64_t va_start,
                   uint64_t va_range)
{
   return dev->ops->vm_create(dev, flags, va_start, va_range);
}

static inline void
pan_kmod_vm_destroy(struct pan_kmod_vm *vm)
{
   vm->dev->ops->vm_destroy(vm);
}

static inline int
pan_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                 struct pan_kmod_vm_op *ops, uint32_t op_count)
{
   return vm->dev->ops->vm_bind(vm, mode, ops, op_count);
}
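
/* Unmapping sketch (illustrative; va and size are assumptions): tear down a
 * VA range synchronously. In PAN_KMOD_VM_OP_MODE_IMMEDIATE mode no sync
 * operations may be attached, so syncs.count is left at zero.
 *
 *    struct pan_kmod_vm_op op = {
 *       .type = PAN_KMOD_VM_OP_TYPE_UNMAP,
 *       .va = { .start = va, .size = size },
 *    };
 *    if (pan_kmod_vm_bind(vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1))
 *       mesa_loge("unmap failed");
 */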

static inline enum pan_kmod_vm_state
pan_kmod_vm_query_state(struct pan_kmod_vm *vm)
{
   if (vm->dev->ops->vm_query_state)
      return vm->dev->ops->vm_query_state(vm);

   return PAN_KMOD_VM_USABLE;
}

static inline uint32_t
pan_kmod_vm_handle(struct pan_kmod_vm *vm)
{
   return vm->handle;
}

static inline uint64_t
pan_kmod_query_timestamp(const struct pan_kmod_dev *dev)
{
   return dev->ops->query_timestamp(dev);
}

#if defined(__cplusplus)
} // extern "C"
#endif