/*
 * Copyright 2021 Alyssa Rosenzweig
 * Copyright 2019 Collabora, Ltd.
 * SPDX-License-Identifier: MIT
 */

#pragma once

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include "util/list.h"

struct agx_device;

enum agx_bo_flags {
   /* BO is shared across processes (imported or exported) and therefore cannot
    * be cached locally
    */
   AGX_BO_SHARED = 1 << 0,

   /* BO must be allocated in the low 32 bits of VA space */
   AGX_BO_LOW_VA = 1 << 1,

   /* BO is executable */
   AGX_BO_EXEC = 1 << 2,

   /* BO should be mapped write-back on the CPU (else, write-combine) */
   AGX_BO_WRITEBACK = 1 << 3,

   /* BO could potentially be shared (imported or exported) and therefore cannot
    * be allocated as private
    */
   AGX_BO_SHAREABLE = 1 << 4,

   /* BO is read-only from the GPU side */
   AGX_BO_READONLY = 1 << 5,
};
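
/* Note: the flags combine as a bitmask. As an illustrative sketch only (not a
 * statement about any particular caller), a BO holding shader code that must
 * be reachable from the low VA window could be requested with:
 *
 *    enum agx_bo_flags flags = AGX_BO_EXEC | AGX_BO_LOW_VA;
 */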

enum agx_va_flags {
   /* VA must be inside the USC region, otherwise unrestricted. */
   AGX_VA_USC = (1 << 0),

   /* VA must be fixed, otherwise allocated by the driver. */
   AGX_VA_FIXED = (1 << 1),
};

struct agx_va {
   enum agx_va_flags flags;
   uint64_t addr;
   uint64_t size_B;
};
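
/* Illustrative sketch only: a caller asking for a fixed mapping at a chosen
 * address would describe it like this (the address and size are made up):
 *
 *    struct agx_va va = {
 *       .flags = AGX_VA_FIXED,
 *       .addr = 0x1000000,
 *       .size_B = 65536,
 *    };
 */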

struct agx_ptr {
   /* If CPU mapped, CPU address. NULL if not mapped */
   void *cpu;

   /* Mapped GPU address */
   uint64_t gpu;
};

struct agx_bo {
   /* Must be first for casting */
   struct list_head bucket_link;

   /* Used to link the BO to the BO cache LRU list. */
   struct list_head lru_link;

   /* The time this BO was last used, so we can evict stale BOs. */
   time_t last_used;

   /* Creation attributes */
   enum agx_bo_flags flags;
   size_t size;
   size_t align;

   /* Mapping */
   struct agx_va *va;
   void *map;

   /* Process-local index */
   uint32_t handle;

   /* DMA-BUF fd clone for adding fences to imports/exports */
   int prime_fd;

   /* Current writer, if any (queue in upper 32 bits, syncobj in lower 32 bits) */
   uint64_t writer;

   /* Update atomically */
   int32_t refcnt;

   /* For debugging */
   const char *label;

   /* virtio blob_id */
   uint32_t blob_id;
   uint32_t vbo_res_id;
};
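
/* Illustrative sketch only (assumes the BO was created with a CPU mapping):
 * the CPU writes through `map`, while work submitted to the GPU references
 * the virtual address recorded in `va`:
 *
 *    memcpy(bo->map, data, data_size);
 *    uint64_t gpu_addr = bo->va->addr;
 */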

static inline uint32_t
agx_bo_writer_syncobj(uint64_t writer)
{
   /* Syncobj handle lives in the lower 32 bits */
   return (uint32_t)writer;
}

static inline uint32_t
agx_bo_writer_queue(uint64_t writer)
{
   /* Queue ID lives in the upper 32 bits */
   return writer >> 32;
}

static inline uint64_t
agx_bo_writer(uint32_t queue, uint32_t syncobj)
{
   return (((uint64_t)queue) << 32) | syncobj;
}
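
/* Sketch of how the packed writer word round-trips (the queue and syncobj
 * values are made up):
 *
 *    uint64_t w = agx_bo_writer(3, 71);
 *    assert(agx_bo_writer_queue(w) == 3);
 *    assert(agx_bo_writer_syncobj(w) == 71);
 */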

struct agx_bo *agx_bo_create(struct agx_device *dev, unsigned size,
                             unsigned align, enum agx_bo_flags flags,
                             const char *label);
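
/* Illustrative sketch only: allocating a page-sized BO with default placement
 * and checking for failure (the size, alignment, and label are made up):
 *
 *    struct agx_bo *bo = agx_bo_create(dev, 4096, 0, 0, "scratch");
 *    if (!bo)
 *       return;  // allocation failed
 */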

void agx_bo_reference(struct agx_bo *bo);
void agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo);
struct agx_bo *agx_bo_import(struct agx_device *dev, int fd);
int agx_bo_export(struct agx_device *dev, struct agx_bo *bo);
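
/* Illustrative sketch only: references follow the usual acquire/release
 * pairing; the device is passed on release, presumably so the BO can be
 * recycled or freed once the count drops to zero:
 *
 *    agx_bo_reference(bo);         // take a reference for another holder
 *    ...
 *    agx_bo_unreference(dev, bo);  // drop it when done
 */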

void agx_bo_free(struct agx_device *dev, struct agx_bo *bo);
struct agx_bo *agx_bo_cache_fetch(struct agx_device *dev, size_t size,
                                  size_t align, uint32_t flags,
                                  const bool dontwait);
void agx_bo_cache_evict_all(struct agx_device *dev);
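
/* Illustrative sketch only: a plausible allocation path tries the BO cache
 * first and falls back to a fresh allocation; the real policy lives in the
 * implementation, not in this header:
 *
 *    struct agx_bo *bo = agx_bo_cache_fetch(dev, size, align, flags, true);
 *    if (!bo)
 *       bo = agx_bo_create(dev, size, align, flags, "fallback");
 */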