/*
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "panvk_mempool.h"
#include "panvk_priv_bo.h"

#include "kmod/pan_kmod.h"

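/* Release every BO cached in the BO pool's free list. */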
void
panvk_bo_pool_cleanup(struct panvk_bo_pool *bo_pool)
{
   list_for_each_entry_safe(struct panvk_priv_bo, bo, &bo_pool->free_bos,
                            node) {
      list_del(&bo->node);
      panvk_priv_bo_unref(bo);
   }
}

/* Knockoff u_upload_mgr. Uploads wherever we left off, allocating new backing
 * BOs when needed.
 *
 * In "owned" mode, a single parent owns the entire pool, and the pool owns
 * all created BOs. Every BO is tracked, and its handle can be collected with
 * panvk_pool_get_bo_handles(). Freeing occurs at the level of an entire pool.
 * This is useful for streaming uploads, where the batch owns the pool.
 *
 * In "unowned" mode, the pool is freestanding. It does not track created BOs
 * or hold references to them; instead, the consumer must manage the BOs it
 * allocates from the pool. This is more flexible, enabling non-transient CSO
 * state or shader code to be packed with conservative lifetime handling.
 */

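/* A minimal usage sketch for an owned, batch-lifetime pool. The property
 * values below are illustrative; the actual field set is defined in
 * panvk_mempool.h:
 *
 *    struct panvk_pool_properties props = {
 *       .create_flags = 0,
 *       .slab_size = 16 * 1024,
 *       .owns_bos = true,
 *       .prealloc = false,
 *       .needs_locking = false,
 *    };
 *    struct panvk_pool pool;
 *
 *    panvk_pool_init(&pool, dev, NULL, &props);
 *
 *    struct panvk_pool_alloc_info info = {.size = 256, .alignment = 64};
 *    struct panvk_priv_mem mem = panvk_pool_alloc_mem(&pool, info);
 *
 *    ...use mem until the batch completes...
 *
 *    panvk_pool_cleanup(&pool);
 */
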
static struct panvk_priv_bo *
panvk_pool_alloc_backing(struct panvk_pool *pool, size_t sz)
{
   size_t bo_sz = ALIGN_POT(MAX2(pool->base.slab_size, sz), 4096);
   struct panvk_priv_bo *bo;

   /* If there's a free BO in our BO pool, pick it. */
   if (pool->bo_pool && bo_sz == pool->base.slab_size &&
       !list_is_empty(&pool->bo_pool->free_bos)) {
      bo =
         list_first_entry(&pool->bo_pool->free_bos, struct panvk_priv_bo, node);
      list_del(&bo->node);
   } else {
      /* We don't know what the BO will be used for, so let's flag it
       * RW and attach it to both the fragment and vertex/tiler jobs.
       * TODO: if we want fine-grained BO assignment we should pass
       * flags to this function and keep the read/write,
       * fragment/vertex+tiler pools separate.
       */
      bo = panvk_priv_bo_create(pool->dev, bo_sz, pool->props.create_flags,
                                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   }

   if (bo == NULL)
      return NULL;

   if (pool->props.owns_bos) {
      if (pan_kmod_bo_size(bo->bo) == pool->base.slab_size)
         list_addtail(&bo->node, &pool->bos);
      else
         list_addtail(&bo->node, &pool->big_bos);
      pool->bo_count++;
   }

   size_t new_remaining_size = pan_kmod_bo_size(bo->bo) - sz;
   size_t prev_remaining_size =
      pool->transient_bo
         ? pan_kmod_bo_size(pool->transient_bo->bo) - pool->transient_offset
         : 0;

   /* If there's less room in the new BO after the allocation than in the
    * previous one, we stick to the previous one. We also don't hold on to
    * BOs that are bigger than the pool allocation granularity, to avoid
    * memory fragmentation (retaining a big BO which has just one tiny
    * allocation active is not great). */
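   /* For instance, with a 4 KiB slab size, a 10 KiB request gets a dedicated,
    * page-aligned 12 KiB BO. In unowned mode that BO is handed back without
    * becoming the transient BO, so it can be freed as soon as its single
    * allocation is released. */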
   if (prev_remaining_size < new_remaining_size &&
       (pool->props.owns_bos || bo_sz <= pool->base.slab_size)) {
      if (!pool->props.owns_bos)
         panvk_priv_bo_unref(pool->transient_bo);

      pool->transient_bo = bo;
      pool->transient_offset = 0;
   }

   return bo;
}

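/* Allocate info.size bytes from the pool, aligned to info.alignment (which
 * must be a power of two). In unowned mode, the returned memory holds a
 * reference on its backing BO that the caller is responsible for releasing. */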
struct panvk_priv_mem
panvk_pool_alloc_mem(struct panvk_pool *pool, struct panvk_pool_alloc_info info)
{
   assert(info.alignment == util_next_power_of_two(info.alignment));

   if (pool->props.needs_locking)
      simple_mtx_lock(&pool->lock);

   /* Find or create a suitable BO */
   struct panvk_priv_bo *bo = pool->transient_bo;
   unsigned offset = ALIGN_POT(pool->transient_offset, info.alignment);

   /* If we don't fit, allocate a new backing */
   if (unlikely(bo == NULL || (offset + info.size) >= pool->base.slab_size)) {
      bo = panvk_pool_alloc_backing(pool, info.size);
      offset = 0;
   }

   if (bo != NULL && pool->transient_bo == bo) {
      pool->transient_offset = offset + info.size;
      if (!pool->props.owns_bos)
         panvk_priv_bo_ref(bo);
   }

   struct panvk_priv_mem ret = {
      .bo = bo,
      .offset = offset,
   };

   if (pool->props.needs_locking)
      simple_mtx_unlock(&pool->lock);

   return ret;
}

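/* Thin wrapper used to instantiate the generic pan_pool allocator interface
 * (see PAN_POOL_ALLOCATOR below). Only valid on owned pools. */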
static struct panfrost_ptr
panvk_pool_alloc_aligned(struct panvk_pool *pool, size_t sz, unsigned alignment)
{
   /* We just return the host/dev address, so callers can't
    * release the BO ref they acquired. */
   assert(pool->props.owns_bos);

   struct panvk_pool_alloc_info info = {
      .size = sz,
      .alignment = alignment,
   };
   struct panvk_priv_mem mem = panvk_pool_alloc_mem(pool, info);

   return (struct panfrost_ptr){
      .cpu = panvk_priv_mem_host_addr(mem),
      .gpu = panvk_priv_mem_dev_addr(mem),
   };
}
PAN_POOL_ALLOCATOR(struct panvk_pool, panvk_pool_alloc_aligned)

void
panvk_pool_init(struct panvk_pool *pool, struct panvk_device *dev,
                struct panvk_bo_pool *bo_pool,
                const struct panvk_pool_properties *props)
{
   memset(pool, 0, sizeof(*pool));
   pool->props = *props;
   simple_mtx_init(&pool->lock, mtx_plain);
   pan_pool_init(&pool->base, pool->props.slab_size);
   pool->dev = dev;
   pool->bo_pool = bo_pool;

   list_inithead(&pool->bos);
   list_inithead(&pool->big_bos);

   if (props->prealloc)
      panvk_pool_alloc_backing(pool, pool->base.slab_size);
}

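/* Return the pool to its just-initialized state. Slab-sized BOs go back to
 * the shared BO pool when one is attached, and are unreferenced otherwise;
 * oversized BOs are always released. */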
void
panvk_pool_reset(struct panvk_pool *pool)
{
   if (pool->bo_pool) {
      list_splicetail(&pool->bos, &pool->bo_pool->free_bos);
      list_inithead(&pool->bos);
   } else {
      list_for_each_entry_safe(struct panvk_priv_bo, bo, &pool->bos, node) {
         list_del(&bo->node);
         panvk_priv_bo_unref(bo);
      }
   }

   list_for_each_entry_safe(struct panvk_priv_bo, bo, &pool->big_bos, node) {
      list_del(&bo->node);
      panvk_priv_bo_unref(bo);
   }

   if (!pool->props.owns_bos)
      panvk_priv_bo_unref(pool->transient_bo);

   pool->bo_count = 0;
   pool->transient_bo = NULL;
}

void
panvk_pool_cleanup(struct panvk_pool *pool)
{
   panvk_pool_reset(pool);
}

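/* Collect the kmod handle of every BO tracked by the pool. The handles array
 * must have room for at least pool->bo_count entries. Only meaningful for
 * owned pools, since unowned pools don't track the BOs they create. */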
void
panvk_pool_get_bo_handles(struct panvk_pool *pool, uint32_t *handles)
{
   unsigned idx = 0;

   list_for_each_entry(struct panvk_priv_bo, bo, &pool->bos, node)
      handles[idx++] = pan_kmod_bo_handle(bo->bo);

   list_for_each_entry(struct panvk_priv_bo, bo, &pool->big_bos, node)
      handles[idx++] = pan_kmod_bo_handle(bo->bo);
}