/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

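/*
 * Report the GPU VA range usable for the given range type. Only
 * amdgpu_gpu_va_range_general is supported; the bounds come from the
 * kernel's device info query.
 */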
drm_public int amdgpu_va_range_query(amdgpu_device_handle dev,
				     enum amdgpu_gpu_va_range type,
				     uint64_t *start, uint64_t *end)
{
	if (type != amdgpu_gpu_va_range_general)
		return -EINVAL;

	*start = dev->dev_info.virtual_address_offset;
	*end = dev->dev_info.virtual_address_max;
	return 0;
}

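/*
 * Initialize a VA manager that hands out addresses from [start, max).
 * Free space is tracked as a list of holes, seeded with a single hole
 * spanning the whole range.
 */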
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment)
{
	struct amdgpu_bo_va_hole *n;

	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
	pthread_mutex_lock(&mgr->bo_va_mutex);
	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (n) {
		n->size = mgr->va_max - start;
		n->offset = start;
		list_add(&n->list, &mgr->va_holes);
	}
	/* If the calloc failed, the hole list stays empty and every
	 * allocation from this manager fails with -ENOMEM instead of
	 * dereferencing a NULL hole here.
	 */
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

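/*
 * Free every remaining hole and destroy the manager's mutex.
 */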
drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

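/*
 * Carve [start_va, end_va) out of an existing hole. Four cases: the
 * range sits in the middle of the hole (split it in two), is flush
 * with the hole's end (trim the top), is flush with the hole's start
 * (trim the bottom), or covers the hole exactly (remove it). Only the
 * split case can fail, with -ENOMEM.
 */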
static drm_private int
amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
			   uint64_t end_va)
{
	if (start_va > hole->offset && end_va - hole->offset < hole->size) {
		struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		if (!n)
			return -ENOMEM;

		n->size = start_va - hole->offset;
		n->offset = hole->offset;
		list_add(&n->list, &hole->list);

		hole->size -= (end_va - hole->offset);
		hole->offset = end_va;
	} else if (start_va > hole->offset) {
		hole->size = start_va - hole->offset;
	} else if (end_va - hole->offset < hole->size) {
		hole->size -= (end_va - hole->offset);
		hole->offset = end_va;
	} else {
		list_del(&hole->list);
		free(hole);
	}

	return 0;
}

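/*
 * Find a free range of @size bytes aligned to @alignment and carve it
 * out of the hole list. If @base_required is non-zero, only a hole
 * containing exactly [base_required, base_required + size) qualifies.
 * The hole list is sorted by decreasing offset, so the bottom-up
 * search walks it in reverse; the top-down search takes the highest
 * aligned offset that fits in each hole.
 */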
static drm_private int
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required,
		     bool search_from_top, uint64_t *va_out)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0;
	int ret;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	if (base_required % alignment)
		return -EINVAL;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	if (!search_from_top) {
		LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
			if (base_required) {
				if (hole->offset > base_required ||
				    (hole->offset + hole->size) < (base_required + size))
					continue;
				offset = base_required;
			} else {
				uint64_t waste = hole->offset % alignment;
				waste = waste ? alignment - waste : 0;
				offset = hole->offset + waste;
				if (offset >= (hole->offset + hole->size) ||
				    size > (hole->offset + hole->size) - offset) {
					continue;
				}
			}
			ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			*va_out = offset;
			return ret;
		}
	} else {
		LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
			if (base_required) {
				if (hole->offset > base_required ||
				    (hole->offset + hole->size) < (base_required + size))
					continue;
				offset = base_required;
			} else {
				if (size > hole->size)
					continue;

				offset = hole->offset + hole->size - size;
				offset -= offset % alignment;
				if (offset < hole->offset) {
					continue;
				}
			}

			ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			*va_out = offset;
			return ret;
		}
	}

	pthread_mutex_unlock(&mgr->bo_va_mutex);
	return -ENOMEM;
}

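/*
 * Return [va, va + size) to the hole list, merging it with an
 * adjacent hole on either side where possible; otherwise insert a new
 * hole at the sorted position. The list is kept ordered by decreasing
 * offset.
 */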
static drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole, *next;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	hole = container_of(&mgr->va_holes, hole, list);
	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
		if (next->offset < va)
			break;
		hole = next;
	}

	if (&hole->list != &mgr->va_holes) {
		/* Grow upper hole if it's adjacent */
		if (hole->offset == (va + size)) {
			hole->offset = va;
			hole->size += size;
			/* Merge lower hole if it's adjacent */
			if (next != hole &&
			    &next->list != &mgr->va_holes &&
			    (next->offset + next->size) == va) {
				next->size += hole->size;
				list_del(&hole->list);
				free(hole);
			}
			goto out;
		}
	}

	/* Grow lower hole if it's adjacent */
	if (next != hole && &next->list != &mgr->va_holes &&
	    (next->offset + next->size) == va) {
		next->size += size;
		goto out;
	}

	/* FIXME: on allocation failure we just lose virtual address space;
	 * maybe print a warning.
	 */
	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (next) {
		next->size = size;
		next->offset = va;
		list_add(&next->list, &hole->list);
	}

out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

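/*
 * Allocate a VA range from the device's VA manager; a thin wrapper
 * around amdgpu_va_range_alloc2().
 */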
drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
				     enum amdgpu_gpu_va_range va_range_type,
				     uint64_t size,
				     uint64_t va_base_alignment,
				     uint64_t va_base_required,
				     uint64_t *va_base_allocated,
				     amdgpu_va_handle *va_range_handle,
				     uint64_t flags)
{
	return amdgpu_va_range_alloc2(&dev->va_mgr, va_range_type, size,
				      va_base_alignment, va_base_required,
				      va_base_allocated, va_range_handle,
				      flags);
}

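/*
 * Pick the sub-manager matching the AMDGPU_VA_RANGE_HIGH and
 * AMDGPU_VA_RANGE_32_BIT flags and search it; AMDGPU_VA_RANGE_REPLAYABLE
 * selects a top-down search. A failed non-32-bit allocation falls back
 * to the matching 32-bit manager. On success the range is wrapped in a
 * newly allocated amdgpu_va handle.
 */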
drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
				      enum amdgpu_gpu_va_range va_range_type,
				      uint64_t size,
				      uint64_t va_base_alignment,
				      uint64_t va_base_required,
				      uint64_t *va_base_allocated,
				      amdgpu_va_handle *va_range_handle,
				      uint64_t flags)
{
	struct amdgpu_bo_va_mgr *vamgr;
	bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
	int ret;

	/* Clear the flag when the high VA manager is not initialized */
	if (flags & AMDGPU_VA_RANGE_HIGH && !va_mgr->vamgr_high_32.va_max)
		flags &= ~AMDGPU_VA_RANGE_HIGH;

	if (flags & AMDGPU_VA_RANGE_HIGH) {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &va_mgr->vamgr_high_32;
		else
			vamgr = &va_mgr->vamgr_high;
	} else {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &va_mgr->vamgr_32;
		else
			vamgr = &va_mgr->vamgr_low;
	}

	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
	size = ALIGN(size, vamgr->va_alignment);

	ret = amdgpu_vamgr_find_va(vamgr, size,
				   va_base_alignment, va_base_required,
				   search_from_top, va_base_allocated);

	if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
		/* fallback to 32bit address */
		if (flags & AMDGPU_VA_RANGE_HIGH)
			vamgr = &va_mgr->vamgr_high_32;
		else
			vamgr = &va_mgr->vamgr_32;
		ret = amdgpu_vamgr_find_va(vamgr, size,
					   va_base_alignment, va_base_required,
					   search_from_top, va_base_allocated);
	}

	if (!ret) {
		struct amdgpu_va *va;

		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
			return -ENOMEM;
		}
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		va->vamgr = vamgr;
		*va_range_handle = va;
	}

	return ret;
}

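/*
 * Release a VA range handle, returning its address range to the
 * manager it was allocated from. A NULL handle is a no-op.
 */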
drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;

	amdgpu_vamgr_free_va(va_range_handle->vamgr,
			     va_range_handle->address,
			     va_range_handle->size);
	free(va_range_handle);
	return 0;
}

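/*
 * Return the start address of an allocated VA range.
 */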
drm_public uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle)
{
	return va_handle->address;
}

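/*
 * Allocate a zeroed, uninitialized VA manager; call
 * amdgpu_va_manager_init() before using it. Returns NULL on
 * allocation failure.
 */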
drm_public amdgpu_va_manager_handle amdgpu_va_manager_alloc(void)
{
	amdgpu_va_manager_handle r = calloc(1, sizeof(struct amdgpu_va_manager));
	return r;
}

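/*
 * Split the VA space across the four sub-managers: the low range is
 * divided at the 4 GiB mark into vamgr_32 and vamgr_low, and the high
 * range at the end of the first 4 GiB-aligned window containing
 * high_va_offset into vamgr_high_32 and vamgr_high.
 */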
drm_public void amdgpu_va_manager_init(struct amdgpu_va_manager *va_mgr,
				       uint64_t low_va_offset, uint64_t low_va_max,
				       uint64_t high_va_offset, uint64_t high_va_max,
				       uint32_t virtual_address_alignment)
{
	uint64_t start, max;

	start = low_va_offset;
	max = MIN2(low_va_max, 0x100000000ULL);
	amdgpu_vamgr_init(&va_mgr->vamgr_32, start, max,
			  virtual_address_alignment);

	start = max;
	max = MAX2(low_va_max, 0x100000000ULL);
	amdgpu_vamgr_init(&va_mgr->vamgr_low, start, max,
			  virtual_address_alignment);

	start = high_va_offset;
	max = MIN2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
	amdgpu_vamgr_init(&va_mgr->vamgr_high_32, start, max,
			  virtual_address_alignment);

	start = max;
	max = MAX2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
	amdgpu_vamgr_init(&va_mgr->vamgr_high, start, max,
			  virtual_address_alignment);
}

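/*
 * Tear down all four sub-managers, freeing their hole lists.
 */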
drm_public void amdgpu_va_manager_deinit(struct amdgpu_va_manager *va_mgr)
{
	amdgpu_vamgr_deinit(&va_mgr->vamgr_32);
	amdgpu_vamgr_deinit(&va_mgr->vamgr_low);
	amdgpu_vamgr_deinit(&va_mgr->vamgr_high_32);
	amdgpu_vamgr_deinit(&va_mgr->vamgr_high);
}