1 /*
2  * Copyright © 2011 Marek Olšák <[email protected]>
3  * Copyright © 2015 Advanced Micro Devices, Inc.
4  *
5  * SPDX-License-Identifier: MIT
6  */
7 
8 #include <sys/ioctl.h>
9 
10 #include "amdgpu_cs.h"
11 
12 #include "util/hash_table.h"
13 #include "util/os_time.h"
14 #include "util/u_hash_table.h"
15 #include "util/u_process.h"
16 #include "frontend/drm_driver.h"
17 #include "drm-uapi/amdgpu_drm.h"
18 #include "drm-uapi/dma-buf.h"
19 #include <xf86drm.h>
20 #include <stdio.h>
21 #include <inttypes.h>
22 
23 #ifndef AMDGPU_VA_RANGE_HIGH
24 #define AMDGPU_VA_RANGE_HIGH	0x2
25 #endif
26 
27 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
28 #define DEBUG_SPARSE_COMMITS 0
29 
30 struct amdgpu_sparse_backing_chunk {
31    uint32_t begin, end;
32 };
33 
34 static bool amdgpu_bo_fence_wait(struct amdgpu_winsys *aws,
35                                  struct pipe_fence_handle **fence,
36                                  uint64_t timeout, int64_t abs_timeout)
37 {
38    if (timeout == 0) {
39       bool idle = amdgpu_fence_wait(*fence, 0, false);
40 
41       if (!idle) {
42          simple_mtx_unlock(&aws->bo_fence_lock);
43          return false; /* busy */
44       }
45 
46       /* It's idle. Remove it from the ring to skip checking it again later. */
47       amdgpu_fence_reference(fence, NULL);
48    } else {
49       struct pipe_fence_handle *tmp_fence = NULL;
50       amdgpu_fence_reference(&tmp_fence, *fence);
51 
52       /* While waiting, unlock the mutex. */
53       simple_mtx_unlock(&aws->bo_fence_lock);
54 
55       bool idle = amdgpu_fence_wait(tmp_fence, abs_timeout, true);
56       if (!idle) {
57          amdgpu_fence_reference(&tmp_fence, NULL);
58          return false; /* busy */
59       }
60 
61       simple_mtx_lock(&aws->bo_fence_lock);
62       /* It's idle. Remove it from the ring to skip checking it again later. */
63       if (tmp_fence == *fence)
64          amdgpu_fence_reference(fence, NULL);
65       amdgpu_fence_reference(&tmp_fence, NULL);
66    }
67 
68    return true;
69 }
70 
71 static bool amdgpu_bo_wait(struct radeon_winsys *rws,
72                            struct pb_buffer_lean *_buf, uint64_t timeout,
73                            unsigned usage)
74 {
75    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
76    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
77    int64_t abs_timeout = 0;
78 
79    assert(p_atomic_read(&bo->num_active_ioctls) >= 0);
80 
81    if (timeout == 0) {
82       if (p_atomic_read(&bo->num_active_ioctls))
83          return false;
84 
85    } else {
86       abs_timeout = os_time_get_absolute_timeout(timeout);
87 
88       /* Wait if any ioctl is being submitted with this buffer. */
89       if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
90          return false;
91    }
92 
93    if (is_real_bo(bo) && (get_real_bo(bo)->is_shared || get_real_bo(bo)->slab_has_busy_alt_fences)) {
94       /* We can't use user fences for shared buffers, because user fences are local to this
95        * process only. If we want to wait for all buffer uses in all processes, we have to
96        * use amdgpu_bo_wait_for_idle.
97        *
98        * Additionally, if this is a slab buffer and one of the slab entries has non-NULL
99        * alt_fence, we can't easily wait for that here. Instead, use the kernel ioctl to wait
100        * for the buffer.
101        */
102       bool buffer_busy = true;
103       int r;
104 
105       r = amdgpu_bo_wait_for_idle(get_real_bo(bo)->bo_handle, timeout, &buffer_busy);
106       if (r)
107          fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__, r);
108 
109       if (!buffer_busy)
110          get_real_bo(bo)->slab_has_busy_alt_fences = false;
111       return !buffer_busy;
112    }
113 
114    simple_mtx_lock(&aws->bo_fence_lock);
115 
116    u_foreach_bit(i, bo->fences.valid_fence_mask) {
117       struct pipe_fence_handle **fence = get_fence_from_ring(aws, &bo->fences, i);
118 
119       if (fence) {
120          /* This also unlocks the mutex on failure. */
121          if (!amdgpu_bo_fence_wait(aws, fence, timeout, abs_timeout))
122             return false;
123       }
124 
125       bo->fences.valid_fence_mask &= ~BITFIELD_BIT(i); /* remove the fence from the BO */
126    }
127 
128    /* Also wait for alt_fence. */
129    if (bo->alt_fence) {
130       /* This also unlocks the mutex on failure. */
131       if (!amdgpu_bo_fence_wait(aws, &bo->alt_fence, timeout, abs_timeout))
132          return false;
133    }
134 
135    simple_mtx_unlock(&aws->bo_fence_lock);
136    return true; /* idle */
137 }
138 
139 static inline unsigned get_slab_entry_offset(struct amdgpu_winsys_bo *bo)
140 {
141    struct amdgpu_bo_slab_entry *slab_entry_bo = get_slab_entry_bo(bo);
142    struct amdgpu_bo_real_reusable_slab *slab_bo =
143       (struct amdgpu_bo_real_reusable_slab *)get_slab_entry_real_bo(bo);
144    unsigned entry_index = slab_entry_bo - slab_bo->entries;
145 
146    return slab_bo->slab.entry_size * entry_index;
147 }
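/* Illustrative example: a slab entry at index 3 of a slab whose entry_size is
 * 4096 bytes sits at offset 3 * 4096 = 12288 within the backing real BO;
 * amdgpu_bo_map() adds this offset to the backing buffer's CPU pointer.
 */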
148 
149 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
150       struct pb_buffer_lean *buf)
151 {
152    return ((struct amdgpu_winsys_bo*)buf)->base.placement;
153 }
154 
155 static enum radeon_bo_flag amdgpu_bo_get_flags(
156       struct pb_buffer_lean *buf)
157 {
158    return ((struct amdgpu_winsys_bo*)buf)->base.usage;
159 }
160 
161 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
162 {
163    bo->fences.valid_fence_mask = 0;
164    amdgpu_fence_reference(&bo->alt_fence, NULL);
165 }
166 
167 void amdgpu_bo_destroy(struct amdgpu_winsys *aws, struct pb_buffer_lean *_buf)
168 {
169    struct amdgpu_bo_real *bo = get_real_bo(amdgpu_winsys_bo(_buf));
170    struct amdgpu_screen_winsys *sws_iter;
171 
172    simple_mtx_lock(&aws->bo_export_table_lock);
173 
174    /* amdgpu_bo_from_handle might have revived the bo */
175    if (p_atomic_read(&bo->b.base.reference.count)) {
176       simple_mtx_unlock(&aws->bo_export_table_lock);
177       return;
178    }
179 
180    _mesa_hash_table_remove_key(aws->bo_export_table, bo->bo_handle);
181 
182    if (bo->b.base.placement & RADEON_DOMAIN_VRAM_GTT) {
183       amdgpu_bo_va_op(bo->bo_handle, 0, bo->b.base.size,
184                       amdgpu_va_get_start_addr(bo->va_handle), 0, AMDGPU_VA_OP_UNMAP);
185       amdgpu_va_range_free(bo->va_handle);
186    }
187 
188    simple_mtx_unlock(&aws->bo_export_table_lock);
189 
190    if (!bo->is_user_ptr && bo->cpu_ptr) {
191       bo->cpu_ptr = NULL;
192       amdgpu_bo_unmap(&aws->dummy_sws.base, &bo->b.base);
193    }
194    assert(bo->is_user_ptr || bo->map_count == 0);
195 
196    amdgpu_bo_free(bo->bo_handle);
197 
198 #if MESA_DEBUG
199    if (aws->debug_all_bos) {
200       simple_mtx_lock(&aws->global_bo_list_lock);
201       list_del(&bo->global_list_item);
202       aws->num_buffers--;
203       simple_mtx_unlock(&aws->global_bo_list_lock);
204    }
205 #endif
206 
207    /* Close all KMS handles retrieved for other DRM file descriptions */
208    simple_mtx_lock(&aws->sws_list_lock);
209    for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
210       struct hash_entry *entry;
211 
212       if (!sws_iter->kms_handles)
213          continue;
214 
215       entry = _mesa_hash_table_search(sws_iter->kms_handles, bo);
216       if (entry) {
217          struct drm_gem_close args = { .handle = (uintptr_t)entry->data };
218 
219          drmIoctl(sws_iter->fd, DRM_IOCTL_GEM_CLOSE, &args);
220          _mesa_hash_table_remove(sws_iter->kms_handles, entry);
221       }
222    }
223    simple_mtx_unlock(&aws->sws_list_lock);
224 
225    amdgpu_bo_remove_fences(&bo->b);
226 
227    if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
228       aws->allocated_vram -= align64(bo->b.base.size, aws->info.gart_page_size);
229    else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
230       aws->allocated_gtt -= align64(bo->b.base.size, aws->info.gart_page_size);
231 
232    simple_mtx_destroy(&bo->map_lock);
233    FREE(bo);
234 }
235 
236 static void amdgpu_bo_destroy_or_cache(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
237 {
238    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
239    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
240 
241    assert(is_real_bo(bo)); /* slab buffers have a separate vtbl */
242 
243    if (bo->type >= AMDGPU_BO_REAL_REUSABLE)
244       pb_cache_add_buffer(&aws->bo_cache, &((struct amdgpu_bo_real_reusable*)bo)->cache_entry);
245    else
246       amdgpu_bo_destroy(aws, _buf);
247 }
248 
249 static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *aws)
250 {
251    pb_slabs_reclaim(&aws->bo_slabs);
252    pb_cache_release_all_buffers(&aws->bo_cache);
253 }
254 
255 static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_bo_real *bo, void **cpu)
256 {
257    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
258 
259    assert(!bo->is_user_ptr);
260 
261    int r = amdgpu_bo_cpu_map(bo->bo_handle, cpu);
262    if (r) {
263       /* Clean up buffer managers and try again. */
264       amdgpu_clean_up_buffer_managers(aws);
265       r = amdgpu_bo_cpu_map(bo->bo_handle, cpu);
266       if (r)
267          return false;
268    }
269 
270    if (p_atomic_inc_return(&bo->map_count) == 1) {
271       if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
272          aws->mapped_vram += bo->b.base.size;
273       else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
274          aws->mapped_gtt += bo->b.base.size;
275       aws->num_mapped_buffers++;
276    }
277 
278    return true;
279 }
280 
281 void *amdgpu_bo_map(struct radeon_winsys *rws,
282                     struct pb_buffer_lean *buf,
283                     struct radeon_cmdbuf *rcs,
284                     enum pipe_map_flags usage)
285 {
286    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
287    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
288    struct amdgpu_bo_real *real;
289    struct amdgpu_cs *cs = rcs ? amdgpu_cs(rcs) : NULL;
290 
291    assert(bo->type != AMDGPU_BO_SPARSE);
292 
293    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
294    if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
295       /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
296       if (usage & PIPE_MAP_DONTBLOCK) {
297          if (!(usage & PIPE_MAP_WRITE)) {
298             /* Mapping for read.
299              *
300              * Since we are mapping for read, we don't need to wait
301              * if the GPU is using the buffer for read too
302              * (neither one is changing it).
303              *
304              * Only check whether the buffer is being used for write. */
305             if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
306                                                                RADEON_USAGE_WRITE)) {
307                cs->flush_cs(cs->flush_data,
308 			    RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
309                return NULL;
310             }
311 
312             if (!amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
313                                 RADEON_USAGE_WRITE)) {
314                return NULL;
315             }
316          } else {
317             if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
318                cs->flush_cs(cs->flush_data,
319 			    RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
320                return NULL;
321             }
322 
323             if (!amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
324                                 RADEON_USAGE_READWRITE)) {
325                return NULL;
326             }
327          }
328       } else {
329          uint64_t time = os_time_get_nano();
330 
331          if (!(usage & PIPE_MAP_WRITE)) {
332             /* Mapping for read.
333              *
334              * Since we are mapping for read, we don't need to wait
335              * if the GPU is using the buffer for read too
336              * (neither one is changing it).
337              *
338              * Only check whether the buffer is being used for write. */
339             if (cs) {
340                if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
341                                                             RADEON_USAGE_WRITE)) {
342                   cs->flush_cs(cs->flush_data,
343 			       RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
344                } else {
345                   /* Try to avoid busy-waiting in amdgpu_bo_wait. */
346                   if (p_atomic_read(&bo->num_active_ioctls))
347                      amdgpu_cs_sync_flush(rcs);
348                }
349             }
350 
351             amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
352                            RADEON_USAGE_WRITE);
353          } else {
354             /* Mapping for write. */
355             if (cs) {
356                if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
357                   cs->flush_cs(cs->flush_data,
358 			       RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
359                } else {
360                   /* Try to avoid busy-waiting in amdgpu_bo_wait. */
361                   if (p_atomic_read(&bo->num_active_ioctls))
362                      amdgpu_cs_sync_flush(rcs);
363                }
364             }
365 
366             amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
367                            RADEON_USAGE_READWRITE);
368          }
369 
370          aws->buffer_wait_time += os_time_get_nano() - time;
371       }
372    }
373 
374    /* Buffer synchronization has been checked, now actually map the buffer. */
375    void *cpu = NULL;
376    uint64_t offset = 0;
377 
378    if (is_real_bo(bo)) {
379       real = get_real_bo(bo);
380    } else {
381       real = get_slab_entry_real_bo(bo);
382       offset = get_slab_entry_offset(bo);
383    }
384 
385    if (usage & RADEON_MAP_TEMPORARY) {
386       if (real->is_user_ptr) {
387          cpu = real->cpu_ptr;
388       } else {
389          if (!amdgpu_bo_do_map(rws, real, &cpu))
390             return NULL;
391       }
392    } else {
393       cpu = p_atomic_read(&real->cpu_ptr);
394       if (!cpu) {
395          simple_mtx_lock(&real->map_lock);
396          /* Must re-check due to the possibility of a race. Re-check need not
397           * be atomic thanks to the lock. */
398          cpu = real->cpu_ptr;
399          if (!cpu) {
400             if (!amdgpu_bo_do_map(rws, real, &cpu)) {
401                simple_mtx_unlock(&real->map_lock);
402                return NULL;
403             }
404             p_atomic_set(&real->cpu_ptr, cpu);
405          }
406          simple_mtx_unlock(&real->map_lock);
407       }
408    }
409 
410    return (uint8_t*)cpu + offset;
411 }
412 
413 void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *buf)
414 {
415    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
416    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
417    struct amdgpu_bo_real *real;
418 
419    assert(bo->type != AMDGPU_BO_SPARSE);
420 
421    real = is_real_bo(bo) ? get_real_bo(bo) : get_slab_entry_real_bo(bo);
422 
423    if (real->is_user_ptr)
424       return;
425 
426    assert(real->map_count != 0 && "too many unmaps");
427    if (p_atomic_dec_zero(&real->map_count)) {
428       assert(!real->cpu_ptr &&
429              "too many unmaps or forgot RADEON_MAP_TEMPORARY flag");
430 
431       if (real->b.base.placement & RADEON_DOMAIN_VRAM)
432          aws->mapped_vram -= real->b.base.size;
433       else if (real->b.base.placement & RADEON_DOMAIN_GTT)
434          aws->mapped_gtt -= real->b.base.size;
435       aws->num_mapped_buffers--;
436    }
437 
438    amdgpu_bo_cpu_unmap(real->bo_handle);
439 }
440 
441 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys *aws, struct amdgpu_bo_real *bo)
442 {
443 #if MESA_DEBUG
444    if (aws->debug_all_bos) {
445       simple_mtx_lock(&aws->global_bo_list_lock);
446       list_addtail(&bo->global_list_item, &aws->global_bo_list);
447       aws->num_buffers++;
448       simple_mtx_unlock(&aws->global_bo_list_lock);
449    }
450 #endif
451 }
452 
453 static unsigned amdgpu_get_optimal_alignment(struct amdgpu_winsys *aws,
454                                              uint64_t size, unsigned alignment)
455 {
456    /* Increase the alignment for faster address translation and better memory
457     * access pattern.
458     */
459    if (size >= aws->info.pte_fragment_size) {
460       alignment = MAX2(alignment, aws->info.pte_fragment_size);
461    } else if (size) {
462       unsigned msb = util_last_bit(size);
463 
464       alignment = MAX2(alignment, 1u << (msb - 1));
465    }
466    return alignment;
467 }
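/* Illustrative example (assumes a 2 MiB PTE fragment size): a 48 KiB request has
 * util_last_bit(49152) == 16, so its alignment is raised to at least
 * 1 << 15 = 32 KiB, while a 4 MiB request is aligned to at least the full 2 MiB
 * fragment size.
 */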
468 
469 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
470                                                  uint64_t size,
471                                                  unsigned alignment,
472                                                  enum radeon_bo_domain initial_domain,
473                                                  unsigned flags,
474                                                  int heap)
475 {
476    struct amdgpu_bo_alloc_request request = {0};
477    amdgpu_bo_handle buf_handle;
478    uint64_t va = 0;
479    struct amdgpu_bo_real *bo;
480    amdgpu_va_handle va_handle = NULL;
481    int r;
482 
483    /* VRAM or GTT must be specified, but not both at the same time. */
484    assert(util_bitcount(initial_domain & (RADEON_DOMAIN_VRAM_GTT |
485                                           RADEON_DOMAIN_GDS |
486                                           RADEON_DOMAIN_OA)) == 1);
487 
488    alignment = amdgpu_get_optimal_alignment(aws, size, alignment);
489 
490    if (heap >= 0 && flags & RADEON_FLAG_NO_INTERPROCESS_SHARING) {
491       struct amdgpu_bo_real_reusable *new_bo;
492       bool slab_backing = flags & RADEON_FLAG_WINSYS_SLAB_BACKING;
493 
494       if (slab_backing)
495          new_bo = (struct amdgpu_bo_real_reusable *)CALLOC_STRUCT(amdgpu_bo_real_reusable_slab);
496       else
497          new_bo = CALLOC_STRUCT(amdgpu_bo_real_reusable);
498 
499       if (!new_bo)
500          return NULL;
501 
502       bo = &new_bo->b;
503       pb_cache_init_entry(&aws->bo_cache, &new_bo->cache_entry, &bo->b.base, heap);
504       bo->b.type = slab_backing ? AMDGPU_BO_REAL_REUSABLE_SLAB : AMDGPU_BO_REAL_REUSABLE;
505    } else {
506       bo = CALLOC_STRUCT(amdgpu_bo_real);
507       if (!bo)
508          return NULL;
509 
510       bo->b.type = AMDGPU_BO_REAL;
511    }
512 
513    request.alloc_size = size;
514    request.phys_alignment = alignment;
515 
516    if (initial_domain & RADEON_DOMAIN_VRAM) {
517       request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
518 
519       /* Since VRAM and GTT have almost the same performance on APUs, we could
520        * just set GTT. However, in order to decrease GTT(RAM) usage, which is
521        * shared with the OS, allow VRAM placements too. The idea is not to use
522        * VRAM usefully, but to use it so that it's not unused and wasted.
523        */
524       if (!aws->info.has_dedicated_vram)
525          request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
526    }
527 
528    if (initial_domain & RADEON_DOMAIN_GTT)
529       request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
530    if (initial_domain & RADEON_DOMAIN_GDS)
531       request.preferred_heap |= AMDGPU_GEM_DOMAIN_GDS;
532    if (initial_domain & RADEON_DOMAIN_OA)
533       request.preferred_heap |= AMDGPU_GEM_DOMAIN_OA;
534 
535    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
536       request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
537    if (flags & RADEON_FLAG_GTT_WC)
538       request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
539 
540    if (flags & RADEON_FLAG_DISCARDABLE &&
541        aws->info.drm_minor >= 47)
542       request.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
543 
544    if (aws->zero_all_vram_allocs &&
545        (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
546       request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
547 
548    if ((flags & RADEON_FLAG_ENCRYPTED) &&
549        aws->info.has_tmz_support) {
550       request.flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
551 
552       if (!(flags & RADEON_FLAG_DRIVER_INTERNAL)) {
553          struct amdgpu_screen_winsys *sws_iter;
554          simple_mtx_lock(&aws->sws_list_lock);
555          for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
556             *((bool*) &sws_iter->base.uses_secure_bos) = true;
557          }
558          simple_mtx_unlock(&aws->sws_list_lock);
559       }
560    }
561 
562    if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
563       request.flags |= AMDGPU_GEM_CREATE_GFX12_DCC;
564 
565    r = amdgpu_bo_alloc(aws->dev, &request, &buf_handle);
566    if (r) {
567       fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
568       fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
569       fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
570       fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
571       fprintf(stderr, "amdgpu:    flags   : %" PRIx64 "\n", request.flags);
572       goto error_bo_alloc;
573    }
574 
575    if (initial_domain & RADEON_DOMAIN_VRAM_GTT) {
576       unsigned va_gap_size = aws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
577 
578       r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
579                                 size + va_gap_size, alignment,
580                                 0, &va, &va_handle,
581                                 (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
582                                 AMDGPU_VA_RANGE_HIGH);
583       if (r)
584          goto error_va_alloc;
585 
586       unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
587                           AMDGPU_VM_PAGE_WRITEABLE |
588                           AMDGPU_VM_PAGE_EXECUTABLE;
589 
590       if (flags & RADEON_FLAG_GL2_BYPASS)
591          vm_flags |= AMDGPU_VM_MTYPE_UC;
592 
593       r = amdgpu_bo_va_op_raw(aws->dev, buf_handle, 0, size, va, vm_flags, AMDGPU_VA_OP_MAP);
594       if (r)
595          goto error_va_map;
596    }
597 
598    simple_mtx_init(&bo->map_lock, mtx_plain);
599    pipe_reference_init(&bo->b.base.reference, 1);
600    bo->b.base.placement = initial_domain;
601    bo->b.base.alignment_log2 = util_logbase2(alignment);
602    bo->b.base.usage = flags;
603    bo->b.base.size = size;
604    bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
605    bo->bo_handle = buf_handle;
606    bo->va_handle = va_handle;
607 
608    if (initial_domain & RADEON_DOMAIN_VRAM)
609       aws->allocated_vram += align64(size, aws->info.gart_page_size);
610    else if (initial_domain & RADEON_DOMAIN_GTT)
611       aws->allocated_gtt += align64(size, aws->info.gart_page_size);
612 
613    amdgpu_bo_export(bo->bo_handle, amdgpu_bo_handle_type_kms, &bo->kms_handle);
614    amdgpu_add_buffer_to_global_list(aws, bo);
615 
616    return &bo->b;
617 
618 error_va_map:
619    amdgpu_va_range_free(va_handle);
620 
621 error_va_alloc:
622    amdgpu_bo_free(buf_handle);
623 
624 error_bo_alloc:
625    FREE(bo);
626    return NULL;
627 }
628 
629 bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *aws, struct pb_buffer_lean *_buf)
630 {
631    return amdgpu_bo_wait(&aws->dummy_sws.base, _buf, 0, RADEON_USAGE_READWRITE);
632 }
633 
634 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
635 {
636    struct amdgpu_bo_slab_entry *bo = container_of(entry, struct amdgpu_bo_slab_entry, entry);
637 
638    return amdgpu_bo_can_reclaim(priv, &bo->b.base);
639 }
640 
641 static unsigned get_slab_wasted_size(struct amdgpu_winsys *aws, struct amdgpu_bo_slab_entry *bo)
642 {
643    assert(bo->b.base.size <= bo->entry.slab->entry_size);
644    assert(bo->b.base.size < (1 << bo->b.base.alignment_log2) ||
645           bo->b.base.size < 1 << aws->bo_slabs.min_order ||
646           bo->b.base.size > bo->entry.slab->entry_size / 2);
647    return bo->entry.slab->entry_size - bo->b.base.size;
648 }
649 
650 static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
651 {
652    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
653    struct amdgpu_bo_slab_entry *bo = get_slab_entry_bo(amdgpu_winsys_bo(_buf));
654 
655    if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
656       aws->slab_wasted_vram -= get_slab_wasted_size(aws, bo);
657    else
658       aws->slab_wasted_gtt -= get_slab_wasted_size(aws, bo);
659 
660    pb_slab_free(&aws->bo_slabs, &bo->entry);
661 }
662 
663 /* Return the power of two size of a slab entry matching the input size. */
664 static unsigned get_slab_pot_entry_size(struct amdgpu_winsys *aws, unsigned size)
665 {
666    unsigned entry_size = util_next_power_of_two(size);
667    unsigned min_entry_size = 1 << aws->bo_slabs.min_order;
668 
669    return MAX2(entry_size, min_entry_size);
670 }
671 
672 /* Return the slab entry alignment. */
673 static unsigned get_slab_entry_alignment(struct amdgpu_winsys *aws, unsigned size)
674 {
675    unsigned entry_size = get_slab_pot_entry_size(aws, size);
676 
677    if (size <= entry_size * 3 / 4)
678       return entry_size / 4;
679 
680    return entry_size;
681 }
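/* Illustrative example (assumes the slab min_order doesn't clamp these sizes):
 * a 96 KiB request rounds up to a 128 KiB power-of-two entry; since
 * 96 KiB <= 3/4 * 128 KiB, an alignment of 128 KiB / 4 = 32 KiB is enough.
 * A 100 KiB request exceeds the 3/4 threshold and keeps the full 128 KiB
 * alignment.
 */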
682 
683 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
684                                      unsigned group_index)
685 {
686    struct amdgpu_winsys *aws = priv;
687    enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
688    enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
689 
690    /* Determine the slab buffer size. */
691    unsigned max_entry_size = 1 << (aws->bo_slabs.min_order + aws->bo_slabs.num_orders - 1);
692 
693    assert(entry_size <= max_entry_size);
694 
695    /* The slab size is twice the size of the largest possible entry. */
696    unsigned slab_size = max_entry_size * 2;
697 
698    if (!util_is_power_of_two_nonzero(entry_size)) {
699       assert(util_is_power_of_two_nonzero(entry_size * 4 / 3));
700 
701       /* If the entry size is 3/4 of a power of two, we would waste space and not gain
702        * anything if we allocated only twice the power of two for the backing buffer:
703        *   2 * 3/4 = 1.5 usable with buffer size 2
704        *
705        * Allocating 5 times the entry size leads us to the next power of two and results
706        * in a much better memory utilization:
707        *   5 * 3/4 = 3.75 usable with buffer size 4
708        */
709       if (entry_size * 5 > slab_size)
710          slab_size = util_next_power_of_two(entry_size * 5);
711    }
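   /* Illustrative example (assumes a hypothetical 256 KiB max entry size, i.e. a
    * 512 KiB default slab): a 192 KiB entry (3/4 of 256 KiB) would fit only twice
    * (384 KiB used). Since 5 * 192 KiB = 960 KiB > 512 KiB, the slab grows to the
    * next power of two, 1 MiB, which holds 5 entries (960 KiB used) before the
    * PTE-fragment-size clamp below.
    */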
712 
713    /* The largest slab should have the same size as the PTE fragment
714     * size to get faster address translation.
715     */
716    slab_size = MAX2(slab_size, aws->info.pte_fragment_size);
717 
718    flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING |
719             RADEON_FLAG_NO_SUBALLOC |
720             RADEON_FLAG_WINSYS_SLAB_BACKING;
721 
722    struct amdgpu_bo_real_reusable_slab *slab_bo =
723       (struct amdgpu_bo_real_reusable_slab*)amdgpu_bo_create(aws, slab_size, slab_size,
724                                                              domains, flags);
725    if (!slab_bo)
726       return NULL;
727 
728    /* The slab is not suballocated. */
729    assert(is_real_bo(&slab_bo->b.b.b));
730    assert(slab_bo->b.b.b.type == AMDGPU_BO_REAL_REUSABLE_SLAB);
731 
732    /* We can get a buffer from pb_cache that is slightly larger. */
733    slab_size = slab_bo->b.b.b.base.size;
734 
735    slab_bo->slab.num_entries = slab_size / entry_size;
736    slab_bo->slab.num_free = slab_bo->slab.num_entries;
737    slab_bo->slab.group_index = group_index;
738    slab_bo->slab.entry_size = entry_size;
739    slab_bo->entries = os_malloc_aligned(slab_bo->slab.num_entries * sizeof(*slab_bo->entries),
740                                         CACHE_LINE_SIZE);
741    if (!slab_bo->entries)
742       goto fail;
743 
744    memset(slab_bo->entries, 0, slab_bo->slab.num_entries * sizeof(*slab_bo->entries));
745    list_inithead(&slab_bo->slab.free);
746 
747    for (unsigned i = 0; i < slab_bo->slab.num_entries; ++i) {
748       struct amdgpu_bo_slab_entry *bo = &slab_bo->entries[i];
749 
750       bo->b.base.placement = domains;
751       bo->b.base.alignment_log2 = util_logbase2(get_slab_entry_alignment(aws, entry_size));
752       bo->b.base.size = entry_size;
753       bo->b.type = AMDGPU_BO_SLAB_ENTRY;
754 
755       bo->entry.slab = &slab_bo->slab;
756       list_addtail(&bo->entry.head, &slab_bo->slab.free);
757    }
758 
759    /* Wasted alignment due to slabs with 3/4 allocations being aligned to a power of two. */
760    assert(slab_bo->slab.num_entries * entry_size <= slab_size);
761    if (domains & RADEON_DOMAIN_VRAM)
762       aws->slab_wasted_vram += slab_size - slab_bo->slab.num_entries * entry_size;
763    else
764       aws->slab_wasted_gtt += slab_size - slab_bo->slab.num_entries * entry_size;
765 
766    return &slab_bo->slab;
767 
768 fail:
769    amdgpu_winsys_bo_reference(aws, (struct amdgpu_winsys_bo**)&slab_bo, NULL);
770    return NULL;
771 }
772 
773 void amdgpu_bo_slab_free(struct amdgpu_winsys *aws, struct pb_slab *slab)
774 {
775    struct amdgpu_bo_real_reusable_slab *bo = get_bo_from_slab(slab);
776    unsigned slab_size = bo->b.b.b.base.size;
777 
778    assert(bo->slab.num_entries * bo->slab.entry_size <= slab_size);
779    if (bo->b.b.b.base.placement & RADEON_DOMAIN_VRAM)
780       aws->slab_wasted_vram -= slab_size - bo->slab.num_entries * bo->slab.entry_size;
781    else
782       aws->slab_wasted_gtt -= slab_size - bo->slab.num_entries * bo->slab.entry_size;
783 
784    for (unsigned i = 0; i < bo->slab.num_entries; ++i)
785       amdgpu_bo_remove_fences(&bo->entries[i].b);
786 
787    os_free_aligned(bo->entries);
788    amdgpu_winsys_bo_reference(aws, (struct amdgpu_winsys_bo**)&bo, NULL);
789 }
790 
791 #if DEBUG_SPARSE_COMMITS
792 static void
793 sparse_dump(struct amdgpu_bo_sparse *bo, const char *func)
794 {
795    fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
796                    "Commitments:\n",
797            __func__, bo, bo->b.base.size, bo->num_va_pages, func);
798 
799    struct amdgpu_sparse_backing *span_backing = NULL;
800    uint32_t span_first_backing_page = 0;
801    uint32_t span_first_va_page = 0;
802    uint32_t va_page = 0;
803 
804    for (;;) {
805       struct amdgpu_sparse_backing *backing = 0;
806       uint32_t backing_page = 0;
807 
808       if (va_page < bo->num_va_pages) {
809          backing = bo->commitments[va_page].backing;
810          backing_page = bo->commitments[va_page].page;
811       }
812 
813       if (span_backing &&
814           (backing != span_backing ||
815            backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
816          fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
817                  span_first_va_page, va_page - 1, span_backing,
818                  span_first_backing_page,
819                  span_first_backing_page + (va_page - span_first_va_page) - 1);
820 
821          span_backing = NULL;
822       }
823 
824       if (va_page >= bo->num_va_pages)
825          break;
826 
827       if (backing && !span_backing) {
828          span_backing = backing;
829          span_first_backing_page = backing_page;
830          span_first_va_page = va_page;
831       }
832 
833       va_page++;
834    }
835 
836    fprintf(stderr, "Backing:\n");
837 
838    list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->backing, list) {
839       fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->b.base.size);
840       for (unsigned i = 0; i < backing->num_chunks; ++i)
841          fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
842    }
843 }
844 #endif
845 
846 /*
847  * Attempt to allocate the given number of backing pages. Fewer pages may be
848  * allocated (depending on the fragmentation of existing backing buffers),
849  * which will be reflected by a change to *pnum_pages.
850  */
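/* Illustrative example: a caller asking for 64 pages may find that the best free
 * chunk of any existing backing buffer holds only 40 pages; *pnum_pages is then
 * reduced to 40 and the caller (amdgpu_bo_sparse_commit) calls this function
 * again for the remaining pages.
 */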
851 static struct amdgpu_sparse_backing *
852 sparse_backing_alloc(struct amdgpu_winsys *aws, struct amdgpu_bo_sparse *bo,
853                      uint32_t *pstart_page, uint32_t *pnum_pages)
854 {
855    struct amdgpu_sparse_backing *best_backing;
856    unsigned best_idx;
857    uint32_t best_num_pages;
858 
859    best_backing = NULL;
860    best_idx = 0;
861    best_num_pages = 0;
862 
863    /* This is a very simple and inefficient best-fit algorithm. */
864    list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->backing, list) {
865       for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
866          uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
867          if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
868             (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
869             best_backing = backing;
870             best_idx = idx;
871             best_num_pages = cur_num_pages;
872          }
873       }
874    }
875 
876    /* Allocate a new backing buffer if necessary. */
877    if (!best_backing) {
878       struct pb_buffer_lean *buf;
879       uint64_t size;
880       uint32_t pages;
881 
882       best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
883       if (!best_backing)
884          return NULL;
885 
886       best_backing->max_chunks = 4;
887       best_backing->chunks = CALLOC(best_backing->max_chunks,
888                                     sizeof(*best_backing->chunks));
889       if (!best_backing->chunks) {
890          FREE(best_backing);
891          return NULL;
892       }
893 
894       assert(bo->num_backing_pages < DIV_ROUND_UP(bo->b.base.size, RADEON_SPARSE_PAGE_SIZE));
895 
896       size = MIN3(bo->b.base.size / 16,
897                   8 * 1024 * 1024,
898                   bo->b.base.size - (uint64_t)bo->num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
899       size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
900 
901       buf = amdgpu_bo_create(aws, size, RADEON_SPARSE_PAGE_SIZE,
902                              bo->b.base.placement,
903                              (bo->b.base.usage & ~RADEON_FLAG_SPARSE &
904                               /* Set the interprocess sharing flag to disable pb_cache because
905                                * amdgpu_bo_wait doesn't wait for active CS jobs.
906                                */
907                               ~RADEON_FLAG_NO_INTERPROCESS_SHARING) | RADEON_FLAG_NO_SUBALLOC);
908       if (!buf) {
909          FREE(best_backing->chunks);
910          FREE(best_backing);
911          return NULL;
912       }
913 
914       /* We might have gotten a bigger buffer than requested via caching. */
915       pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
916 
917       best_backing->bo = get_real_bo(amdgpu_winsys_bo(buf));
918       best_backing->num_chunks = 1;
919       best_backing->chunks[0].begin = 0;
920       best_backing->chunks[0].end = pages;
921 
922       list_add(&best_backing->list, &bo->backing);
923       bo->num_backing_pages += pages;
924 
925       best_idx = 0;
926       best_num_pages = pages;
927    }
928 
929    *pnum_pages = MIN2(*pnum_pages, best_num_pages);
930    *pstart_page = best_backing->chunks[best_idx].begin;
931    best_backing->chunks[best_idx].begin += *pnum_pages;
932 
933    if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
934       memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
935               sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
936       best_backing->num_chunks--;
937    }
938 
939    return best_backing;
940 }
941 
942 static void
943 sparse_free_backing_buffer(struct amdgpu_winsys *aws, struct amdgpu_bo_sparse *bo,
944                            struct amdgpu_sparse_backing *backing)
945 {
946    bo->num_backing_pages -= backing->bo->b.base.size / RADEON_SPARSE_PAGE_SIZE;
947 
948    /* Add fences from bo to backing->bo. */
949    simple_mtx_lock(&aws->bo_fence_lock);
950    u_foreach_bit(i, bo->b.fences.valid_fence_mask) {
951       add_seq_no_to_list(aws, &backing->bo->b.fences, i, bo->b.fences.seq_no[i]);
952    }
953    simple_mtx_unlock(&aws->bo_fence_lock);
954 
955    list_del(&backing->list);
956    amdgpu_winsys_bo_reference(aws, (struct amdgpu_winsys_bo**)&backing->bo, NULL);
957    FREE(backing->chunks);
958    FREE(backing);
959 }
960 
961 /*
962  * Return a range of pages from the given backing buffer back into the
963  * free structure.
964  */
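/* Illustrative example: if the free list holds the chunks [0,10) and [20,30) and
 * pages [10,20) are returned, the binary search below finds the insertion point
 * and both neighbours merge into a single chunk [0,30); if that single chunk then
 * covers the whole backing buffer, the buffer itself is released via
 * sparse_free_backing_buffer().
 */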
965 static bool
966 sparse_backing_free(struct amdgpu_winsys *aws, struct amdgpu_bo_sparse *bo,
967                     struct amdgpu_sparse_backing *backing,
968                     uint32_t start_page, uint32_t num_pages)
969 {
970    uint32_t end_page = start_page + num_pages;
971    unsigned low = 0;
972    unsigned high = backing->num_chunks;
973 
974    /* Find the first chunk with begin >= start_page. */
975    while (low < high) {
976       unsigned mid = low + (high - low) / 2;
977 
978       if (backing->chunks[mid].begin >= start_page)
979          high = mid;
980       else
981          low = mid + 1;
982    }
983 
984    assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
985    assert(low == 0 || backing->chunks[low - 1].end <= start_page);
986 
987    if (low > 0 && backing->chunks[low - 1].end == start_page) {
988       backing->chunks[low - 1].end = end_page;
989 
990       if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
991          backing->chunks[low - 1].end = backing->chunks[low].end;
992          memmove(&backing->chunks[low], &backing->chunks[low + 1],
993                  sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
994          backing->num_chunks--;
995       }
996    } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
997       backing->chunks[low].begin = start_page;
998    } else {
999       if (backing->num_chunks >= backing->max_chunks) {
1000          unsigned new_max_chunks = 2 * backing->max_chunks;
1001          struct amdgpu_sparse_backing_chunk *new_chunks =
1002             REALLOC(backing->chunks,
1003                     sizeof(*backing->chunks) * backing->max_chunks,
1004                     sizeof(*backing->chunks) * new_max_chunks);
1005          if (!new_chunks)
1006             return false;
1007 
1008          backing->max_chunks = new_max_chunks;
1009          backing->chunks = new_chunks;
1010       }
1011 
1012       memmove(&backing->chunks[low + 1], &backing->chunks[low],
1013               sizeof(*backing->chunks) * (backing->num_chunks - low));
1014       backing->chunks[low].begin = start_page;
1015       backing->chunks[low].end = end_page;
1016       backing->num_chunks++;
1017    }
1018 
1019    if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
1020        backing->chunks[0].end == backing->bo->b.base.size / RADEON_SPARSE_PAGE_SIZE)
1021       sparse_free_backing_buffer(aws, bo, backing);
1022 
1023    return true;
1024 }
1025 
1026 static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
1027 {
1028    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1029    struct amdgpu_bo_sparse *bo = get_sparse_bo(amdgpu_winsys_bo(_buf));
1030    int r;
1031 
1032    r = amdgpu_bo_va_op_raw(aws->dev, NULL, 0,
1033                            (uint64_t)bo->num_va_pages * RADEON_SPARSE_PAGE_SIZE,
1034                            amdgpu_va_get_start_addr(bo->va_handle), 0, AMDGPU_VA_OP_CLEAR);
1035    if (r) {
1036       fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
1037    }
1038 
1039    while (!list_is_empty(&bo->backing)) {
1040       sparse_free_backing_buffer(aws, bo,
1041                                  container_of(bo->backing.next,
1042                                               struct amdgpu_sparse_backing, list));
1043    }
1044 
1045    amdgpu_va_range_free(bo->va_handle);
1046    FREE(bo->commitments);
1047    simple_mtx_destroy(&bo->commit_lock);
1048    FREE(bo);
1049 }
1050 
1051 static struct pb_buffer_lean *
1052 amdgpu_bo_sparse_create(struct amdgpu_winsys *aws, uint64_t size,
1053                         enum radeon_bo_domain domain,
1054                         enum radeon_bo_flag flags)
1055 {
1056    struct amdgpu_bo_sparse *bo;
1057    uint64_t map_size;
1058    uint64_t va_gap_size;
1059    int r;
1060 
1061    /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
1062     * that exceed this limit. This is not really a restriction: we don't have
1063     * that much virtual address space anyway.
1064     */
1065    if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
1066       return NULL;
1067 
1068    bo = CALLOC_STRUCT(amdgpu_bo_sparse);
1069    if (!bo)
1070       return NULL;
1071 
1072    simple_mtx_init(&bo->commit_lock, mtx_plain);
1073    pipe_reference_init(&bo->b.base.reference, 1);
1074    bo->b.base.placement = domain;
1075    bo->b.base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
1076    bo->b.base.usage = flags;
1077    bo->b.base.size = size;
1078    bo->b.unique_id =  __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
1079    bo->b.type = AMDGPU_BO_SPARSE;
1080 
1081    bo->num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
1082    bo->commitments = CALLOC(bo->num_va_pages, sizeof(*bo->commitments));
1083    if (!bo->commitments)
1084       goto error_alloc_commitments;
1085 
1086    list_inithead(&bo->backing);
1087 
1088    /* For simplicity, we always map a multiple of the page size. */
1089    map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
1090    va_gap_size = aws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
1091 
1092    uint64_t gpu_address;
1093    r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
1094                              map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
1095                              0, &gpu_address, &bo->va_handle, AMDGPU_VA_RANGE_HIGH);
1096    if (r)
1097       goto error_va_alloc;
1098 
1099    r = amdgpu_bo_va_op_raw(aws->dev, NULL, 0, map_size, gpu_address,
1100                            AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
1101    if (r)
1102       goto error_va_map;
1103 
1104    return &bo->b.base;
1105 
1106 error_va_map:
1107    amdgpu_va_range_free(bo->va_handle);
1108 error_va_alloc:
1109    FREE(bo->commitments);
1110 error_alloc_commitments:
1111    simple_mtx_destroy(&bo->commit_lock);
1112    FREE(bo);
1113    return NULL;
1114 }
1115 
1116 static bool
1117 amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer_lean *buf,
1118                         uint64_t offset, uint64_t size, bool commit)
1119 {
1120    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1121    struct amdgpu_bo_sparse *bo = get_sparse_bo(amdgpu_winsys_bo(buf));
1122    struct amdgpu_sparse_commitment *comm;
1123    uint32_t va_page, end_va_page;
1124    bool ok = true;
1125    int r;
1126 
1127    assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
1128    assert(offset <= bo->b.base.size);
1129    assert(size <= bo->b.base.size - offset);
1130    assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->b.base.size);
1131 
1132    comm = bo->commitments;
1133    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
1134    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
1135 
1136    simple_mtx_lock(&bo->commit_lock);
1137 
1138 #if DEBUG_SPARSE_COMMITS
1139    sparse_dump(bo, __func__);
1140 #endif
1141 
1142    if (commit) {
1143       while (va_page < end_va_page) {
1144          uint32_t span_va_page;
1145 
1146          /* Skip pages that are already committed. */
1147          if (comm[va_page].backing) {
1148             va_page++;
1149             continue;
1150          }
1151 
1152          /* Determine length of uncommitted span. */
1153          span_va_page = va_page;
1154          while (va_page < end_va_page && !comm[va_page].backing)
1155             va_page++;
1156 
1157          /* Fill the uncommitted span with chunks of backing memory. */
1158          while (span_va_page < va_page) {
1159             struct amdgpu_sparse_backing *backing;
1160             uint32_t backing_start, backing_size;
1161 
1162             backing_size = va_page - span_va_page;
1163             backing = sparse_backing_alloc(aws, bo, &backing_start, &backing_size);
1164             if (!backing) {
1165                ok = false;
1166                goto out;
1167             }
1168 
1169             r = amdgpu_bo_va_op_raw(aws->dev, backing->bo->bo_handle,
1170                                     (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
1171                                     (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
1172                                     amdgpu_va_get_start_addr(bo->va_handle) +
1173                                     (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
1174                                     AMDGPU_VM_PAGE_READABLE |
1175                                     AMDGPU_VM_PAGE_WRITEABLE |
1176                                     AMDGPU_VM_PAGE_EXECUTABLE,
1177                                     AMDGPU_VA_OP_REPLACE);
1178             if (r) {
1179                ok = sparse_backing_free(aws, bo, backing, backing_start, backing_size);
1180                assert(ok && "sufficient memory should already be allocated");
1181 
1182                ok = false;
1183                goto out;
1184             }
1185 
1186             while (backing_size) {
1187                comm[span_va_page].backing = backing;
1188                comm[span_va_page].page = backing_start;
1189                span_va_page++;
1190                backing_start++;
1191                backing_size--;
1192             }
1193          }
1194       }
1195    } else {
1196       r = amdgpu_bo_va_op_raw(aws->dev, NULL, 0,
1197                               (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
1198                               amdgpu_va_get_start_addr(bo->va_handle) +
1199                               (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
1200                               AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
1201       if (r) {
1202          ok = false;
1203          goto out;
1204       }
1205 
1206       while (va_page < end_va_page) {
1207          struct amdgpu_sparse_backing *backing;
1208          uint32_t backing_start;
1209          uint32_t span_pages;
1210 
1211          /* Skip pages that are already uncommitted. */
1212          if (!comm[va_page].backing) {
1213             va_page++;
1214             continue;
1215          }
1216 
1217          /* Group contiguous spans of pages. */
1218          backing = comm[va_page].backing;
1219          backing_start = comm[va_page].page;
1220          comm[va_page].backing = NULL;
1221 
1222          span_pages = 1;
1223          va_page++;
1224 
1225          while (va_page < end_va_page &&
1226                 comm[va_page].backing == backing &&
1227                 comm[va_page].page == backing_start + span_pages) {
1228             comm[va_page].backing = NULL;
1229             va_page++;
1230             span_pages++;
1231          }
1232 
1233          if (!sparse_backing_free(aws, bo, backing, backing_start, span_pages)) {
1234             /* Couldn't allocate tracking data structures, so we have to leak */
1235             fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1236             ok = false;
1237          }
1238       }
1239    }
1240 out:
1241 
1242    simple_mtx_unlock(&bo->commit_lock);
1243 
1244    return ok;
1245 }
1246 
1247 static unsigned
1248 amdgpu_bo_find_next_committed_memory(struct pb_buffer_lean *buf,
1249                                      uint64_t range_offset, unsigned *range_size)
1250 {
1251    struct amdgpu_bo_sparse *bo = get_sparse_bo(amdgpu_winsys_bo(buf));
1252    struct amdgpu_sparse_commitment *comm;
1253    uint32_t va_page, end_va_page;
1254    uint32_t span_va_page, start_va_page;
1255    unsigned uncommitted_range_prev, uncommitted_range_next;
1256 
1257    if (*range_size == 0)
1258       return 0;
1259 
1260    assert(*range_size + range_offset <= bo->b.base.size);
1261 
1262    uncommitted_range_prev = uncommitted_range_next = 0;
1263    comm = bo->commitments;
1264    start_va_page = va_page = range_offset / RADEON_SPARSE_PAGE_SIZE;
1265    end_va_page = (*range_size + range_offset) / RADEON_SPARSE_PAGE_SIZE;
1266 
1267    simple_mtx_lock(&bo->commit_lock);
1268    /* Look up the first committed page with backing physical storage */
1269    while (va_page < end_va_page && !comm[va_page].backing)
1270       va_page++;
1271 
1272    /* First committed page lookup failed, return early. */
1273    if (va_page == end_va_page && !comm[va_page].backing) {
1274       uncommitted_range_prev = *range_size;
1275       *range_size = 0;
1276       simple_mtx_unlock(&bo->commit_lock);
1277       return uncommitted_range_prev;
1278    }
1279 
1280    /* Look up the first uncommitted page without backing physical storage */
1281    span_va_page = va_page;
1282    while (va_page < end_va_page && comm[va_page].backing)
1283       va_page++;
1284    simple_mtx_unlock(&bo->commit_lock);
1285 
1286    /* Calculate the byte count to skip before the committed range */
1287    if (span_va_page != start_va_page)
1288       uncommitted_range_prev = span_va_page * RADEON_SPARSE_PAGE_SIZE - range_offset;
1289 
1290    /* Calculate the byte count to skip after the committed range */
1291    if (va_page != end_va_page || !comm[va_page].backing) {
1292       uncommitted_range_next = *range_size + range_offset - va_page * RADEON_SPARSE_PAGE_SIZE;
1293    }
1294 
1295    /* Calculate the size of the first committed part */
1296    *range_size = *range_size - uncommitted_range_next - uncommitted_range_prev;
1297    return *range_size ? uncommitted_range_prev : uncommitted_range_prev + uncommitted_range_next;
1298 }
1299 
1300 static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
1301                                        struct pb_buffer_lean *_buf,
1302                                        struct radeon_bo_metadata *md,
1303                                        struct radeon_surf *surf)
1304 {
1305    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1306    struct amdgpu_bo_real *bo = get_real_bo(amdgpu_winsys_bo(_buf));
1307    struct amdgpu_bo_info info = {0};
1308    int r;
1309 
1310    r = amdgpu_bo_query_info(bo->bo_handle, &info);
1311    if (r)
1312       return;
1313 
1314    ac_surface_apply_bo_metadata(&aws->info, surf, info.metadata.tiling_info,
1315                                 &md->mode);
1316 
1317    md->size_metadata = info.metadata.size_metadata;
1318    memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1319 }
1320 
1321 static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
1322                                        struct pb_buffer_lean *_buf,
1323                                        struct radeon_bo_metadata *md,
1324                                        struct radeon_surf *surf)
1325 {
1326    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1327    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1328    struct amdgpu_bo_real *real = is_real_bo(bo) ? get_real_bo(bo) : get_slab_entry_real_bo(bo);
1329    struct amdgpu_bo_metadata metadata = {0};
1330 
1331    ac_surface_compute_bo_metadata(&aws->info, surf, &metadata.tiling_info);
1332 
1333    metadata.size_metadata = md->size_metadata;
1334    memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1335 
1336    amdgpu_bo_set_metadata(real->bo_handle, &metadata);
1337 }
1338 
1339 struct pb_buffer_lean *
1340 amdgpu_bo_create(struct amdgpu_winsys *aws,
1341                  uint64_t size,
1342                  unsigned alignment,
1343                  enum radeon_bo_domain domain,
1344                  enum radeon_bo_flag flags)
1345 {
1346    struct amdgpu_winsys_bo *bo;
1347 
1348    radeon_canonicalize_bo_flags(&domain, &flags);
1349 
1350    /* Handle sparse buffers first. */
1351    if (flags & RADEON_FLAG_SPARSE) {
1352       assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1353 
1354       return amdgpu_bo_sparse_create(aws, size, domain, flags);
1355    }
1356 
1357    unsigned max_slab_entry_size = 1 << (aws->bo_slabs.min_order + aws->bo_slabs.num_orders - 1);
1358    int heap = radeon_get_heap_index(domain, flags);
1359 
1360    /* Sub-allocate small buffers from slabs. */
1361    if (heap >= 0 && size <= max_slab_entry_size) {
1362       struct pb_slab_entry *entry;
1363       unsigned alloc_size = size;
1364 
1365       /* Always use slabs for sizes less than 4 KB because the kernel aligns
1366        * everything to 4 KB.
1367        */
1368       if (size < alignment && alignment <= 4 * 1024)
1369          alloc_size = alignment;
1370 
1371       if (alignment > get_slab_entry_alignment(aws, alloc_size)) {
1372          /* 3/4 allocations can return too small alignment. Try again with a power of two
1373           * allocation size.
1374           */
1375          unsigned pot_size = get_slab_pot_entry_size(aws, alloc_size);
1376 
1377          if (alignment <= pot_size) {
1378             /* This size works but wastes some memory to fulfil the alignment. */
1379             alloc_size = pot_size;
1380          } else {
1381             goto no_slab; /* can't fulfil alignment requirements */
1382          }
1383       }
1384 
1385       entry = pb_slab_alloc(&aws->bo_slabs, alloc_size, heap);
1386       if (!entry) {
1387          /* Clean up buffer managers and try again. */
1388          amdgpu_clean_up_buffer_managers(aws);
1389 
1390          entry = pb_slab_alloc(&aws->bo_slabs, alloc_size, heap);
1391       }
1392       if (!entry)
1393          return NULL;
1394 
1395       struct amdgpu_bo_slab_entry *slab_bo = container_of(entry, struct amdgpu_bo_slab_entry, entry);
1396       pipe_reference_init(&slab_bo->b.base.reference, 1);
1397       slab_bo->b.base.size = size;
1398       slab_bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
1399       assert(alignment <= 1 << slab_bo->b.base.alignment_log2);
1400 
1401       if (domain & RADEON_DOMAIN_VRAM)
1402          aws->slab_wasted_vram += get_slab_wasted_size(aws, slab_bo);
1403       else
1404          aws->slab_wasted_gtt += get_slab_wasted_size(aws, slab_bo);
1405 
1406       return &slab_bo->b.base;
1407    }
1408 no_slab:
1409 
1410    /* Align the size to the page size. This is the minimum alignment for normal
1411     * BOs, and aligning here helps the cached bufmgr: small BOs in particular,
1412     * like constant/uniform buffers, benefit from better and more frequent reuse.
1413     */
1414    if (domain & RADEON_DOMAIN_VRAM_GTT) {
1415       size = align64(size, aws->info.gart_page_size);
1416       alignment = align(alignment, aws->info.gart_page_size);
1417    }
1418 
1419    bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
1420                             !(flags & RADEON_FLAG_DISCARDABLE);
1421 
1422    if (use_reusable_pool) {
1423        /* RADEON_FLAG_NO_SUBALLOC is irrelevant for the cache. */
1424        heap = radeon_get_heap_index(domain, flags & ~RADEON_FLAG_NO_SUBALLOC);
1425        assert(heap >= 0 && heap < RADEON_NUM_HEAPS);
1426 
1427        /* Get a buffer from the cache. */
1428        bo = (struct amdgpu_winsys_bo*)
1429             pb_cache_reclaim_buffer(&aws->bo_cache, size, alignment, 0, heap);
1430        if (bo) {
1431           /* If the buffer is amdgpu_bo_real_reusable, but we need amdgpu_bo_real_reusable_slab,
1432            * keep the allocation but make the structure bigger.
1433            */
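          /* Growing in place like this relies on struct amdgpu_bo_real_reusable being
           * the leading member of struct amdgpu_bo_real_reusable_slab, so the existing
           * contents stay valid and only the appended tail needs zeroing.
           */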
1434           if (flags & RADEON_FLAG_WINSYS_SLAB_BACKING && bo->type == AMDGPU_BO_REAL_REUSABLE) {
1435              const unsigned orig_size = sizeof(struct amdgpu_bo_real_reusable);
1436              const unsigned new_size = sizeof(struct amdgpu_bo_real_reusable_slab);
1437              struct amdgpu_winsys_bo *new_bo =
1438                 (struct amdgpu_winsys_bo*)REALLOC(bo, orig_size, new_size);
1439 
1440              if (!new_bo) {
1441                 amdgpu_winsys_bo_reference(aws, &bo, NULL);
1442                 return NULL;
1443              }
1444 
1445              memset((uint8_t*)new_bo + orig_size, 0, new_size - orig_size);
1446              bo = new_bo;
1447              bo->type = AMDGPU_BO_REAL_REUSABLE_SLAB;
1448           }
1449           return &bo->base;
1450        }
1451    }
1452 
1453    /* Create a new one. */
1454    bo = amdgpu_create_bo(aws, size, alignment, domain, flags, heap);
1455    if (!bo) {
1456       /* Clean up buffer managers and try again. */
1457       amdgpu_clean_up_buffer_managers(aws);
1458 
1459       bo = amdgpu_create_bo(aws, size, alignment, domain, flags, heap);
1460       if (!bo)
1461          return NULL;
1462    }
1463 
1464    return &bo->base;
1465 }
1466 
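/* radeon_winsys::buffer_create entry point: a thin wrapper that unwraps the winsys
 * and forwards to amdgpu_bo_create(). Callers normally go through the vtable, e.g.
 * (illustrative only):
 *
 *    struct pb_buffer_lean *buf =
 *       rws->buffer_create(rws, 4096, 4096, RADEON_DOMAIN_VRAM,
 *                          RADEON_FLAG_NO_INTERPROCESS_SHARING);
 */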
1467 static struct pb_buffer_lean *
1468 amdgpu_buffer_create(struct radeon_winsys *rws,
1469                      uint64_t size,
1470                      unsigned alignment,
1471                      enum radeon_bo_domain domain,
1472                      enum radeon_bo_flag flags)
1473 {
1474    struct pb_buffer_lean *res = amdgpu_bo_create(amdgpu_winsys(rws), size, alignment,
1475                                                  domain, flags);
1476    return res;
1477 }
1478 
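/* Import a buffer shared by another process, either from a GEM flink name
 * (WINSYS_HANDLE_TYPE_SHARED) or from a dma-buf fd (WINSYS_HANDLE_TYPE_FD).
 * Re-imports of an already known kernel BO are deduplicated through
 * aws->bo_export_table so the existing amdgpu_winsys_bo is returned. New imports
 * get a GPU VA mapping, and the kernel heap/creation flags are translated into
 * radeon_bo_domain/radeon_bo_flag values.
 */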
1479 static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1480                                                struct winsys_handle *whandle,
1481                                                unsigned vm_alignment,
1482                                                bool is_prime_linear_buffer)
1483 {
1484    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1485    struct amdgpu_bo_real *bo = NULL;
1486    enum amdgpu_bo_handle_type type;
1487    struct amdgpu_bo_import_result result = {0};
1488    uint64_t va;
1489    amdgpu_va_handle va_handle = NULL;
1490    struct amdgpu_bo_info info = {0};
1491    enum radeon_bo_domain initial = 0;
1492    enum radeon_bo_flag flags = 0;
1493    int r;
1494 
1495    switch (whandle->type) {
1496    case WINSYS_HANDLE_TYPE_SHARED:
1497       type = amdgpu_bo_handle_type_gem_flink_name;
1498       break;
1499    case WINSYS_HANDLE_TYPE_FD:
1500       type = amdgpu_bo_handle_type_dma_buf_fd;
1501       break;
1502    default:
1503       return NULL;
1504    }
1505 
1506    r = amdgpu_bo_import(aws->dev, type, whandle->handle, &result);
1507    if (r)
1508       return NULL;
1509 
1510    simple_mtx_lock(&aws->bo_export_table_lock);
1511    bo = util_hash_table_get(aws->bo_export_table, result.buf_handle);
1512 
1513    /* If the amdgpu_winsys_bo instance already exists, bump the reference
1514     * counter and return it.
1515     */
1516    if (bo) {
1517       p_atomic_inc(&bo->b.base.reference.count);
1518       simple_mtx_unlock(&aws->bo_export_table_lock);
1519 
1520       /* Release the buffer handle, because we don't need it anymore.
1521        * This function is returning an existing buffer, which has its own
1522        * handle.
1523        */
1524       amdgpu_bo_free(result.buf_handle);
1525       return &bo->b.base;
1526    }
1527 
1528    /* Get initial domains. */
1529    r = amdgpu_bo_query_info(result.buf_handle, &info);
1530    if (r)
1531       goto error;
1532 
1533    r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
1534                              result.alloc_size,
1535                              amdgpu_get_optimal_alignment(aws, result.alloc_size,
1536                                                           vm_alignment),
1537                              0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
1538    if (r)
1539       goto error;
1540 
1541    bo = CALLOC_STRUCT(amdgpu_bo_real);
1542    if (!bo)
1543       goto error;
1544 
1545    r = amdgpu_bo_va_op_raw(aws->dev, result.buf_handle, 0, result.alloc_size, va,
1546                            AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1547                            AMDGPU_VM_PAGE_EXECUTABLE |
1548                            (is_prime_linear_buffer ? AMDGPU_VM_MTYPE_UC : 0),
1549                            AMDGPU_VA_OP_MAP);
1550    if (r)
1551       goto error;
1552 
1553    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1554       initial |= RADEON_DOMAIN_VRAM;
1555    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1556       initial |= RADEON_DOMAIN_GTT;
1557    if (info.alloc_flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
1558       flags |= RADEON_FLAG_NO_CPU_ACCESS;
1559    if (info.alloc_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1560       flags |= RADEON_FLAG_GTT_WC;
1561    if (info.alloc_flags & AMDGPU_GEM_CREATE_ENCRYPTED) {
1562       /* Imports are always possible even if the importer isn't using TMZ.
1563        * For instance libweston needs to import the buffer to be able to determine
1564        * if it can be used for scanout.
1565        */
1566       flags |= RADEON_FLAG_ENCRYPTED;
1567       *((bool*)&rws->uses_secure_bos) = true;
1568    }
1569    if (info.alloc_flags & AMDGPU_GEM_CREATE_GFX12_DCC)
1570       flags |= RADEON_FLAG_GFX12_ALLOW_DCC;
1571 
1572    /* Initialize the structure. */
1573    pipe_reference_init(&bo->b.base.reference, 1);
1574    bo->b.base.placement = initial;
1575    bo->b.base.alignment_log2 = util_logbase2(info.phys_alignment ?
1576                                              info.phys_alignment : aws->info.gart_page_size);
1577    bo->b.base.usage = flags;
1578    bo->b.base.size = result.alloc_size;
1579    bo->b.type = AMDGPU_BO_REAL;
1580    bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
1581    simple_mtx_init(&bo->map_lock, mtx_plain);
1582    bo->bo_handle = result.buf_handle;
1583    bo->va_handle = va_handle;
1584    bo->is_shared = true;
1585 
1586    if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
1587       aws->allocated_vram += align64(bo->b.base.size, aws->info.gart_page_size);
1588    else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
1589       aws->allocated_gtt += align64(bo->b.base.size, aws->info.gart_page_size);
1590 
1591    amdgpu_bo_export(bo->bo_handle, amdgpu_bo_handle_type_kms, &bo->kms_handle);
1592 
1593    amdgpu_add_buffer_to_global_list(aws, bo);
1594 
1595    _mesa_hash_table_insert(aws->bo_export_table, bo->bo_handle, bo);
1596    simple_mtx_unlock(&aws->bo_export_table_lock);
1597 
1598    return &bo->b.base;
1599 
1600 error:
1601    simple_mtx_unlock(&aws->bo_export_table_lock);
1602    if (bo)
1603       FREE(bo);
1604    if (va_handle)
1605       amdgpu_va_range_free(va_handle);
1606    amdgpu_bo_free(result.buf_handle);
1607    return NULL;
1608 }
1609 
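/* Export a buffer as a GEM flink name, dma-buf fd or KMS handle. Only real BOs can
 * be exported; slab entries and sparse buffers are rejected. KMS exports on a DRM
 * fd other than the winsys fd go through a dma-buf fd plus drmPrimeFDToHandle and
 * are cached in sws->kms_handles. Exported BOs are marked shared and added to
 * aws->bo_export_table so later imports find them.
 */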
1610 static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
1611                                  struct pb_buffer_lean *buffer,
1612                                  struct winsys_handle *whandle)
1613 {
1614    struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
1615    struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1616    enum amdgpu_bo_handle_type type;
1617    struct hash_entry *entry;
1618    int r;
1619 
1620    /* Don't allow exports of slab entries and sparse buffers. */
1621    if (!is_real_bo(amdgpu_winsys_bo(buffer)))
1622       return false;
1623 
1624    struct amdgpu_bo_real *bo = get_real_bo(amdgpu_winsys_bo(buffer));
1625 
1626    /* Drop the REUSABLE type variant if it's set, so the exported BO is never returned to the cache. */
1627    bo->b.type = AMDGPU_BO_REAL;
1628 
1629    switch (whandle->type) {
1630    case WINSYS_HANDLE_TYPE_SHARED:
1631       type = amdgpu_bo_handle_type_gem_flink_name;
1632       break;
1633    case WINSYS_HANDLE_TYPE_KMS:
1634       if (sws->fd == aws->fd) {
1635          whandle->handle = bo->kms_handle;
1636 
1637          if (bo->is_shared)
1638             return true;
1639 
1640          goto hash_table_set;
1641       }
1642 
1643       simple_mtx_lock(&aws->sws_list_lock);
1644       entry = _mesa_hash_table_search(sws->kms_handles, bo);
1645       simple_mtx_unlock(&aws->sws_list_lock);
1646       if (entry) {
1647          whandle->handle = (uintptr_t)entry->data;
1648          return true;
1649       }
1650       FALLTHROUGH;
1651    case WINSYS_HANDLE_TYPE_FD:
1652       type = amdgpu_bo_handle_type_dma_buf_fd;
1653       break;
1654    default:
1655       return false;
1656    }
1657 
1658    r = amdgpu_bo_export(bo->bo_handle, type, &whandle->handle);
1659    if (r)
1660       return false;
1661 
1662 #if defined(DMA_BUF_SET_NAME_B)
1663    if (whandle->type == WINSYS_HANDLE_TYPE_FD &&
1664        !bo->is_shared) {
1665       char dmabufname[32];
1666       snprintf(dmabufname, sizeof(dmabufname), "%d-%s", getpid(), util_get_process_name());
1667       r = ioctl(whandle->handle, DMA_BUF_SET_NAME_B, (uint64_t)(uintptr_t)dmabufname);
1668    }
1669 #endif
1670 
1671    if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
1672       int dma_fd = whandle->handle;
1673 
1674       r = drmPrimeFDToHandle(sws->fd, dma_fd, &whandle->handle);
1675       close(dma_fd);
1676 
1677       if (r)
1678          return false;
1679 
1680       simple_mtx_lock(&aws->sws_list_lock);
1681       _mesa_hash_table_insert_pre_hashed(sws->kms_handles,
1682                                          bo->kms_handle, bo,
1683                                          (void*)(uintptr_t)whandle->handle);
1684       simple_mtx_unlock(&aws->sws_list_lock);
1685    }
1686 
1687  hash_table_set:
1688    simple_mtx_lock(&aws->bo_export_table_lock);
1689    _mesa_hash_table_insert(aws->bo_export_table, bo->bo_handle, bo);
1690    simple_mtx_unlock(&aws->bo_export_table_lock);
1691 
1692    bo->is_shared = true;
1693    return true;
1694 }
1695 
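/* Wrap an existing CPU allocation in a GTT BO (userptr import). The size is padded
 * to the GART page size before the kernel call, a GPU VA is mapped, and the BO is
 * marked is_user_ptr with cpu_ptr pointing at the caller's memory.
 */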
1696 static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1697                                                  void *pointer, uint64_t size,
1698                                                  enum radeon_bo_flag flags)
1699 {
1700     struct amdgpu_winsys *aws = amdgpu_winsys(rws);
1701     amdgpu_bo_handle buf_handle;
1702     struct amdgpu_bo_real *bo;
1703     uint64_t va;
1704     amdgpu_va_handle va_handle;
1705     /* Avoid failure when the size is not page aligned */
1706     uint64_t aligned_size = align64(size, aws->info.gart_page_size);
1707 
1708     bo = CALLOC_STRUCT(amdgpu_bo_real);
1709     if (!bo)
1710         return NULL;
1711 
1712     if (amdgpu_create_bo_from_user_mem(aws->dev, pointer,
1713                                        aligned_size, &buf_handle))
1714         goto error;
1715 
1716     if (amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
1717                               aligned_size,
1718                               amdgpu_get_optimal_alignment(aws, aligned_size,
1719                                                            aws->info.gart_page_size),
1720                               0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH))
1721         goto error_va_alloc;
1722 
1723     if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
1724         goto error_va_map;
1725 
1726     /* Initialize it. */
1727     bo->is_user_ptr = true;
1728     pipe_reference_init(&bo->b.base.reference, 1);
1729     bo->b.base.placement = RADEON_DOMAIN_GTT;
1730     bo->b.base.alignment_log2 = 0;
1731     bo->b.base.size = size;
1732     bo->b.type = AMDGPU_BO_REAL;
1733     bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
1734     simple_mtx_init(&bo->map_lock, mtx_plain);
1735     bo->bo_handle = buf_handle;
1736     bo->cpu_ptr = pointer;
1737     bo->va_handle = va_handle;
1738 
1739     aws->allocated_gtt += aligned_size;
1740 
1741     amdgpu_add_buffer_to_global_list(aws, bo);
1742 
1743     amdgpu_bo_export(bo->bo_handle, amdgpu_bo_handle_type_kms, &bo->kms_handle);
1744 
1745     return (struct pb_buffer_lean*)bo;
1746 
1747 error_va_map:
1748     amdgpu_va_range_free(va_handle);
1749 
1750 error_va_alloc:
1751     amdgpu_bo_free(buf_handle);
1752 
1753 error:
1754     FREE(bo);
1755     return NULL;
1756 }
1757 
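/* Only real BOs can be userptr imports; slab entries and sparse buffers never are. */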
1758 static bool amdgpu_bo_is_user_ptr(struct pb_buffer_lean *buf)
1759 {
1760    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1761 
1762    return is_real_bo(bo) ? get_real_bo(bo)->is_user_ptr : false;
1763 }
1764 
1765 static bool amdgpu_bo_is_suballocated(struct pb_buffer_lean *buf)
1766 {
1767    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1768 
1769    return bo->type == AMDGPU_BO_SLAB_ENTRY;
1770 }
1771 
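/* Return the GPU virtual address of a buffer. Slab entries add their byte offset
 * within the backing real BO; sparse and real BOs return the start of their VA range.
 */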
1772 uint64_t amdgpu_bo_get_va(struct pb_buffer_lean *buf)
1773 {
1774    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
1775 
1776    if (bo->type == AMDGPU_BO_SLAB_ENTRY) {
1777       struct amdgpu_bo_real_reusable_slab *slab_bo =
1778          (struct amdgpu_bo_real_reusable_slab *)get_slab_entry_real_bo(bo);
1779 
1780       return amdgpu_va_get_start_addr(slab_bo->b.b.va_handle) + get_slab_entry_offset(bo);
1781    } else if (bo->type == AMDGPU_BO_SPARSE) {
1782       return amdgpu_va_get_start_addr(get_sparse_bo(bo)->va_handle);
1783    } else {
1784       return amdgpu_va_get_start_addr(get_real_bo(bo)->va_handle);
1785    }
1786 }
1787 
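/* Common destroy callback: dispatch on the BO type, since slab entries, sparse
 * buffers and real BOs each have their own teardown (a real BO may be returned to
 * the reusable cache instead of being freed).
 */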
1788 static void amdgpu_buffer_destroy(struct radeon_winsys *rws, struct pb_buffer_lean *buf)
1789 {
1790    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
1791 
1792    if (bo->type == AMDGPU_BO_SLAB_ENTRY)
1793       amdgpu_bo_slab_destroy(rws, buf);
1794    else if (bo->type == AMDGPU_BO_SPARSE)
1795       amdgpu_bo_sparse_destroy(rws, buf);
1796    else
1797       amdgpu_bo_destroy_or_cache(rws, buf);
1798 }
1799 
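/* Plug the buffer-object implementation into the winsys function table used by the
 * gallium driver.
 */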
1800 void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *sws)
1801 {
1802    sws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1803    sws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1804    sws->base.buffer_map = amdgpu_bo_map;
1805    sws->base.buffer_unmap = amdgpu_bo_unmap;
1806    sws->base.buffer_wait = amdgpu_bo_wait;
1807    sws->base.buffer_create = amdgpu_buffer_create;
1808    sws->base.buffer_destroy = amdgpu_buffer_destroy;
1809    sws->base.buffer_from_handle = amdgpu_bo_from_handle;
1810    sws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1811    sws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1812    sws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1813    sws->base.buffer_get_handle = amdgpu_bo_get_handle;
1814    sws->base.buffer_commit = amdgpu_bo_sparse_commit;
1815    sws->base.buffer_find_next_committed_memory = amdgpu_bo_find_next_committed_memory;
1816    sws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1817    sws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1818    sws->base.buffer_get_flags = amdgpu_bo_get_flags;
1819 }
1820