/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BUFMGR_H
#define IRIS_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#include "c11/threads.h"
#include "common/intel_bind_timeline.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/u_dynarray.h"
#include "util/list.h"
#include "util/simple_mtx.h"
#include "pipe/p_defines.h"
#include "pipebuffer/pb_slab.h"
#include "intel/dev/intel_device_info.h"

struct intel_device_info;
struct util_debug_callback;
struct isl_surf;
struct iris_syncobj;
/**
 * Memory zones.  When allocating a buffer, you can request that it is
 * placed into a specific region of the virtual address space (PPGTT).
 *
 * Most buffers can go anywhere (IRIS_MEMZONE_OTHER).  Some buffers are
 * accessed via an offset from a base address.  STATE_BASE_ADDRESS has
 * a maximum 4GB size for each region, so we need to restrict those
 * buffers to be within 4GB of the base.  Each memory zone corresponds
 * to a particular base address.
 *
 * We lay out the virtual address space as follows:
 *
 * - [0,   4K): Nothing            (empty page for null address)
 * - [4K,  4G): Shaders            (Instruction Base Address)
 * - [4G,  8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
 * - [8G, 12G): Dynamic            (Dynamic State Base Address)
 * - [12G, *):  Other              (everything else in the full 48-bit VMA)
 *
 * A special buffer for border color lives at the start of the dynamic state
 * memory zone.  This unfortunately has to be handled specially because the
 * SAMPLER_STATE "Indirect State Pointer" field is only a 24-bit pointer.
 *
 * Each GL context uses a separate GEM context, which technically gives them
 * each a separate VMA.  However, we assign addresses globally, so buffers
 * will have the same address in all GEM contexts.  This lets us have a
 * single BO field for the address, which is easy and cheap.
 */
enum iris_memory_zone {
   IRIS_MEMZONE_SHADER,
   IRIS_MEMZONE_BINDER,
   IRIS_MEMZONE_SCRATCH,
   IRIS_MEMZONE_SURFACE,
   IRIS_MEMZONE_DYNAMIC,
   IRIS_MEMZONE_OTHER,

   IRIS_MEMZONE_BORDER_COLOR_POOL,
};

/* Intentionally exclude single buffer "zones" */
#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)

#define IRIS_SCRATCH_ZONE_SIZE (8 * 1024 * 1024)
#define IRIS_BINDER_ZONE_SIZE ((1ull << 30) - IRIS_SCRATCH_ZONE_SIZE)

#define IRIS_MEMZONE_SHADER_START     (0ull * (1ull << 32))
#define IRIS_MEMZONE_BINDER_START     (1ull * (1ull << 32))
#define IRIS_MEMZONE_SCRATCH_START    IRIS_MEMZONE_BINDER_START
#define IRIS_MEMZONE_SURFACE_START    (IRIS_MEMZONE_BINDER_START + IRIS_BINDER_ZONE_SIZE)
#define IRIS_MEMZONE_DYNAMIC_START    (2ull * (1ull << 32))
#define IRIS_MEMZONE_OTHER_START      (3ull * (1ull << 32))

#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 4096)
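/* For reference, a quick arithmetic sketch (derived from the definitions
 * above, not additional API): the zones begin at
 *
 *    IRIS_MEMZONE_SHADER_START  = 0x000000000
 *    IRIS_MEMZONE_BINDER_START  = 0x100000000   (4 GiB)
 *    IRIS_MEMZONE_SURFACE_START = 0x13F800000   (4 GiB + 1 GiB - 8 MiB)
 *    IRIS_MEMZONE_DYNAMIC_START = 0x200000000   (8 GiB)
 *    IRIS_MEMZONE_OTHER_START   = 0x300000000   (12 GiB)
 *
 * and the 256 KiB border color pool occupies the first 64 pages of the
 * dynamic state zone.
 */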

/**
 * Classification of the various incoherent caches of the GPU into a number of
 * caching domains.
 */
enum iris_domain {
   /** Render color cache. */
   IRIS_DOMAIN_RENDER_WRITE = 0,
   /** (Hi)Z/stencil cache. */
   IRIS_DOMAIN_DEPTH_WRITE,
   /** Data port (HDC) cache. */
   IRIS_DOMAIN_DATA_WRITE,
   /** Any other read-write cache. */
   IRIS_DOMAIN_OTHER_WRITE,
   /** Vertex cache. */
   IRIS_DOMAIN_VF_READ,
   /** Texture cache. */
   IRIS_DOMAIN_SAMPLER_READ,
   /** Pull-style shader constant loads. */
   IRIS_DOMAIN_PULL_CONSTANT_READ,
   /** Any other read-only cache, including reads from non-L3 clients. */
   IRIS_DOMAIN_OTHER_READ,
   /** Number of caching domains. */
   NUM_IRIS_DOMAINS,
   /** Not a real cache; used to opt out of the cache tracking mechanism. */
   IRIS_DOMAIN_NONE = NUM_IRIS_DOMAINS
};

/**
 * Whether a caching domain is guaranteed not to write any data to memory.
 */
static inline bool
iris_domain_is_read_only(enum iris_domain access)
{
   return access >= IRIS_DOMAIN_VF_READ &&
          access <= IRIS_DOMAIN_OTHER_READ;
}
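
/* Note: the range check above relies on the read-only domains being declared
 * contiguously in enum iris_domain, from IRIS_DOMAIN_VF_READ through
 * IRIS_DOMAIN_OTHER_READ.
 */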

static inline bool
iris_domain_is_l3_coherent(const struct intel_device_info *devinfo,
                           enum iris_domain access)
{
   /* VF reads are coherent with the L3 on Tigerlake+ because we set
    * the "L3 Bypass Disable" bit in the vertex/index buffer packets.
    */
   if (access == IRIS_DOMAIN_VF_READ)
      return devinfo->ver >= 12;

   return access != IRIS_DOMAIN_OTHER_WRITE &&
          access != IRIS_DOMAIN_OTHER_READ;
}

enum iris_mmap_mode {
   IRIS_MMAP_NONE, /**< Cannot be mapped */
   IRIS_MMAP_UC, /**< Fully uncached memory map */
   IRIS_MMAP_WC, /**< Write-combining map with no caching of reads */
   IRIS_MMAP_WB, /**< Write-back mapping with CPU caches enabled */
};

enum iris_heap {
   /**
    * System memory which is CPU-cached and (at least 1-way) coherent.
    *
    * This will use WB (write-back) CPU mappings.
    *
    * LLC systems and discrete cards (which enable snooping) will mostly use
    * this heap.  Non-LLC systems will only use it when explicit coherency is
    * required, as snooping is expensive there.
    */
   IRIS_HEAP_SYSTEM_MEMORY_CACHED_COHERENT,

   /**
    * System memory which is not CPU-cached.
    *
    * This will use WC (write-combining) CPU mappings, which have uncached
    * performance for reads.  This can be used for scanout on integrated
    * GPUs (which is never coherent with CPU caches).  It will be used for
    * most buffers on non-LLC platforms, where cache coherency is expensive.
    */
   IRIS_HEAP_SYSTEM_MEMORY_UNCACHED,

   /** IRIS_HEAP_SYSTEM_MEMORY_UNCACHED + compressed, only supported on Xe2 */
   IRIS_HEAP_SYSTEM_MEMORY_UNCACHED_COMPRESSED,

   /** Device-local memory (VRAM).  Cannot be placed in system memory! */
   IRIS_HEAP_DEVICE_LOCAL,
   IRIS_HEAP_MAX_NO_VRAM = IRIS_HEAP_DEVICE_LOCAL,

   /** Device-local compressed memory, only supported on Xe2 */
   IRIS_HEAP_DEVICE_LOCAL_COMPRESSED,

   /** Device-local memory that may be evicted to system memory if needed. */
   IRIS_HEAP_DEVICE_LOCAL_PREFERRED,

   /**
    * Device-local memory (VRAM) + a guarantee that it is CPU visible.
    *
    * To be used for allocations that cannot be placed in system memory!
    * This will only be used when running on small PCIe BAR systems.
    */
   IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR,
   IRIS_HEAP_MAX_LARGE_BAR = IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR,

   IRIS_HEAP_MAX,
};

extern const char *iris_heap_to_string[];

static inline bool
iris_heap_is_device_local(enum iris_heap heap)
{
   return heap == IRIS_HEAP_DEVICE_LOCAL ||
          heap == IRIS_HEAP_DEVICE_LOCAL_PREFERRED ||
          heap == IRIS_HEAP_DEVICE_LOCAL_CPU_VISIBLE_SMALL_BAR ||
          heap == IRIS_HEAP_DEVICE_LOCAL_COMPRESSED;
}

#define IRIS_BATCH_COUNT 3

struct iris_bo_screen_deps {
   struct iris_syncobj *write_syncobjs[IRIS_BATCH_COUNT];
   struct iris_syncobj *read_syncobjs[IRIS_BATCH_COUNT];
};

struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
   uint32_t hash;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Canonical virtual address of the buffer inside the PPGTT (Per-Process
    * Graphics Translation Table).
    *
    * Although each hardware context has its own VMA, we assign BOs the
    * same address in all contexts, for simplicity.
    */
   uint64_t address;

   /**
    * If non-zero, then this bo has an aux-map translation to this address.
    */
   uint64_t aux_map_address;

   /**
    * If this BO is referenced by a batch, this _may_ be the index into the
    * batch->exec_bos[] list.
    *
    * Note that a single buffer may be used by multiple batches/contexts,
    * and thus appear in multiple lists, but we only track one index here.
    * In the common case one can guess that batch->exec_bos[bo->index] == bo
    * and double check if that's true to avoid a linear list walk.
    *
    * XXX: this is not ideal now that we have more than one batch per context,
    * XXX: as the index will flop back and forth between the render index and
    * XXX: compute index...
    */
   unsigned index;

   int refcount;
   const char *name;

   /** BO cache list */
   struct list_head head;

   /**
    * Synchronization sequence number of most recent access of this BO from
    * each caching domain.
    *
    * Although this is a global field, use in multiple contexts should be
    * safe, see iris_emit_buffer_barrier_for() for details.
    *
    * Also align it to 64 bits.  This will make atomic operations faster on
    * 32-bit platforms.
    */
   alignas(8) uint64_t last_seqnos[NUM_IRIS_DOMAINS];

   /** Up to one per screen, may need realloc. */
   struct iris_bo_screen_deps *deps;
   int deps_size;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /** Was this buffer zeroed at allocation time? */
   bool zeroed;

   union {
      struct {
         time_t free_time;

         /** Mapped address for the buffer, saved across map/unmap cycles */
         void *map;

         /** List of GEM handle exports of this buffer (bo_export) */
         struct list_head exports;

         /**
          * Kernel-assigned global name for this object
          *
          * List contains both flink named and prime fd'd objects
          */
         unsigned global_name;

         /** Prime fd used for shared buffers, -1 otherwise. */
         int prime_fd;

         /** The mmap coherency mode selected at BO allocation time */
         enum iris_mmap_mode mmap_mode;

         /** The heap selected at BO allocation time */
         enum iris_heap heap;

         /** Was this buffer imported from an external client? */
         bool imported;

         /** Has this buffer been exported to external clients? */
         bool exported;

         /** Boolean of whether this buffer can be re-used */
         bool reusable;

         /** Boolean of whether this buffer points into user memory */
         bool userptr;

         /** Boolean of whether this buffer is protected (HW encryption) */
         bool protected;

         /** Boolean of whether this buffer needs to be captured in the error
          * dump.  The Xe KMD requires this to be set before VM bind, while
          * i915 needs it set before batch_submit().
          */
         bool capture;
      } real;
      struct {
         struct pb_slab_entry entry;
         struct iris_bo *real;
      } slab;
   };
};
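
/* Whether a BO is backed by its own GEM object or is a slab suballocation
 * determines which union member above is valid; iris_bo_is_real() below
 * makes that distinction based on gem_handle.
 */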

/* No special attributes. */
#define BO_ALLOC_PLAIN           0
/* Content is set to 0, only done in cache and slabs code paths. */
#define BO_ALLOC_ZEROED          (1<<0)
/* Allocate a cached and coherent BO; this has a performance cost on
 * integrated platforms without LLC.  Should only be used for BOs that
 * will be written and read from the CPU often.
 */
#define BO_ALLOC_COHERENT        (1<<1)
/* Place BO only in smem. */
#define BO_ALLOC_SMEM            (1<<2)
/* BO can be sent to display. */
#define BO_ALLOC_SCANOUT         (1<<3)
/* No sub-allocation (slabs). */
#define BO_ALLOC_NO_SUBALLOC     (1<<4)
/* Place BO only in lmem. */
#define BO_ALLOC_LMEM            (1<<5)
/* Content is protected, can't be mapped and needs special handling. */
#define BO_ALLOC_PROTECTED       (1<<6)
/* BO can be exported to other applications. */
#define BO_ALLOC_SHARED          (1<<7)
/* BO will be captured in the KMD error dump. */
#define BO_ALLOC_CAPTURE         (1<<8)
/* Can be mapped. */
#define BO_ALLOC_CPU_VISIBLE     (1<<9)
/* BO content is compressed. */
#define BO_ALLOC_COMPRESSED      (1<<10)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using iris_bo_map() to be used by the CPU.
 */
struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
                              const char *name,
                              uint64_t size,
                              uint32_t alignment,
                              enum iris_memory_zone memzone,
                              unsigned flags);
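
/* Example (an illustrative sketch only; "bufmgr" is assumed to come from
 * iris_bufmgr_get_for_fd() and the name/size/alignment/flags are arbitrary):
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "example data", 64 * 1024, 4096,
 *                     IRIS_MEMZONE_OTHER, BO_ALLOC_PLAIN);
 *    if (bo) {
 *       ...
 *       iris_bo_unreference(bo);
 *    }
 */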

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone);

/** Takes a reference on a buffer object */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void iris_bo_unreference(struct iris_bo *bo);

#define MAP_READ          PIPE_MAP_READ
#define MAP_WRITE         PIPE_MAP_WRITE
#define MAP_ASYNC         PIPE_MAP_UNSYNCHRONIZED
#define MAP_PERSISTENT    PIPE_MAP_PERSISTENT
#define MAP_COHERENT      PIPE_MAP_COHERENT
/* internal */
#define MAP_RAW           (PIPE_MAP_DRV_PRV << 0)
#define MAP_INTERNAL_MASK (MAP_RAW)

#define MAP_FLAGS         (MAP_READ | MAP_WRITE | MAP_ASYNC | \
                           MAP_PERSISTENT | MAP_COHERENT | MAP_INTERNAL_MASK)

/**
 * Maps the buffer into userspace.
 *
 * This function will first block waiting for any existing execution on the
 * buffer to complete.  The resulting mapping is returned.
 */
MUST_CHECK void *iris_bo_map(struct util_debug_callback *dbg,
                             struct iris_bo *bo, unsigned flags);
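
/* Example (an illustrative sketch only; "dbg", "bo", "data", and "size" are
 * assumed to exist in the caller):
 *
 *    void *map = iris_bo_map(dbg, bo, MAP_WRITE | MAP_ASYNC);
 *    if (map)
 *       memcpy(map, data, size);
 */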

/**
 * Reduces the refcount on the userspace mapping of the buffer object.
 *
 * Currently a no-op: the mapping is saved in bo->real.map across map/unmap
 * cycles.
 */
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }

/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void iris_bo_wait_rendering(struct iris_bo *bo);


/**
 * Unref a buffer manager instance.
 */
void iris_bufmgr_unref(struct iris_bufmgr *bufmgr);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo   Buffer to create a name for
 * \param name Returned name
 */
int iris_bo_flink(struct iris_bo *bo, uint32_t *name);

/**
 * Returns true if the BO is backed by a real GEM object, false if it's
 * a wrapper that's suballocated from a larger BO.
 */
static inline bool
iris_bo_is_real(struct iris_bo *bo)
{
   return bo->gem_handle != 0;
}

/**
 * Unwrap any slab-allocated wrapper BOs to get the BO for the underlying
 * backing storage, which is a real BO associated with a GEM object.
 */
static inline struct iris_bo *
iris_get_backing_bo(struct iris_bo *bo)
{
   if (!iris_bo_is_real(bo))
      bo = bo->slab.real;

   /* We only allow one level of wrapping. */
   assert(iris_bo_is_real(bo));

   return bo;
}
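
/* Illustrative sketch (an assumption, not an API guarantee stated here):
 * since a slab suballocation lives inside its backing BO's address range,
 * its byte offset within the backing storage can be computed as
 *
 *    uint64_t offset = bo->address - iris_get_backing_bo(bo)->address;
 */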

/**
 * Is this buffer shared with external clients (imported or exported)?
 */
static inline bool
iris_bo_is_external(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported || bo->real.imported;
}

static inline bool
iris_bo_is_imported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.imported;
}

static inline bool
iris_bo_is_exported(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.exported;
}

/**
 * True if the BO prefers to reside in device-local memory.
 *
 * We don't consider eviction here; this is meant to be a performance hint.
 * It will return true for BOs allocated from the LMEM or LMEM+SMEM heaps,
 * even if the buffer has been temporarily evicted to system memory.
 */
static inline bool
iris_bo_likely_local(const struct iris_bo *bo)
{
   if (!bo)
      return false;

   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return iris_heap_is_device_local(bo->real.heap);
}

static inline enum iris_mmap_mode
iris_bo_mmap_mode(const struct iris_bo *bo)
{
   bo = iris_get_backing_bo((struct iris_bo *) bo);
   return bo->real.mmap_mode;
}

/**
 * Mark a buffer as being shared with other external clients.
 */
void iris_bo_mark_exported(struct iris_bo *bo);

/**
 * Returns true if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
bool iris_bo_busy(struct iris_bo *bo);

struct iris_bufmgr *iris_bufmgr_get_for_fd(int fd, bool bo_reuse);
int iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr);
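
/* Lifecycle sketch (illustrative only): a screen typically acquires a
 * shared bufmgr for its DRM fd and drops the reference on teardown:
 *
 *    struct iris_bufmgr *bufmgr = iris_bufmgr_get_for_fd(fd, bo_reuse);
 *    ...
 *    iris_bufmgr_unref(bufmgr);
 */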

struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned handle);

void *iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr);

int iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling);
int iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf);

int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
                                      const uint64_t modifier);

/**
 * Exports a bo as a GEM handle into a given DRM file descriptor
 * \param bo Buffer to export
 * \param drm_fd File descriptor where the new handle is created
 * \param out_handle Pointer to store the new handle
 *
 * Returns 0 if the buffer was successfully exported, a non-zero error code
 * otherwise.
 */
int iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
                                         uint32_t *out_handle);

/**
 * Returns the BO's address relative to the appropriate base address.
 *
 * All of our base addresses are programmed to the start of a 4GB region,
 * so simply returning the bottom 32 bits of the BO address will give us
 * the offset from whatever base address corresponds to that memory region.
 */
static inline uint32_t
iris_bo_offset_from_base_address(struct iris_bo *bo)
{
   /* This only works for buffers in the memory zones corresponding to a
    * base address - the top, unbounded memory zone doesn't have a base.
    */
   assert(bo->address < IRIS_MEMZONE_OTHER_START);
   return bo->address;
}
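
/* For example (a sketch using the zone layout above): a dynamic-state BO at
 * address 0x200004000 lies in the [8G, 12G) zone, whose base address is
 * IRIS_MEMZONE_DYNAMIC_START (0x200000000), so the function returns 0x4000.
 */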

/**
 * Track access of a BO from the specified caching domain and sequence number.
 *
 * Can be used without locking.  Only the most recent access (i.e. highest
 * seqno) is tracked.
 */
static inline void
iris_bo_bump_seqno(struct iris_bo *bo, uint64_t seqno,
                   enum iris_domain type)
{
   uint64_t *const last_seqno = &bo->last_seqnos[type];
   uint64_t tmp, prev_seqno = p_atomic_read(last_seqno);

   while (prev_seqno < seqno &&
          prev_seqno != (tmp = p_atomic_cmpxchg(last_seqno, prev_seqno, seqno)))
      prev_seqno = tmp;
}
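
/* The compare-and-swap loop above only ever moves the stored seqno forward:
 * if another thread races in with a newer value, the cmpxchg fails, the
 * observed value is reloaded, and the loop stops once the stored seqno is
 * already >= the one being recorded.
 */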

/**
 * Return the PAT entry for the given heap.
 */
const struct intel_device_info_pat_entry *
iris_heap_to_pat_entry(const struct intel_device_info *devinfo,
                       enum iris_heap heap);

enum iris_memory_zone iris_memzone_for_address(uint64_t address);

int iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr);

simple_mtx_t *iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr);

/**
 * A pool containing SAMPLER_BORDER_COLOR_STATE entries.
 *
 * See iris_border_color.c for more information.
 */
struct iris_border_color_pool {
   struct iris_bo *bo;
   void *map;
   unsigned insert_point;

   /** Map from border colors to offsets in the buffer. */
   struct hash_table *ht;

   /** Protects insert_point and the hash table. */
   simple_mtx_t lock;
};

struct iris_border_color_pool *iris_bufmgr_get_border_color_pool(
      struct iris_bufmgr *bufmgr);

/* iris_border_color.c */
void iris_init_border_color_pool(struct iris_bufmgr *bufmgr,
                                 struct iris_border_color_pool *pool);
void iris_destroy_border_color_pool(struct iris_border_color_pool *pool);
uint32_t iris_upload_border_color(struct iris_border_color_pool *pool,
                                  union pipe_color_union *color);
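
/* Note (an assumption drawn from the zone layout above): because the pool BO
 * is placed at IRIS_BORDER_COLOR_POOL_ADDRESS, the very start of the dynamic
 * state zone, the offsets handed out by iris_upload_border_color() stay small
 * enough to fit the 24-bit SAMPLER_STATE "Indirect State Pointer".
 */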

uint64_t iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr);
const struct intel_device_info *iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr);
const struct iris_kmd_backend *
iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr);
uint32_t iris_bufmgr_get_global_vm_id(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_use_global_vm_id(struct iris_bufmgr *bufmgr);
struct intel_bind_timeline *iris_bufmgr_get_bind_timeline(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_compute_engine_supported(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_get_dummy_aux_address(struct iris_bufmgr *bufmgr);

enum iris_madvice {
   IRIS_MADVICE_WILL_NEED = 0,
   IRIS_MADVICE_DONT_NEED = 1,
};

void iris_bo_import_sync_state(struct iris_bo *bo, int sync_file_fd);
struct iris_syncobj *iris_bo_export_sync_state(struct iris_bo *bo);

#endif /* IRIS_BUFMGR_H */