/*
 * Copyright 2008 Corbin Simpson <[email protected]>
 * Copyright 2010 Marek Olšák <[email protected]>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Skip command submission. Same as RADEON_NOOP=1. */
#define RADEON_FLUSH_NOOP                     (1u << 29)

/* Toggle the secure submission boolean after the flush */
#define RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION (1u << 30)

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW    (1u << 31)

#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW                                                   \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)

#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"
#include "pipebuffer/pb_buffer.h"

/* Tiling flags. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0,
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN
};

enum radeon_bo_domain
{ /* bitfield */
  RADEON_DOMAIN_GTT = 2,
  RADEON_DOMAIN_VRAM = 4,
  RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
  RADEON_DOMAIN_GDS = 8,
  RADEON_DOMAIN_OA = 16,
};

enum radeon_bo_flag
{ /* bitfield */
  RADEON_FLAG_GTT_WC = (1 << 0),
  RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
  RADEON_FLAG_NO_SUBALLOC = (1 << 2),
  RADEON_FLAG_SPARSE = (1 << 3),
  RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
  RADEON_FLAG_32BIT = (1 << 6),
  RADEON_FLAG_ENCRYPTED = (1 << 7),
  RADEON_FLAG_GL2_BYPASS = (1 << 8), /* only gfx9 and newer */
  RADEON_FLAG_DRIVER_INTERNAL = (1 << 9),
   /* Discard on eviction (instead of moving the buffer to GTT).
    * This guarantees that this buffer will never be moved to GTT.
    */
  RADEON_FLAG_DISCARDABLE = (1 << 10),
  RADEON_FLAG_WINSYS_SLAB_BACKING = (1 << 11), /* only used by the winsys */
  RADEON_FLAG_GFX12_ALLOW_DCC = (1 << 12), /* allow DCC, VRAM only */
};

static inline void
si_res_print_flags(enum radeon_bo_flag flags) {
   if (flags & RADEON_FLAG_GTT_WC)
      fprintf(stderr, "GTT_WC ");
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      fprintf(stderr, "NO_CPU_ACCESS ");
   if (flags & RADEON_FLAG_NO_SUBALLOC)
      fprintf(stderr, "NO_SUBALLOC ");
   if (flags & RADEON_FLAG_SPARSE)
      fprintf(stderr, "SPARSE ");
   if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING)
      fprintf(stderr, "NO_INTERPROCESS_SHARING ");
   if (flags & RADEON_FLAG_32BIT)
      fprintf(stderr, "32BIT ");
   if (flags & RADEON_FLAG_ENCRYPTED)
      fprintf(stderr, "ENCRYPTED ");
   if (flags & RADEON_FLAG_GL2_BYPASS)
      fprintf(stderr, "GL2_BYPASS ");
   if (flags & RADEON_FLAG_DRIVER_INTERNAL)
      fprintf(stderr, "DRIVER_INTERNAL ");
   if (flags & RADEON_FLAG_DISCARDABLE)
      fprintf(stderr, "DISCARDABLE ");
   if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
      fprintf(stderr, "GFX12_ALLOW_DCC ");
}

enum radeon_map_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_MAP_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};

#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_SLAB_WASTED_VRAM,
   RADEON_SLAB_WASTED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE,
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE,
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};

enum radeon_ctx_priority
{
   RADEON_CTX_PRIORITY_LOW = 0,
   RADEON_CTX_PRIORITY_MEDIUM,
   RADEON_CTX_PRIORITY_HIGH,
   RADEON_CTX_PRIORITY_REALTIME,
};

enum radeon_ctx_pstate
{
   RADEON_CTX_PSTATE_NONE = 0,
   RADEON_CTX_PSTATE_STANDARD,
   RADEON_CTX_PSTATE_MIN_SCLK,
   RADEON_CTX_PSTATE_MIN_MCLK,
   RADEON_CTX_PSTATE_PEAK,
};


/* Each group of two has the same priority. */
#define RADEON_PRIO_FENCE_TRACE (1 << 0)
#define RADEON_PRIO_SO_FILLED_SIZE (1 << 1)

#define RADEON_PRIO_QUERY (1 << 2)
#define RADEON_PRIO_IB (1 << 3)

#define RADEON_PRIO_DRAW_INDIRECT (1 << 4)
#define RADEON_PRIO_INDEX_BUFFER (1 << 5)

#define RADEON_PRIO_CP_DMA (1 << 6)
#define RADEON_PRIO_BORDER_COLORS (1 << 7)

#define RADEON_PRIO_CONST_BUFFER (1 << 8)
#define RADEON_PRIO_DESCRIPTORS (1 << 9)

#define RADEON_PRIO_SAMPLER_BUFFER (1 << 10)
#define RADEON_PRIO_VERTEX_BUFFER (1 << 11)

#define RADEON_PRIO_SHADER_RW_BUFFER (1 << 12)
#define RADEON_PRIO_SAMPLER_TEXTURE (1 << 13)

#define RADEON_PRIO_SHADER_RW_IMAGE (1 << 14)
#define RADEON_PRIO_SAMPLER_TEXTURE_MSAA (1 << 15)

#define RADEON_PRIO_COLOR_BUFFER (1 << 16)
#define RADEON_PRIO_DEPTH_BUFFER (1 << 17)

#define RADEON_PRIO_COLOR_BUFFER_MSAA (1 << 18)
#define RADEON_PRIO_DEPTH_BUFFER_MSAA (1 << 19)

#define RADEON_PRIO_SEPARATE_META (1 << 20)
#define RADEON_PRIO_SHADER_BINARY (1 << 21) /* the hw can't hide instruction cache misses */

#define RADEON_PRIO_SHADER_RINGS (1 << 22)
#define RADEON_PRIO_SCRATCH_BUFFER (1 << 23)

#define RADEON_ALL_PRIORITIES (RADEON_USAGE_READ - 1)

/* Upper bits of priorities are used by usage flags. */
#define RADEON_USAGE_READ (1 << 27)
#define RADEON_USAGE_WRITE (1 << 28)
#define RADEON_USAGE_READWRITE (RADEON_USAGE_READ | RADEON_USAGE_WRITE)

/* The winsys ensures that the CS submission will be scheduled after
 * previously flushed CSs referencing this BO in a conflicting way.
 */
#define RADEON_USAGE_SYNCHRONIZED (1 << 29)

/* When used, an implicit sync is done to make sure a compute shader
 * will read the written values from a previous draw.
 */
#define RADEON_USAGE_CB_NEEDS_IMPLICIT_SYNC (1u << 30)
#define RADEON_USAGE_DB_NEEDS_IMPLICIT_SYNC (1u << 31)

struct winsys_handle;
struct radeon_winsys_ctx;

struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};

struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current;
   struct radeon_cmdbuf_chunk *prev;
   uint16_t num_prev; /* Number of previous chunks. */
   uint16_t max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint32_t used_vram_kb;
   uint32_t used_gart_kb;

   /* Private winsys data. */
   void *priv;
   void *csc; /* amdgpu_cs_context */
};

/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;
   } u;

   enum radeon_surf_mode mode;   /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};

enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};

struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};

struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;
   /**
    * Whether the application has created at least one TMZ buffer.
    */
   const bool uses_secure_bos;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Get FD for winsys if winsys provides one
    */
   int (*get_fd)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cpu);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags     A bitmask of the RADEON_FLAG_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer_lean *(*buffer_create)(struct radeon_winsys *ws, uint64_t size,
                                           unsigned alignment, enum radeon_bo_domain domain,
                                           enum radeon_bo_flag flags);
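
   /* Illustrative sketch (not part of the interface): how a driver might
    * allocate a small VRAM buffer with this entry point. The size, alignment
    * and flag choices below are assumptions for the example only.
    *
    *    struct pb_buffer_lean *bo =
    *       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
    *                         RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_INTERPROCESS_SHARING);
    *    if (!bo)
    *       return false; // allocation failed
    */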

   /**
    * Don't use directly. Use radeon_bo_reference.
    */
   void (*buffer_destroy)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_MAP_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                       struct radeon_cmdbuf *cs, enum pipe_map_flags usage);
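
   /* Illustrative sketch (an assumption, not part of the interface): uploading
    * data through a temporary CPU mapping. Because RADEON_MAP_TEMPORARY is set,
    * the caller must unmap; "bo", "data" and "size" are hypothetical.
    *
    *    void *ptr = ws->buffer_map(ws, bo, NULL, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
    *    if (ptr) {
    *       memcpy(ptr, data, size);
    *       ws->buffer_unmap(ws, bo);
    *    }
    */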

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * A timeout of 0 will only return the status.
    * A timeout of OS_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                       uint64_t timeout, unsigned usage);
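
   /* Illustrative sketch: a non-blocking busy query versus a blocking wait.
    * The usage mask chosen here is an assumption for the example.
    *
    *    bool idle = ws->buffer_wait(ws, bo, 0, RADEON_USAGE_READWRITE);
    *    if (!idle)
    *       ws->buffer_wait(ws, bo, OS_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
    */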

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    */
   struct pb_buffer_lean *(*buffer_from_handle)(struct radeon_winsys *ws,
                                                struct winsys_handle *whandle,
                                                unsigned vm_alignment,
                                                bool is_prime_linear_buffer);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param size      Size in bytes for the new buffer.
    */
   struct pb_buffer_lean *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer,
                                             uint64_t size, enum radeon_bo_flag flags);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer_lean *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer_lean *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                         uint64_t offset, uint64_t size, bool commit);
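
   /* Illustrative sketch: backing one 64KB page of a sparse buffer with memory.
    * Assumes "sparse_bo" was created with RADEON_FLAG_SPARSE; the page index is
    * arbitrary for the example.
    *
    *    if (!ws->buffer_commit(ws, sparse_bo, 4 * RADEON_SPARSE_PAGE_SIZE,
    *                           RADEON_SPARSE_PAGE_SIZE, true))
    *       return false; // out of memory
    */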

   /**
    * Calculate the size of the first committed part of the given sparse buffer.
    * \note Only implemented by the amdgpu winsys.
    * \return the skipped count if range_offset falls into a hole.
    */
   unsigned (*buffer_find_next_committed_memory)(struct pb_buffer_lean *buf,
                        uint64_t range_offset, unsigned *range_size);
   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer_lean *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer_lean *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer_lean *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffers this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer_lean *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    *
    * \param allow_context_lost  If true, lost contexts skip command submission and report
    *                            the reset status.
    *                            If false, losing the context results in undefined behavior.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws,
                                           enum radeon_ctx_priority priority,
                                           bool allow_context_lost);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Set a reset status for the context due to a software failure, such as an allocation failure
    * or a skipped draw.
    */
   void (*ctx_set_sw_reset_status)(struct radeon_winsys_ctx *ctx, enum pipe_reset_status status,
                                   const char *format, ...);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx,
                                                    bool full_reset_only,
                                                    bool *needs_reset, bool *reset_completed);

   /**
    * Create a command stream.
    *
    * \param cs           The returned structure that is initialized by cs_create.
    * \param ctx          The submission context
    * \param amd_ip_type  The IP type (GFX, DMA, UVD)
    * \param flush        Flush callback function associated with the command stream.
    * \param flush_ctx    User pointer that will be passed to the flush callback.
    *
    * \return true on success
    */
   bool (*cs_create)(struct radeon_cmdbuf *cs,
                     struct radeon_winsys_ctx *ctx, enum amd_ip_type amd_ip_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx);
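
   /* Illustrative sketch: creating a gfx command stream for a context. The
    * flush callback and its user pointer ("my_flush", "my_ctx") are
    * hypothetical driver code, not part of this interface.
    *
    *    struct radeon_cmdbuf cs;
    *    if (!ws->cs_create(&cs, rctx, AMD_IP_GFX, my_flush, my_ctx))
    *       return false;
    */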

   /**
    * Set up and enable mid command buffer preemption for the command stream.
    *
    * \param cs               Command stream
    * \param preamble_ib      Non-preemptible preamble IB for the context.
    * \param preamble_num_dw  Number of dwords in the preamble IB.
    */
   bool (*cs_setup_preemption)(struct radeon_cmdbuf *cs, const uint32_t *preamble_ib,
                               unsigned preamble_num_dw);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Usage
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                             unsigned usage, enum radeon_bo_domain domain);
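
   /* Illustrative sketch: registering a vertex buffer with the CS by combining
    * one RADEON_PRIO_* bit with RADEON_USAGE_* bits (see the flag definitions
    * above). The buffer and domain are assumptions for the example.
    *
    *    ws->cs_add_buffer(&cs, bo,
    *                      RADEON_PRIO_VERTEX_BUFFER | RADEON_USAGE_READ |
    *                      RADEON_USAGE_SYNCHRONIZED,
    *                      RADEON_DOMAIN_VRAM);
    */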

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       Bitmask of PIPE_FLUSH_* and RADEON_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);
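
   /* Illustrative sketch: submitting the CS asynchronously while letting the
    * next gfx IB start immediately, then waiting on the returned fence.
    * Whether a driver wants to wait here is an assumption for the example.
    *
    *    struct pipe_fence_handle *fence = NULL;
    *    ws->cs_flush(&cs, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, &fence);
    *    if (fence) {
    *       ws->fence_wait(ws, fence, OS_TIMEOUT_INFINITE);
    *       ws->fence_reference(ws, &fence, NULL);
    *    }
    */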

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                                   unsigned usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);
   /**
    * Make sure all asynchronous flushes of the CS have completed.
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Return the amd_ip_type of a CS.
    */
   enum amd_ip_type (*cs_get_ip_type)(struct radeon_cmdbuf *cs);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * A timeout of 0 will only return the status.
    * A timeout of OS_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct radeon_winsys *ws, struct pipe_fence_handle **dst,
                           struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize a surface.
    *
    * \param ws        The winsys this function is called from.
    * \param info      radeon_info from the driver
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel; it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct radeon_info *info,
                       const struct pipe_resource *tex, uint64_t flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    */
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);

   /**
    * Stable pstate
    */
   bool (*cs_set_pstate)(struct radeon_cmdbuf *cs, enum radeon_ctx_pstate state);

   /**
    * Pass the VAs of the buffers where various information is saved by the FW during MCBP.
    */
   void (*cs_set_mcbp_reg_shadowing_va)(struct radeon_cmdbuf *cs, uint64_t regs_va,
                                                                  uint64_t csa_va);
};

static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
{
   return cs && (cs->prev_dw + cs->current.cdw > num_dw);
}

static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
{
   cs->current.buf[cs->current.cdw++] = value;
}

static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
                                     unsigned count)
{
   memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
   cs->current.cdw += count;
}
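
/* Illustrative sketch: the typical emit pattern built on these helpers. The
 * "header" and "payload" values are hypothetical; a real driver would use its
 * own PM4 packet builders.
 *
 *    uint32_t payload[2] = {0, 0};
 *    if (ws->cs_check_space(cs, 3)) {
 *       radeon_emit(cs, header);           // e.g. a PM4 packet header
 *       radeon_emit_array(cs, payload, 2); // packet body
 *    }
 */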

static inline bool radeon_uses_secure_bos(struct radeon_winsys* ws)
{
  return ws->uses_secure_bos;
}

static inline void
radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer_lean **dst,
                    struct pb_buffer_lean *src)
{
   struct pb_buffer_lean *old = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference))
      rws->buffer_destroy(rws, old);
   *dst = src;
}

/* Same as radeon_bo_reference, but ignore the value in *dst. */
static inline void
radeon_bo_set_reference(struct pb_buffer_lean **dst, struct pb_buffer_lean *src)
{
   *dst = src;
   pipe_reference(NULL, &src->reference); /* only increment refcount */
}

/* Unreference dst, but don't assign anything. */
static inline void
radeon_bo_drop_reference(struct radeon_winsys *rws, struct pb_buffer_lean *dst)
{
   if (pipe_reference(&dst->reference, NULL)) /* only decrement refcount */
      rws->buffer_destroy(rws, dst);
}
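
/* Illustrative sketch: holding and releasing a reference with the helpers
 * above. "rws" and "bo" are hypothetical.
 *
 *    struct pb_buffer_lean *ref = NULL;
 *    radeon_bo_set_reference(&ref, bo);   // take a reference, old *ref is ignored
 *    ...
 *    radeon_bo_drop_reference(rws, ref);  // release it; may destroy the buffer
 */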

/* The following bits describe the heaps managed by slab allocators (pb_slab) and
 * the allocation cache (pb_cache).
 */
#define RADEON_HEAP_BIT_VRAM           (1 << 0) /* if false, it's GTT */
#define RADEON_HEAP_BIT_GL2_BYPASS     (1 << 1) /* both VRAM and GTT */
#define RADEON_HEAP_BIT_32BIT          (1 << 2) /* both VRAM and GTT */
#define RADEON_HEAP_BIT_ENCRYPTED      (1 << 3) /* both VRAM and GTT */

#define RADEON_HEAP_BIT_NO_CPU_ACCESS  (1 << 4) /* VRAM only */
#define RADEON_HEAP_BIT_GFX12_ALLOW_DCC (1 << 5) /* VRAM only */

#define RADEON_HEAP_BIT_WC             (1 << 4) /* GTT only, VRAM implies this to be true */

/* The number of all possible heap descriptions using the bits above. */
#define RADEON_NUM_HEAPS               (1 << 6)

static inline enum radeon_bo_domain radeon_domain_from_heap(int heap)
{
   assert(heap >= 0);

   if (heap & RADEON_HEAP_BIT_VRAM)
      return RADEON_DOMAIN_VRAM;
   else
      return RADEON_DOMAIN_GTT;
}

static inline unsigned radeon_flags_from_heap(int heap)
{
   assert(heap >= 0);

   unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (heap & RADEON_HEAP_BIT_GL2_BYPASS)
      flags |= RADEON_FLAG_GL2_BYPASS;
   if (heap & RADEON_HEAP_BIT_32BIT)
      flags |= RADEON_FLAG_32BIT;
   if (heap & RADEON_HEAP_BIT_ENCRYPTED)
      flags |= RADEON_FLAG_ENCRYPTED;

   if (heap & RADEON_HEAP_BIT_VRAM) {
      flags |= RADEON_FLAG_GTT_WC;
      if (heap & RADEON_HEAP_BIT_NO_CPU_ACCESS)
         flags |= RADEON_FLAG_NO_CPU_ACCESS;
      if (heap & RADEON_HEAP_BIT_GFX12_ALLOW_DCC)
         flags |= RADEON_FLAG_GFX12_ALLOW_DCC;
   } else {
      /* GTT only */
      if (heap & RADEON_HEAP_BIT_WC)
         flags |= RADEON_FLAG_GTT_WC;
   }

   return flags;
}

/* This cleans up flags, so that we can comfortably assume that no invalid flag combinations
 * are set.
 */
static void radeon_canonicalize_bo_flags(enum radeon_bo_domain *_domain,
                                         enum radeon_bo_flag *_flags)
{
   unsigned domain = *_domain;
   unsigned flags = *_flags;

   /* Only set 1 domain, e.g. ignore GTT if VRAM is set. */
   if (domain)
      domain = BITFIELD_BIT(ffs(domain) - 1);
   else
      domain = RADEON_DOMAIN_VRAM;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      flags |= RADEON_FLAG_GTT_WC;
      break;
   case RADEON_DOMAIN_GTT:
      flags &= ~RADEON_FLAG_NO_CPU_ACCESS;
      flags &= ~RADEON_FLAG_GFX12_ALLOW_DCC;
      break;
   case RADEON_DOMAIN_GDS:
   case RADEON_DOMAIN_OA:
      flags |= RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_NO_CPU_ACCESS;
      flags &= ~RADEON_FLAG_SPARSE;
      break;
   }

   /* Sparse buffers must have NO_CPU_ACCESS set. */
   if (flags & RADEON_FLAG_SPARSE)
      flags |= RADEON_FLAG_NO_CPU_ACCESS;

   *_domain = (enum radeon_bo_domain)domain;
   *_flags = (enum radeon_bo_flag)flags;
}

/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   radeon_canonicalize_bo_flags(&domain, &flags);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* These are unsupported flags. */
   /* RADEON_FLAG_DRIVER_INTERNAL is ignored. It doesn't affect allocators. */
   if (flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE |
                RADEON_FLAG_DISCARDABLE))
      return -1;

   int heap = 0;

   if (flags & RADEON_FLAG_GL2_BYPASS)
      heap |= RADEON_HEAP_BIT_GL2_BYPASS;
   if (flags & RADEON_FLAG_32BIT)
      heap |= RADEON_HEAP_BIT_32BIT;
   if (flags & RADEON_FLAG_ENCRYPTED)
      heap |= RADEON_HEAP_BIT_ENCRYPTED;

   if (domain == RADEON_DOMAIN_VRAM) {
      /* VRAM | GTT shouldn't occur, but if it does, ignore GTT. */
      heap |= RADEON_HEAP_BIT_VRAM;
      if (flags & RADEON_FLAG_NO_CPU_ACCESS)
         heap |= RADEON_HEAP_BIT_NO_CPU_ACCESS;
      if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
         heap |= RADEON_HEAP_BIT_GFX12_ALLOW_DCC;
      /* RADEON_FLAG_GTT_WC is ignored and implied to be true for VRAM */
   } else if (domain == RADEON_DOMAIN_GTT) {
      /* GTT is implied by RADEON_HEAP_BIT_VRAM not being set. */
      if (flags & RADEON_FLAG_GTT_WC)
         heap |= RADEON_HEAP_BIT_WC;
      /* RADEON_FLAG_NO_CPU_ACCESS is ignored and implied to be false for GTT */
   } else {
      return -1; /* other domains (GDS, OA) don't use winsys allocators */
   }

   assert(heap < RADEON_NUM_HEAPS);
   return heap;
}
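
/* Illustrative sketch: the heap index round-trips back to a canonical
 * domain/flags pair. The input flags are an arbitrary example.
 *
 *    int heap = radeon_get_heap_index(RADEON_DOMAIN_VRAM,
 *                                     RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    assert(heap >= 0 && heap < RADEON_NUM_HEAPS);
 *    assert(radeon_domain_from_heap(heap) == RADEON_DOMAIN_VRAM);
 *    // radeon_flags_from_heap(heap) additionally reports RADEON_FLAG_GTT_WC,
 *    // which canonicalization implies for VRAM.
 */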

typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *,
                                                      const struct pipe_screen_config *);

/* These functions create the radeon_winsys instance for the corresponding kernel driver. */
struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create);
struct radeon_winsys *
radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
                         radeon_screen_create_t screen_create);

#endif