/*
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_gpu_info.h"
#include "ac_shader_util.h"
#include "ac_debug.h"
#include "ac_surface.h"

#include "addrlib/src/amdgpu_asic_addr.h"
#include "sid.h"
#include "util/macros.h"
#include "util/u_cpu_detect.h"
#include "util/u_math.h"
#include "util/os_misc.h"
#include "util/bitset.h"

#include <stdio.h>
#include <ctype.h>

#define AMDGPU_MI100_RANGE       0x32, 0x3C
#define AMDGPU_MI200_RANGE       0x3C, 0xFF
#define AMDGPU_GFX940_RANGE      0x46, 0xFF
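/* Each *_RANGE is a half-open [lo, hi) interval of external_rev values consumed by
 * ASICREV_IS (addrlib convention from amdgpu_asic_addr.h); e.g. an external_rev of
 * 0x34 within FAMILY_AI would identify MI100.
 */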

#define ASICREV_IS_MI100(r)      ASICREV_IS(r, MI100)
#define ASICREV_IS_MI200(r)      ASICREV_IS(r, MI200)
#define ASICREV_IS_GFX940(r)     ASICREV_IS(r, GFX940)

#ifdef _WIN32
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
#define DRM_CAP_SYNCOBJ 0x13
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
#define AMDGPU_HW_IP_GFX 0
#define AMDGPU_HW_IP_COMPUTE 1
#define AMDGPU_HW_IP_DMA 2
#define AMDGPU_HW_IP_UVD 3
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
#define AMDGPU_INFO_FW_VCE 0x1
#define AMDGPU_INFO_FW_UVD 0x2
#define AMDGPU_INFO_FW_GFX_ME 0x04
#define AMDGPU_INFO_FW_GFX_PFP 0x05
#define AMDGPU_INFO_FW_GFX_CE 0x06
#define AMDGPU_INFO_FW_VCN 0x0e
#define AMDGPU_INFO_DEV_INFO 0x16
#define AMDGPU_INFO_MEMORY 0x19
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
#define AMDGPU_INFO_FW_GFX_MEC 0x08
#define AMDGPU_INFO_MAX_IBS 0x22

#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2  2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM   6
#define AMDGPU_VRAM_TYPE_DDR3  7
#define AMDGPU_VRAM_TYPE_DDR4  8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5  10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12

#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8

struct drm_amdgpu_heap_info {
   uint64_t total_heap_size;
};
struct drm_amdgpu_memory_info {
   struct drm_amdgpu_heap_info vram;
   struct drm_amdgpu_heap_info cpu_accessible_vram;
   struct drm_amdgpu_heap_info gtt;
};
struct drm_amdgpu_info_device {
   /** PCI Device ID */
   uint32_t device_id;
   /** Internal chip revision: A0, A1, etc. */
   uint32_t chip_rev;
   uint32_t external_rev;
   /** Revision id in PCI Config space */
   uint32_t pci_rev;
   uint32_t family;
   uint32_t num_shader_engines;
   uint32_t num_shader_arrays_per_engine;
   /* in KHz */
   uint32_t gpu_counter_freq;
   uint64_t max_engine_clock;
   uint64_t max_memory_clock;
   /* cu information */
   uint32_t cu_active_number;
   /* NOTE: cu_ao_mask is INVALID, DON'T use it */
   uint32_t cu_ao_mask;
   uint32_t cu_bitmap[4][4];
   /** Render backend pipe mask. One render backend is CB+DB. */
   uint32_t enabled_rb_pipes_mask;
   uint32_t num_rb_pipes;
   uint32_t num_hw_gfx_contexts;
   /* PCIe version (the smaller of the GPU and the CPU/motherboard) */
   uint32_t pcie_gen;
   uint64_t ids_flags;
   /** Starting virtual address for UMDs. */
   uint64_t virtual_address_offset;
   /** The maximum virtual address */
   uint64_t virtual_address_max;
   /** Required alignment of virtual addresses. */
   uint32_t virtual_address_alignment;
   /** Page table entry - fragment size */
   uint32_t pte_fragment_size;
   uint32_t gart_page_size;
   /** constant engine ram size */
   uint32_t ce_ram_size;
   /** video memory type info */
   uint32_t vram_type;
   /** video memory bit width */
   uint32_t vram_bit_width;
   /* vce harvesting instance */
   uint32_t vce_harvest_config;
   /* gfx double offchip LDS buffers */
   uint32_t gc_double_offchip_lds_buf;
   /* NGG Primitive Buffer */
   uint64_t prim_buf_gpu_addr;
   /* NGG Position Buffer */
   uint64_t pos_buf_gpu_addr;
   /* NGG Control Sideband */
   uint64_t cntl_sb_buf_gpu_addr;
   /* NGG Parameter Cache */
   uint64_t param_buf_gpu_addr;
   uint32_t prim_buf_size;
   uint32_t pos_buf_size;
   uint32_t cntl_sb_buf_size;
   uint32_t param_buf_size;
   /* wavefront size */
   uint32_t wave_front_size;
   /* shader visible vgprs */
   uint32_t num_shader_visible_vgprs;
   /* CU per shader array */
   uint32_t num_cu_per_sh;
   /* number of tcc blocks */
   uint32_t num_tcc_blocks;
   /* gs vgt table depth */
   uint32_t gs_vgt_table_depth;
   /* gs primitive buffer depth */
   uint32_t gs_prim_buffer_depth;
   /* max gs wavefront per vgt */
   uint32_t max_gs_waves_per_vgt;
   /* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
   uint32_t pcie_num_lanes;
   /* always on cu bitmap */
   uint32_t cu_ao_bitmap[4][4];
   /** Starting high virtual address for UMDs. */
   uint64_t high_va_offset;
   /** The maximum high virtual address */
   uint64_t high_va_max;
   /* gfx10 pa_sc_tile_steering_override */
   uint32_t pa_sc_tile_steering_override;
   /* disabled TCCs */
   uint64_t tcc_disabled_mask;
   uint64_t min_engine_clock;
   uint64_t min_memory_clock;
   /* The following fields are only set on gfx11+, older chips set 0. */
   uint32_t tcp_cache_size;       /* AKA GL0, VMEM cache */
   uint32_t num_sqc_per_wgp;
   uint32_t sqc_data_cache_size;  /* AKA SMEM cache */
   uint32_t sqc_inst_cache_size;
   uint32_t gl1c_cache_size;
   uint32_t gl2c_cache_size;
   uint64_t mall_size;            /* AKA infinity cache */
   /* high 32 bits of the rb pipes mask */
   uint32_t enabled_rb_pipes_mask_hi;
   /* shadow area size for gfx11 */
   uint32_t shadow_size;
   /* shadow area base virtual alignment for gfx11 */
   uint32_t shadow_alignment;
   /* context save area size for gfx11 */
   uint32_t csa_size;
   /* context save area base virtual alignment for gfx11 */
   uint32_t csa_alignment;
};
struct drm_amdgpu_info_hw_ip {
   uint32_t hw_ip_version_major;
   uint32_t hw_ip_version_minor;
   uint32_t ib_start_alignment;
   uint32_t ib_size_alignment;
   uint32_t available_rings;
   uint32_t ip_discovery_version;
};
typedef struct _drmPciBusInfo {
   uint16_t domain;
   uint8_t bus;
   uint8_t dev;
   uint8_t func;
} drmPciBusInfo, *drmPciBusInfoPtr;
typedef struct _drmDevice {
   union {
      drmPciBusInfoPtr pci;
   } businfo;
} drmDevice, *drmDevicePtr;
enum amdgpu_sw_info {
   amdgpu_sw_info_address32_hi = 0,
};
typedef struct amdgpu_device *amdgpu_device_handle;
typedef struct amdgpu_bo *amdgpu_bo_handle;
struct amdgpu_bo_alloc_request {
   uint64_t alloc_size;
   uint64_t phys_alignment;
   uint32_t preferred_heap;
   uint64_t flags;
};
struct amdgpu_gds_resource_info {
   uint32_t gds_gfx_partition_size;
   uint32_t gds_total_size;
};
struct amdgpu_buffer_size_alignments {
   uint64_t size_local;
   uint64_t size_remote;
};
struct amdgpu_heap_info {
   uint64_t heap_size;
};
struct amdgpu_gpu_info {
   uint32_t asic_id;
   uint32_t chip_external_rev;
   uint32_t family_id;
   uint64_t ids_flags;
   uint64_t max_engine_clk;
   uint64_t max_memory_clk;
   uint32_t num_shader_engines;
   uint32_t num_shader_arrays_per_engine;
   uint32_t rb_pipes;
   uint32_t enabled_rb_pipes_mask;
   uint32_t gpu_counter_freq;
   uint32_t mc_arb_ramcfg;
   uint32_t gb_addr_cfg;
   uint32_t gb_tile_mode[32];
   uint32_t gb_macro_tile_mode[16];
   uint32_t cu_bitmap[4][4];
   uint32_t vram_type;
   uint32_t vram_bit_width;
   uint32_t ce_ram_size;
   uint32_t vce_harvest_config;
   uint32_t pci_rev_id;
};
static int drmGetCap(int fd, uint64_t capability, uint64_t *value)
{
   return -EINVAL;
}
static void drmFreeDevice(drmDevicePtr *device)
{
}
static int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
{
   return -ENODEV;
}
static int amdgpu_bo_alloc(amdgpu_device_handle dev,
   struct amdgpu_bo_alloc_request *alloc_buffer,
   amdgpu_bo_handle *buf_handle)
{
   return -EINVAL;
}
static int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
   return -EINVAL;
}
static int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
   struct amdgpu_buffer_size_alignments
   *info)
{
   return -EINVAL;
}
static int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
   unsigned ip_instance, unsigned index,
   uint32_t *version, uint32_t *feature)
{
   return -EINVAL;
}
static int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
   unsigned ip_instance,
   struct drm_amdgpu_info_hw_ip *info)
{
   return -EINVAL;
}
static int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
   uint32_t *count)
{
   return -EINVAL;
}
static int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
   uint32_t flags, struct amdgpu_heap_info *info)
{
   return -EINVAL;
}
static int amdgpu_query_gpu_info(amdgpu_device_handle dev,
   struct amdgpu_gpu_info *info)
{
   return -EINVAL;
}
static int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
   unsigned size, void *value)
{
   return -EINVAL;
}
static int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
   void *value)
{
   return -EINVAL;
}
static int amdgpu_query_gds_info(amdgpu_device_handle dev,
   struct amdgpu_gds_resource_info *gds_info)
{
   return -EINVAL;
}
static int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
                                 unsigned size, void *value)
{
   return -EINVAL;
}
static const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
   return NULL;
}
static intptr_t readlink(const char *path, char *buf, size_t bufsiz)
{
   return -1;
}
#else
#include "drm-uapi/amdgpu_drm.h"
#include <amdgpu.h>
#include <xf86drm.h>
#include <unistd.h>
#endif

#define CIK_TILE_MODE_COLOR_2D 14

static bool has_timeline_syncobj(int fd)
{
   uint64_t value;
   if (drmGetCap(fd, DRM_CAP_SYNCOBJ_TIMELINE, &value))
      return false;
   return value ? true : false;
}

static bool has_modifiers(int fd)
{
   uint64_t value;
   if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &value))
      return false;
   return value ? true : false;
}

static uint64_t fix_vram_size(uint64_t size)
{
   /* The VRAM size is underreported, so we need to fix it, because
    * it's used to compute the number of memory modules for harvesting.
    */
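   /* e.g. a reported 8112 MiB rounds up to the next 256 MiB multiple, 8192 MiB. */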
   return align64(size, 256 * 1024 * 1024);
}

static bool
has_tmz_support(amdgpu_device_handle dev, struct radeon_info *info, uint32_t ids_flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   int r;
   amdgpu_bo_handle bo;

   if (ids_flags & AMDGPU_IDS_FLAGS_TMZ)
      return true;
   /* The kernel reports TMZ support via AMDGPU_IDS_FLAGS_TMZ starting with
    * drm_minor 40, so if the flag wasn't set above, TMZ is disabled.
    */
   if (info->drm_minor >= 40)
      return false;

   /* Find out ourselves if TMZ is enabled */
   if (info->gfx_level < GFX9)
      return false;

   if (info->drm_minor < 36)
      return false;

   request.alloc_size = 256;
   request.phys_alignment = 1024;
   request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
   request.flags = AMDGPU_GEM_CREATE_ENCRYPTED;
   r = amdgpu_bo_alloc(dev, &request, &bo);
   if (r)
      return false;
   amdgpu_bo_free(bo);
   return true;
}

static void set_custom_cu_en_mask(struct radeon_info *info)
{
   info->spi_cu_en = ~0;

   const char *cu_env_var = os_get_option("AMD_CU_MASK");
   if (!cu_env_var)
      return;

   int size = strlen(cu_env_var);
   char *str = alloca(size + 1);
   memset(str, 0, size + 1);

   size = 0;

   /* Strip whitespace. */
   for (unsigned src = 0; cu_env_var[src]; src++) {
      if (cu_env_var[src] != ' ' && cu_env_var[src] != '\t' &&
          cu_env_var[src] != '\n' && cu_env_var[src] != '\r') {
         str[size++] = cu_env_var[src];
      }
   }

   /* The following syntax is used, all whitespace is ignored:
    *   ID = [0-9][0-9]*                         ex. base 10 numbers
    *   ID_list = (ID | ID-ID)[, (ID | ID-ID)]*  ex. 0,2-4,7
    *   CU_list = 0x[0-F]* | ID_list             ex. 0x337F OR 0,2-4,7
    *   AMD_CU_MASK = CU_list
    *
    * It's a CU mask within a shader array. It's applied to all shader arrays.
    */
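   /* Worked example (illustrative): AMD_CU_MASK=0,2-4,7 sets bits 0, 2, 3, 4 and 7,
    * i.e. spi_cu_en = 0x9D, in every shader array.
    */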
   bool is_good_form = true;
   uint32_t spi_cu_en = 0;

   if (size > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) {
      str += 2;
      size -= 2;

      for (unsigned i = 0; i < size; i++)
         is_good_form &= isxdigit(str[i]) != 0;

      if (!is_good_form) {
         fprintf(stderr, "amd: invalid AMD_CU_MASK: ill-formed hex value\n");
      } else {
         spi_cu_en = strtol(str, NULL, 16);
      }
   } else {
      /* Parse ID_list. */
      long first = 0, last = -1;

      if (!isdigit(*str)) {
         is_good_form = false;
      } else {
         while (*str) {
            bool comma = false;

            if (isdigit(*str)) {
               first = last = strtol(str, &str, 10);
            } else if (*str == '-') {
               str++;
               /* Parse a digit after a dash. */
               if (isdigit(*str)) {
                  last = strtol(str, &str, 10);
               } else {
                  fprintf(stderr, "amd: invalid AMD_CU_MASK: expected a digit after -\n");
                  is_good_form = false;
                  break;
               }
            } else if (*str == ',') {
               comma = true;
               str++;
               if (!isdigit(*str)) {
                  fprintf(stderr, "amd: invalid AMD_CU_MASK: expected a digit after ,\n");
                  is_good_form = false;
                  break;
               }
            }

            if (comma || !*str) {
               if (first > last) {
                  fprintf(stderr, "amd: invalid AMD_CU_MASK: range not increasing (%li, %li)\n", first, last);
                  is_good_form = false;
                  break;
               }
               if (last > 31) {
                  fprintf(stderr, "amd: invalid AMD_CU_MASK: index too large (%li)\n", last);
                  is_good_form = false;
                  break;
               }

               spi_cu_en |= BITFIELD_RANGE(first, last - first + 1);
               last = -1;
            }
         }
      }
   }

   /* The mask is parsed. Now assign bits to CUs. */
   if (is_good_form) {
      bool error = false;

      /* Clear bits that have no effect. */
      spi_cu_en &= BITFIELD_MASK(info->max_good_cu_per_sa);

      if (!spi_cu_en) {
         fprintf(stderr, "amd: invalid AMD_CU_MASK: at least 1 CU in each SA must be enabled\n");
         error = true;
      }

      if (info->has_graphics) {
         uint32_t min_full_cu_mask = BITFIELD_MASK(info->min_good_cu_per_sa);

         /* The hw ignores all non-compute CU masks if any of them is 0. Disallow that. */
         if ((spi_cu_en & min_full_cu_mask) == 0) {
            fprintf(stderr, "amd: invalid AMD_CU_MASK: at least 1 CU from 0x%x per SA must be "
                            "enabled (SPI limitation)\n", min_full_cu_mask);
            error = true;
         }

         /* We usually disable 1 or 2 CUs for VS and GS, which means at least 1 other CU
          * must be enabled.
          */
         uint32_t cu_mask_ge, unused;
         ac_compute_late_alloc(info, false, false, false, &unused, &cu_mask_ge);
         cu_mask_ge &= min_full_cu_mask;

         if ((spi_cu_en & cu_mask_ge) == 0) {
            fprintf(stderr, "amd: invalid AMD_CU_MASK: at least 1 CU from 0x%x per SA must be "
                            "enabled (late alloc constraint for GE)\n", cu_mask_ge);
            error = true;
         }

         if ((min_full_cu_mask & spi_cu_en & ~cu_mask_ge) == 0) {
            fprintf(stderr, "amd: invalid AMD_CU_MASK: at least 1 CU from 0x%x per SA must be "
                            "enabled (late alloc constraint for PS)\n",
                    min_full_cu_mask & ~cu_mask_ge);
            error = true;
         }
      }

      if (!error) {
         info->spi_cu_en = spi_cu_en;
         info->spi_cu_en_has_effect = spi_cu_en & BITFIELD_MASK(info->max_good_cu_per_sa);
      }
   }
}

static bool ac_query_pci_bus_info(int fd, struct radeon_info *info)
{
   drmDevicePtr devinfo;

   /* Get PCI info. */
   int r = drmGetDevice2(fd, 0, &devinfo);
   if (r) {
      fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
      info->pci.valid = false;
      return false;
   }
   info->pci.domain = devinfo->businfo.pci->domain;
   info->pci.bus = devinfo->businfo.pci->bus;
   info->pci.dev = devinfo->businfo.pci->dev;
   info->pci.func = devinfo->businfo.pci->func;
   info->pci.valid = true;

   drmFreeDevice(&devinfo);
   return true;
}

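/* Example (using LLVM processor names): AMD_FORCE_FAMILY=gfx1100 makes the driver take
 * the NAVI31 code paths on any device; the resulting "NOOP" configuration is intended
 * for compile testing rather than rendering.
 */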
static void handle_env_var_force_family(struct radeon_info *info)
{
   const char *family = debug_get_option("AMD_FORCE_FAMILY", NULL);

   if (!family)
      return;

   for (unsigned i = CHIP_TAHITI; i < CHIP_LAST; i++) {
      if (!strcmp(family, ac_get_llvm_processor_name(i))) {
         /* Override family and gfx_level. */
         info->family = i;
         info->name = "NOOP";
         info->gfx_level = ac_get_gfx_level(i);
         info->family_id = ac_get_family_id(i);
         info->family_overridden = true;
         return;
      }
   }

   fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
   exit(1);
}

bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
                       bool require_pci_bus_info)
{
   struct amdgpu_gpu_info amdinfo;
   struct drm_amdgpu_info_device device_info = {0};
   struct amdgpu_buffer_size_alignments alignment_info = {0};
   uint32_t vidip_fw_version = 0, vidip_fw_feature = 0;
   uint32_t num_instances = 0;
   int r, i, j;
   amdgpu_device_handle dev = dev_p;

   STATIC_ASSERT(AMDGPU_HW_IP_GFX == AMD_IP_GFX);
   STATIC_ASSERT(AMDGPU_HW_IP_COMPUTE == AMD_IP_COMPUTE);
   STATIC_ASSERT(AMDGPU_HW_IP_DMA == AMD_IP_SDMA);
   STATIC_ASSERT(AMDGPU_HW_IP_UVD == AMD_IP_UVD);
   STATIC_ASSERT(AMDGPU_HW_IP_VCE == AMD_IP_VCE);
   STATIC_ASSERT(AMDGPU_HW_IP_UVD_ENC == AMD_IP_UVD_ENC);
   STATIC_ASSERT(AMDGPU_HW_IP_VCN_DEC == AMD_IP_VCN_DEC);
   STATIC_ASSERT(AMDGPU_HW_IP_VCN_ENC == AMD_IP_VCN_ENC);
   STATIC_ASSERT(AMDGPU_HW_IP_VCN_JPEG == AMD_IP_VCN_JPEG);
   STATIC_ASSERT(AMDGPU_HW_IP_VPE == AMD_IP_VPE);

   handle_env_var_force_family(info);

   if (!ac_query_pci_bus_info(fd, info)) {
      if (require_pci_bus_info)
         return false;
   }

   assert(info->drm_major == 3);
   info->is_amdgpu = true;

   if (info->drm_minor < 27) {
      fprintf(stderr, "amdgpu: DRM version is %u.%u.%u, but this driver is "
                      "only compatible with 3.27.0 (kernel 4.20+) or later.\n",
              info->drm_major, info->drm_minor, info->drm_patchlevel);
      return false;
   }

   uint64_t cap;
   r = drmGetCap(fd, DRM_CAP_SYNCOBJ, &cap);
   if (r != 0 || cap == 0) {
      fprintf(stderr, "amdgpu: syncobj support is missing but is required.\n");
      return false;
   }

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(dev, &amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      return false;
   }

   r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info), &device_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
      return false;
   }

   r = amdgpu_query_buffer_size_alignment(dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      return false;
   }

   for (unsigned ip_type = 0; ip_type < AMD_NUM_IP_TYPES; ip_type++) {
      struct drm_amdgpu_info_hw_ip ip_info = {0};

      r = amdgpu_query_hw_ip_info(dev, ip_type, 0, &ip_info);
      if (r || !ip_info.available_rings)
         continue;

      /* Gfx6-8 don't set ip_discovery_version. */
      if (info->drm_minor >= 48 && ip_info.ip_discovery_version) {
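         /* ip_discovery_version packs the IP version as 0xMMmmrr: major in
          * bits 23:16, minor in bits 15:8, revision in bits 7:0.
          */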
         info->ip[ip_type].ver_major = (ip_info.ip_discovery_version >> 16) & 0xff;
         info->ip[ip_type].ver_minor = (ip_info.ip_discovery_version >> 8) & 0xff;
         info->ip[ip_type].ver_rev = ip_info.ip_discovery_version & 0xff;
      } else {
         info->ip[ip_type].ver_major = ip_info.hw_ip_version_major;
         info->ip[ip_type].ver_minor = ip_info.hw_ip_version_minor;

         /* Fix incorrect IP versions reported by the kernel. */
         if (device_info.family == FAMILY_NV &&
             (ASICREV_IS(device_info.external_rev, NAVI10) ||
              ASICREV_IS(device_info.external_rev, NAVI12) ||
              ASICREV_IS(device_info.external_rev, NAVI14)))
            info->ip[AMD_IP_GFX].ver_minor = info->ip[AMD_IP_COMPUTE].ver_minor = 1;
         else if (device_info.family == FAMILY_NV ||
                  device_info.family == FAMILY_VGH ||
                  device_info.family == FAMILY_RMB ||
                  device_info.family == FAMILY_RPL ||
                  device_info.family == FAMILY_MDN)
            info->ip[AMD_IP_GFX].ver_minor = info->ip[AMD_IP_COMPUTE].ver_minor = 3;
      }
      info->ip[ip_type].num_queues = util_bitcount(ip_info.available_rings);

      /* query ip count */
      r = amdgpu_query_hw_ip_count(dev, ip_type, &num_instances);
      if (!r)
         info->ip[ip_type].num_instances = num_instances;

      /* According to the kernel, only SDMA and VPE require 256B alignment, but use it
       * for all queues because the kernel reports wrong limits for some of the queues.
       * This is only space allocation alignment, so it's OK to keep it like this even
       * when it's greater than what the queues require.
       */
      info->ip[ip_type].ib_alignment = MAX3(ip_info.ib_start_alignment,
                                            ip_info.ib_size_alignment, 256);
   }

   /* IB padding in dwords: each mask is the required size alignment minus 1,
    * i.e. the IB size must be a multiple of (mask + 1) dwords.
    */
   info->ip[AMD_IP_GFX].ib_pad_dw_mask = 0x7;
   info->ip[AMD_IP_COMPUTE].ib_pad_dw_mask = 0x7;
   info->ip[AMD_IP_SDMA].ib_pad_dw_mask = 0xf;
   info->ip[AMD_IP_UVD].ib_pad_dw_mask = 0xf;
   info->ip[AMD_IP_VCE].ib_pad_dw_mask = 0x3f;
   info->ip[AMD_IP_UVD_ENC].ib_pad_dw_mask = 0x3f;
   info->ip[AMD_IP_VCN_DEC].ib_pad_dw_mask = 0xf;
   info->ip[AMD_IP_VCN_ENC].ib_pad_dw_mask = 0x3f;
   info->ip[AMD_IP_VCN_JPEG].ib_pad_dw_mask = 0xf;
   info->ip[AMD_IP_VPE].ib_pad_dw_mask = 0xf;
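   /* e.g. a 61-dword gfx IB (mask 0x7) would be padded by the winsys to 64 dwords
    * before submission (illustrative).
    */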

   /* Only require gfx or compute. */
   if (!info->ip[AMD_IP_GFX].num_queues && !info->ip[AMD_IP_COMPUTE].num_queues) {
      fprintf(stderr, "amdgpu: failed to find gfx or compute.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0, &info->me_fw_version,
                                     &info->me_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_MEC, 0, 0, &info->mec_fw_version,
                                     &info->mec_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(mec) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0, &info->pfp_fw_version,
                                     &info->pfp_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
      return false;
   }

   if (info->ip[AMD_IP_VCN_DEC].num_queues || info->ip[AMD_IP_VCN_UNIFIED].num_queues) {
      r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCN, 0, 0, &vidip_fw_version, &vidip_fw_feature);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vcn) failed.\n");
         return false;
      } else {
         info->vcn_dec_version = (vidip_fw_version & 0x0F000000) >> 24;
         info->vcn_enc_major_version = (vidip_fw_version & 0x00F00000) >> 20;
         info->vcn_enc_minor_version = (vidip_fw_version & 0x000FF000) >> 12;
      }
   } else {
      if (info->ip[AMD_IP_VCE].num_queues) {
         r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         if (r) {
            fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
            return false;
         } else
            info->vce_fw_version = vidip_fw_version;
      }

      if (info->ip[AMD_IP_UVD].num_queues) {
         r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         if (r) {
            fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
            return false;
         } else
            info->uvd_fw_version = vidip_fw_version;
      }
   }

   r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
      return false;
   }

   struct drm_amdgpu_memory_info meminfo = {0};

   r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_info(memory) failed.\n");
      return false;
   }

   /* Note: usable_heap_size values can be random and can't be relied on. */
   info->gart_size_kb = DIV_ROUND_UP(meminfo.gtt.total_heap_size, 1024);
   info->vram_size_kb = DIV_ROUND_UP(fix_vram_size(meminfo.vram.total_heap_size), 1024);
   info->vram_vis_size_kb = DIV_ROUND_UP(meminfo.cpu_accessible_vram.total_heap_size, 1024);

   if (info->drm_minor >= 41) {
      amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_DECODE,
                                   sizeof(info->dec_caps), &(info->dec_caps));
      amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_ENCODE,
                                   sizeof(info->enc_caps), &(info->enc_caps));
   }

   /* Add some margin of error, though this shouldn't be needed in theory. */
   info->all_vram_visible = info->vram_size_kb * 0.9 < info->vram_vis_size_kb;

   /* Set chip identification. */
   info->pci_id = device_info.device_id;
   info->pci_rev_id = device_info.pci_rev;
   info->vce_harvest_config = device_info.vce_harvest_config;

#define identify_chip2(asic, chipname)                                                             \
   if (ASICREV_IS(device_info.external_rev, asic)) {                                             \
      info->family = CHIP_##chipname;                                                              \
      info->name = #chipname;                                                                      \
   }
#define identify_chip(chipname) identify_chip2(chipname, chipname)
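/* e.g. identify_chip(NAVI10) sets info->family = CHIP_NAVI10 and info->name = "NAVI10"
 * when external_rev falls in the NAVI10 range; identify_chip2 handles revisions whose
 * driver name differs from the addrlib ASIC name (e.g. CAPEVERDE -> VERDE).
 */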

   if (!info->family_overridden) {
      switch (device_info.family) {
      case FAMILY_SI:
         identify_chip(TAHITI);
         identify_chip(PITCAIRN);
         identify_chip2(CAPEVERDE, VERDE);
         identify_chip(OLAND);
         identify_chip(HAINAN);
         break;
      case FAMILY_CI:
         identify_chip(BONAIRE);
         identify_chip(HAWAII);
         break;
      case FAMILY_KV:
         identify_chip2(SPECTRE, KAVERI);
         identify_chip2(SPOOKY, KAVERI);
         identify_chip2(KALINDI, KABINI);
         identify_chip2(GODAVARI, KABINI);
         break;
      case FAMILY_VI:
         identify_chip(ICELAND);
         identify_chip(TONGA);
         identify_chip(FIJI);
         identify_chip(POLARIS10);
         identify_chip(POLARIS11);
         identify_chip(POLARIS12);
         identify_chip(VEGAM);
         break;
      case FAMILY_CZ:
         identify_chip(CARRIZO);
         identify_chip(STONEY);
         break;
      case FAMILY_AI:
         identify_chip(VEGA10);
         identify_chip(VEGA12);
         identify_chip(VEGA20);
         identify_chip(MI100);
         identify_chip(MI200);
         identify_chip(GFX940);
         break;
      case FAMILY_RV:
         identify_chip(RAVEN);
         identify_chip(RAVEN2);
         identify_chip(RENOIR);
         break;
      case FAMILY_NV:
         identify_chip(NAVI10);
         identify_chip(NAVI12);
         identify_chip(NAVI14);
         identify_chip(NAVI21);
         identify_chip(NAVI22);
         identify_chip(NAVI23);
         identify_chip(NAVI24);
         break;
      case FAMILY_VGH:
         identify_chip(VANGOGH);
         break;
      case FAMILY_RMB:
         identify_chip(REMBRANDT);
         break;
      case FAMILY_RPL:
         identify_chip2(RAPHAEL, RAPHAEL_MENDOCINO);
         break;
      case FAMILY_MDN:
         identify_chip2(MENDOCINO, RAPHAEL_MENDOCINO);
         break;
      case FAMILY_NV3:
         identify_chip(NAVI31);
         identify_chip(NAVI32);
         identify_chip(NAVI33);
         break;
      case FAMILY_GFX1103:
         identify_chip(GFX1103_R1);
         identify_chip(GFX1103_R2);
         identify_chip2(GFX1103_R1X, GFX1103_R1);
         identify_chip2(GFX1103_R2X, GFX1103_R2);
         break;
      case FAMILY_GFX1150:
         identify_chip(GFX1150);
         identify_chip(GFX1151);
         identify_chip(GFX1152);
         break;
      case FAMILY_GFX12:
         identify_chip(GFX1200);
         identify_chip(GFX1201);
         break;
      }

      if (info->ip[AMD_IP_GFX].ver_major == 12 && info->ip[AMD_IP_GFX].ver_minor == 0)
         info->gfx_level = GFX12;
      else if (info->ip[AMD_IP_GFX].ver_major == 11 && info->ip[AMD_IP_GFX].ver_minor == 5)
         info->gfx_level = GFX11_5;
      else if (info->ip[AMD_IP_GFX].ver_major == 11 && info->ip[AMD_IP_GFX].ver_minor == 0)
         info->gfx_level = GFX11;
      else if (info->ip[AMD_IP_GFX].ver_major == 10 && info->ip[AMD_IP_GFX].ver_minor == 3)
         info->gfx_level = GFX10_3;
      else if (info->ip[AMD_IP_GFX].ver_major == 10 && info->ip[AMD_IP_GFX].ver_minor == 1)
         info->gfx_level = GFX10;
      else if (info->ip[AMD_IP_GFX].ver_major == 9 || info->ip[AMD_IP_COMPUTE].ver_major == 9)
         info->gfx_level = GFX9;
      else if (info->ip[AMD_IP_GFX].ver_major == 8)
         info->gfx_level = GFX8;
      else if (info->ip[AMD_IP_GFX].ver_major == 7)
         info->gfx_level = GFX7;
      else if (info->ip[AMD_IP_GFX].ver_major == 6)
         info->gfx_level = GFX6;
      else {
         fprintf(stderr, "amdgpu: Unknown gfx version: %u.%u\n",
                 info->ip[AMD_IP_GFX].ver_major, info->ip[AMD_IP_GFX].ver_minor);
         return false;
      }

      info->family_id = device_info.family;
      info->chip_external_rev = device_info.external_rev;
      info->chip_rev = device_info.chip_rev;
      info->marketing_name = amdgpu_get_marketing_name(dev);
      info->is_pro_graphics = info->marketing_name && (strstr(info->marketing_name, "Pro") ||
                                                       strstr(info->marketing_name, "PRO") ||
                                                       strstr(info->marketing_name, "Frontier"));
   }

   if (!info->name) {
      fprintf(stderr, "amdgpu: unknown (family_id, chip_external_rev): (%u, %u)\n",
              device_info.family, device_info.external_rev);
      return false;
   }

   memset(info->lowercase_name, 0, sizeof(info->lowercase_name));
   for (unsigned i = 0; info->name[i] && i < ARRAY_SIZE(info->lowercase_name) - 1; i++)
      info->lowercase_name[i] = tolower(info->name[i]);

   char proc_fd[64];
   snprintf(proc_fd, sizeof(proc_fd), "/proc/self/fd/%u", fd);
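   /* Resolve the fd to its device path via procfs, e.g. "/dev/dri/renderD128"
    * (the readlink stub above makes this a no-op on Windows).
    */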
   UNUSED int _result = readlink(proc_fd, info->dev_filename, sizeof(info->dev_filename));

#define VCN_IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
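/* Same byte packing as ip_discovery_version, e.g. VCN_IP_VERSION(3, 0, 33) == 0x30021. */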

   for (unsigned i = AMD_IP_VCN_DEC; i <= AMD_IP_VCN_JPEG; ++i) {
      if (!info->ip[i].num_queues)
         continue;

      switch(VCN_IP_VERSION(info->ip[i].ver_major,
                            info->ip[i].ver_minor,
                            info->ip[i].ver_rev)) {
      case VCN_IP_VERSION(1, 0, 0):
         info->vcn_ip_version = VCN_1_0_0;
         break;
      case VCN_IP_VERSION(1, 0, 1):
         info->vcn_ip_version = VCN_1_0_1;
         break;
      case VCN_IP_VERSION(2, 0, 0):
         info->vcn_ip_version = VCN_2_0_0;
         break;
      case VCN_IP_VERSION(2, 0, 2):
         info->vcn_ip_version = VCN_2_0_2;
         break;
      case VCN_IP_VERSION(2, 0, 3):
         info->vcn_ip_version = VCN_2_0_3;
         break;
      case VCN_IP_VERSION(2, 2, 0):
         info->vcn_ip_version = VCN_2_2_0;
         break;
      case VCN_IP_VERSION(2, 5, 0):
         info->vcn_ip_version = VCN_2_5_0;
         break;
      case VCN_IP_VERSION(2, 6, 0):
         info->vcn_ip_version = VCN_2_6_0;
         break;
      case VCN_IP_VERSION(3, 0, 0):
         /* Navi24 reports the default 3.0.0 version when the kernel falls back to
          * the older query path; treat it as VCN_3_0_33 there, since Navi24 has a
          * different feature set from the rest of the VCN3 family.
          */
         info->vcn_ip_version = (info->family != CHIP_NAVI24) ? VCN_3_0_0 : VCN_3_0_33;
         break;
      case VCN_IP_VERSION(3, 0, 2):
         info->vcn_ip_version = VCN_3_0_2;
         break;
      case VCN_IP_VERSION(3, 0, 16):
         info->vcn_ip_version = VCN_3_0_16;
         break;
      case VCN_IP_VERSION(3, 0, 33):
         info->vcn_ip_version = VCN_3_0_33;
         break;
      case VCN_IP_VERSION(3, 1, 1):
         info->vcn_ip_version = VCN_3_1_1;
         break;
      case VCN_IP_VERSION(3, 1, 2):
         info->vcn_ip_version = VCN_3_1_2;
         break;
      case VCN_IP_VERSION(4, 0, 0):
         info->vcn_ip_version = VCN_4_0_0;
         break;
      case VCN_IP_VERSION(4, 0, 2):
         info->vcn_ip_version = VCN_4_0_2;
         break;
      case VCN_IP_VERSION(4, 0, 3):
         info->vcn_ip_version = VCN_4_0_3;
         break;
      case VCN_IP_VERSION(4, 0, 4):
         info->vcn_ip_version = VCN_4_0_4;
         break;
      case VCN_IP_VERSION(4, 0, 5):
         info->vcn_ip_version = VCN_4_0_5;
         break;
      case VCN_IP_VERSION(4, 0, 6):
         info->vcn_ip_version = VCN_4_0_6;
         break;
      case VCN_IP_VERSION(5, 0, 0):
         info->vcn_ip_version = VCN_5_0_0;
         break;
      default:
         info->vcn_ip_version = VCN_UNKNOWN;
      }
      break;
   }

   /* Set which chips have dedicated VRAM. */
   info->has_dedicated_vram = !(device_info.ids_flags & AMDGPU_IDS_FLAGS_FUSION);

   /* The kernel can split large buffers in VRAM but not in GTT, so large
    * allocations can fail or cause buffer movement failures in the kernel.
    */
   if (info->has_dedicated_vram)
      info->max_heap_size_kb = info->vram_size_kb;
   else
      info->max_heap_size_kb = info->gart_size_kb;

   info->vram_type = device_info.vram_type;
   info->memory_bus_width = device_info.vram_bit_width;

   /* Set which chips have uncached device memory. */
   info->has_l2_uncached = info->gfx_level >= GFX9;

   /* Set hardware information. */
   /* convert the shader/memory clocks from KHz to MHz */
   info->max_gpu_freq_mhz = device_info.max_engine_clock / 1000;
   info->memory_freq_mhz_effective = info->memory_freq_mhz = device_info.max_memory_clock / 1000;
   info->max_tcc_blocks = device_info.num_tcc_blocks;
   info->max_se = device_info.num_shader_engines;
   info->max_sa_per_se = device_info.num_shader_arrays_per_engine;
   info->num_cu_per_sh = device_info.num_cu_per_sh;
   info->enabled_rb_mask = device_info.enabled_rb_pipes_mask;
   if (info->drm_minor >= 52)
      info->enabled_rb_mask |= (uint64_t)device_info.enabled_rb_pipes_mask_hi << 32;

   info->memory_freq_mhz_effective *= ac_memory_ops_per_clock(info->vram_type);

   info->has_userptr = true;
   info->has_syncobj = true;
   info->has_timeline_syncobj = has_timeline_syncobj(fd);
   info->has_fence_to_handle = true;
   info->has_local_buffers = true;
   info->has_bo_metadata = true;
   info->has_eqaa_surface_allocator = info->gfx_level < GFX11;
   /* Disable sparse mappings on GFX6 due to VM faults in CP DMA. Enable them once
    * these faults are mitigated in software.
    */
   info->has_sparse_vm_mappings = info->gfx_level >= GFX7;
   info->has_scheduled_fence_dependency = info->drm_minor >= 28;
   info->has_gang_submit = info->drm_minor >= 49;
   info->has_gpuvm_fault_query = info->drm_minor >= 55;
   info->has_tmz_support = has_tmz_support(dev, info, device_info.ids_flags);
   info->kernel_has_modifiers = has_modifiers(fd);
   info->uses_kernel_cu_mask = false; /* Not implemented in the kernel. */
   info->has_graphics = info->ip[AMD_IP_GFX].num_queues > 0;

   info->pa_sc_tile_steering_override = device_info.pa_sc_tile_steering_override;
   info->max_render_backends = device_info.num_rb_pipes;
   /* The value returned by the kernel driver was wrong. */
   if (info->family == CHIP_KAVERI)
      info->max_render_backends = 2;

   info->clock_crystal_freq = device_info.gpu_counter_freq;
   if (!info->clock_crystal_freq) {
      fprintf(stderr, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
      info->clock_crystal_freq = 1;
   }

   if (info->gfx_level >= GFX10) {
      info->tcc_cache_line_size = info->gfx_level >= GFX12 ? 256 : 128;

      if (info->drm_minor >= 35) {
         info->num_tcc_blocks = info->max_tcc_blocks - util_bitcount64(device_info.tcc_disabled_mask);
      } else {
         /* This is a hack, but it's all we can do without a kernel upgrade. */
         info->num_tcc_blocks = info->vram_size_kb / (512 * 1024);
         if (info->num_tcc_blocks > info->max_tcc_blocks)
            info->num_tcc_blocks /= 2;
      }
   } else {
      if (!info->has_graphics && info->family >= CHIP_MI200)
         info->tcc_cache_line_size = 128;
      else
         info->tcc_cache_line_size = 64;

      info->num_tcc_blocks = info->max_tcc_blocks;
   }

   info->tcc_rb_non_coherent = info->gfx_level < GFX12 &&
                               !util_is_power_of_two_or_zero(info->num_tcc_blocks) &&
                               info->num_rb != info->num_tcc_blocks;
   info->cp_sdma_ge_use_system_memory_scope = info->gfx_level == GFX12;

   if (info->drm_minor >= 52) {
      info->sqc_inst_cache_size = device_info.sqc_inst_cache_size * 1024;
      info->sqc_scalar_cache_size = device_info.sqc_data_cache_size * 1024;
      info->num_sqc_per_wgp = device_info.num_sqc_per_wgp;
   }

   /* Firmware wrongly reports 0 bytes of MALL being present on Navi33.
    * Work around this by manually computing cache sizes. */
   if (info->gfx_level >= GFX11 && info->drm_minor >= 52 && info->family != CHIP_NAVI33) {
      info->tcp_cache_size = device_info.tcp_cache_size * 1024;
      info->l1_cache_size = device_info.gl1c_cache_size * 1024;
      info->l2_cache_size = device_info.gl2c_cache_size * 1024;
      info->l3_cache_size_mb = DIV_ROUND_UP(device_info.mall_size, 1024 * 1024);
   } else {
      if (info->gfx_level >= GFX11) {
         info->tcp_cache_size = 32768;
         info->l1_cache_size = 256 * 1024;
      } else {
         info->tcp_cache_size = 16384;
         info->l1_cache_size = 128 * 1024;
      }

      if (info->gfx_level >= GFX10_3 && info->has_dedicated_vram) {
         info->l3_cache_size_mb = info->num_tcc_blocks *
                                  (info->family == CHIP_NAVI21 ||
                                   info->family == CHIP_NAVI22 ? 8 : 4);
      }

      switch (info->family) {
      case CHIP_TAHITI:
      case CHIP_PITCAIRN:
      case CHIP_OLAND:
      case CHIP_HAWAII:
      case CHIP_KABINI:
      case CHIP_TONGA:
      case CHIP_STONEY:
      case CHIP_RAVEN2:
         info->l2_cache_size = info->num_tcc_blocks * 64 * 1024;
         break;
      case CHIP_VERDE:
      case CHIP_HAINAN:
      case CHIP_BONAIRE:
      case CHIP_KAVERI:
      case CHIP_ICELAND:
      case CHIP_CARRIZO:
      case CHIP_FIJI:
      case CHIP_POLARIS12:
      case CHIP_VEGAM:
      case CHIP_RAPHAEL_MENDOCINO:
         info->l2_cache_size = info->num_tcc_blocks * 128 * 1024;
         break;
      default:
         info->l2_cache_size = info->num_tcc_blocks * 256 * 1024;
         break;
      case CHIP_REMBRANDT:
      case CHIP_GFX1103_R1:
         info->l2_cache_size = info->num_tcc_blocks * 512 * 1024;
         break;
      }
   }

   info->mc_arb_ramcfg = amdinfo.mc_arb_ramcfg;
   info->gb_addr_config = amdinfo.gb_addr_cfg;
   if (info->gfx_level >= GFX9) {
      if (!info->has_graphics && info->family >= CHIP_GFX940)
         info->gb_addr_config = 0;

      info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(info->gb_addr_config);
      info->pipe_interleave_bytes = 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
   } else {
      unsigned pipe_config = G_009910_PIPE_CONFIG(amdinfo.gb_tile_mode[CIK_TILE_MODE_COLOR_2D]);
      info->num_tile_pipes = ac_pipe_config_to_num_pipes(pipe_config);
      info->pipe_interleave_bytes = 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config);
   }
   info->r600_has_virtual_memory = true;

   /* LDS is 64KB per CU (4 SIMDs on GFX6-9), which is 16KB per SIMD (usage above
    * 16KB makes some SIMDs unoccupied).
    *
    * GFX10+: LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
    * GFX7+: Workgroups can use up to 64KB.
    * GFX6: There is 64KB LDS per CU, but a workgroup can only use up to 32KB.
    */
   info->lds_size_per_workgroup = info->gfx_level >= GFX10  ? 128 * 1024
                                  : info->gfx_level >= GFX7 ? 64 * 1024
                                                            : 32 * 1024;

   /* lds_encode_granularity is the block size used for encoding registers.
    * lds_alloc_granularity is what the hardware will align the LDS size to.
    */
   info->lds_encode_granularity = info->gfx_level >= GFX7 ? 128 * 4 : 64 * 4;
   info->lds_alloc_granularity = info->gfx_level >= GFX10_3 ? 256 * 4 : info->lds_encode_granularity;
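   /* e.g. on GFX10.3, a 5000-byte LDS request is allocated as align(5000, 1024) = 5120
    * bytes and encoded in 512-byte register units (an illustrative reading of the two
    * granularities above).
    */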

   /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs on GFX6.
    * Some CLEAR_STATE packets also hang the ASIC on the radeon kernel driver
    * (e.g. around SPI_VS_OUT_CONFIG), so only enable CLEAR_STATE on GFX7+ with
    * the amdgpu kernel driver.
    */
   info->has_clear_state = info->gfx_level >= GFX7 && info->gfx_level < GFX12;

   info->has_distributed_tess =
      info->gfx_level >= GFX10 || (info->gfx_level >= GFX8 && info->max_se >= 2);

   info->has_dcc_constant_encode =
      info->family == CHIP_RAVEN2 || info->family == CHIP_RENOIR || info->gfx_level >= GFX10;

   /* TC-compat HTILE is only available for GFX8-GFX11.5. */
   info->has_tc_compatible_htile = info->gfx_level >= GFX8 && info->gfx_level < GFX12;

   info->has_etc_support = info->family == CHIP_STONEY || info->family == CHIP_VEGA10 ||
                           info->family == CHIP_RAVEN || info->family == CHIP_RAVEN2;

   info->has_rbplus = info->family == CHIP_STONEY || info->gfx_level >= GFX9;

   /* Some chips have RB+ registers, but don't support RB+. Those must
    * always disable it.
    */
   info->rbplus_allowed =
      info->has_rbplus &&
      (info->family == CHIP_STONEY || info->family == CHIP_VEGA12 || info->family == CHIP_RAVEN ||
       info->family == CHIP_RAVEN2 || info->family == CHIP_RENOIR || info->gfx_level >= GFX10_3);

   info->has_out_of_order_rast =
      info->gfx_level >= GFX8 && info->gfx_level <= GFX9 && info->max_se >= 2;

   /* Whether chips support double rate packed math instructions. */
   info->has_packed_math_16bit = info->gfx_level >= GFX9;

   /* Whether chips support dot product instructions. A subset of these support a smaller
    * instruction encoding which accumulates with the destination.
    */
   info->has_accelerated_dot_product =
      info->family == CHIP_VEGA20 ||
      (info->family >= CHIP_MI100 && info->family != CHIP_NAVI10);

   /* TODO: Figure out how to use LOAD_CONTEXT_REG on GFX6-GFX7. */
   info->has_load_ctx_reg_pkt =
      info->gfx_level >= GFX9 || (info->gfx_level >= GFX8 && info->me_fw_feature >= 41);

   info->cpdma_prefetch_writes_memory = info->gfx_level <= GFX8;

   info->has_gfx9_scissor_bug = info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;

   info->has_tc_compat_zrange_bug = info->gfx_level >= GFX8 && info->gfx_level <= GFX9;

   info->has_small_prim_filter_sample_loc_bug =
      (info->family >= CHIP_POLARIS10 && info->family <= CHIP_POLARIS12) ||
      info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;

   info->has_ls_vgpr_init_bug = info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;

   /* DB_DFSM_CONTROL.POPS_DRAIN_PS_ON_OVERLAP must be enabled for 8 or more coverage or
    * depth/stencil samples with POPS (PAL waMiscPopsMissedOverlap).
    */
   info->has_pops_missed_overlap_bug = info->family == CHIP_VEGA10 || info->family == CHIP_RAVEN;

   /* GFX6 hw bug when the IBO addr is 0 which causes invalid clamping (underflow).
    * Setting the IB addr to 2 or higher solves this issue.
    */
   info->has_null_index_buffer_clamping_bug = info->gfx_level == GFX6;

   /* Drawing from 0-sized index buffers causes hangs on gfx10. */
   info->has_zero_index_buffer_bug = info->gfx_level == GFX10;

   /* Whether chips are affected by the image load/sample/gather hw bug when
    * DCC is enabled (ie. WRITE_COMPRESS_ENABLE should be 0).
    */
   info->has_image_load_dcc_bug = info->family == CHIP_NAVI23 ||
                                  info->family == CHIP_VANGOGH ||
                                  info->family == CHIP_REMBRANDT;

   /* DB has a bug when ITERATE_256 is set to 1 that can cause a hang. The
    * workaround is to set DECOMPRESS_ON_Z_PLANES to 2 for 4X MSAA D/S images.
    */
   info->has_two_planes_iterate256_bug = info->gfx_level == GFX10;

   /* GFX10+Navi21: NGG->legacy transitions require VGT_FLUSH. */
   info->has_vgt_flush_ngg_legacy_bug = info->gfx_level == GFX10 ||
                                        info->family == CHIP_NAVI21;

   /* First Navi2x chips have a hw bug that doesn't allow to write
    * depth/stencil from a FS for multi-pixel fragments.
    */
   info->has_vrs_ds_export_bug = info->family == CHIP_NAVI21 ||
                                 info->family == CHIP_NAVI22 ||
                                 info->family == CHIP_VANGOGH;

   /* HW bug workaround when CS threadgroups > 256 threads and async compute
    * isn't used, i.e. only one compute job can run at a time.  If async
    * compute is possible, the threadgroup size must be limited to 256 threads
    * on all queues to avoid the bug.
    * Only GFX6 and certain GFX7 chips are affected.
    *
    * FIXME: RADV doesn't limit the number of threads for async compute.
    */
   info->has_cs_regalloc_hang_bug = info->gfx_level == GFX6 ||
                                    info->family == CHIP_BONAIRE ||
                                    info->family == CHIP_KABINI;

   /* HW bug workaround with async compute dispatches when threadgroup > 4096.
    * The workaround is to change the "threadgroup" dimension mode to "thread"
    * dimension mode.
    */
   info->has_async_compute_threadgroup_bug = info->family == CHIP_ICELAND ||
                                             info->family == CHIP_TONGA;

   /* GFX7 CP requires 32 bytes alignment for the indirect buffer arguments on
    * the compute queue.
    */
   info->has_async_compute_align32_bug = info->gfx_level == GFX7;

   /* Support for GFX10.3 was added with F32_ME_FEATURE_VERSION_31 but the
    * feature version wasn't bumped.
    */
   info->has_32bit_predication = (info->gfx_level >= GFX10 &&
                                  info->me_fw_feature >= 32) ||
                                 (info->gfx_level == GFX9 &&
                                  info->me_fw_feature >= 52);

   /* Firmware bug with DISPATCH_TASKMESH_INDIRECT_MULTI_ACE packets.
    * On old MEC FW versions, it hangs the GPU when indirect count is zero.
    */
   info->has_taskmesh_indirect0_bug = info->gfx_level == GFX10_3 &&
                                      info->mec_fw_version < 100;

   info->has_export_conflict_bug = info->gfx_level == GFX11;

   /* When LLVM is fixed to handle multiparts shaders, this value will depend
    * on the known good versions of LLVM. Until then, enable the equivalent WA
    * in the nir -> llvm backend.
    */
   info->needs_llvm_wait_wa = info->gfx_level == GFX11;

   /* Convert the SDMA version in the current GPU to an enum. */
   info->sdma_ip_version =
      (enum sdma_version)SDMA_VERSION_VALUE(info->ip[AMD_IP_SDMA].ver_major,
                                            info->ip[AMD_IP_SDMA].ver_minor);

   /* SDMA v1.0-3.x (GFX6-8) can't ignore page faults on unmapped sparse resources. */
   info->sdma_supports_sparse = info->sdma_ip_version >= SDMA_4_0;

   /* SDMA v5.0+ (GFX10+) supports DCC and HTILE, but Navi 10 has issues with it according to PAL. */
   info->sdma_supports_compression = info->sdma_ip_version >= SDMA_5_0 && info->family != CHIP_NAVI10;

   /* Get the number of good compute units. */
   info->num_cu = 0;
   for (i = 0; i < info->max_se; i++) {
      for (j = 0; j < info->max_sa_per_se; j++) {
         if (info->gfx_level >= GFX11) {
            assert(info->max_sa_per_se <= 2);
            info->cu_mask[i][j] = device_info.cu_bitmap[i % 4][(i / 4) * 2 + j];
         } else if (info->family == CHIP_MI100) {
1378             /* The CU bitmap in the amdgpu info structure is a
1379              * 4x4 array, which suits Vega ASICs with their 4*2
1380              * SE/SA layout. For MI100, the SE/SA layout changed
1381              * to 8*1. To minimize the impact, we remap the extra
1382              * SEs onto the second column of the existing bitmap
1383              * array as follows:
1384              *    SE4 --> cu_bitmap[0][1]
1385              *    SE5 --> cu_bitmap[1][1]
1386              *    SE6 --> cu_bitmap[2][1]
1387              *    SE7 --> cu_bitmap[3][1]
1388              */
1389             assert(info->max_sa_per_se == 1);
1390             info->cu_mask[i][0] = device_info.cu_bitmap[i % 4][i / 4];
1391          } else {
1392             info->cu_mask[i][j] = device_info.cu_bitmap[i][j];
1393          }
1394          info->num_cu += util_bitcount(info->cu_mask[i][j]);
1395       }
1396    }
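   /* Worked example of the index remapping above (illustration only): on
    * MI100, SE5 (i = 5) reads cu_bitmap[5 % 4][5 / 4] = cu_bitmap[1][1]; on
    * GFX11 with i = 5, j = 1, the source is
    * cu_bitmap[5 % 4][(5 / 4) * 2 + 1] = cu_bitmap[1][3].
    */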
1397 
1398    if (info->gfx_level >= GFX10_3 && info->max_se > 1) {
1399       uint32_t enabled_se_mask = 0;
1400 
1401       /* Derive the enabled SE mask from the CU mask. */
1402       for (unsigned se = 0; se < info->max_se; se++) {
1403          for (unsigned sa = 0; sa < info->max_sa_per_se; sa++) {
1404             if (info->cu_mask[se][sa]) {
1405                enabled_se_mask |= BITFIELD_BIT(se);
1406                break;
1407             }
1408          }
1409       }
1410       info->num_se = util_bitcount(enabled_se_mask);
1411 
1412       /* Trim the number of enabled RBs based on the number of enabled SEs because the RB mask
1413        * might include disabled SEs.
1414        */
1415       if (info->gfx_level >= GFX12) {
1416          unsigned num_rb_per_se = info->max_render_backends / info->max_se;
1417 
1418          for (unsigned se = 0; se < info->max_se; se++) {
1419             if (!(BITFIELD_BIT(se) & enabled_se_mask))
1420                info->enabled_rb_mask &= ~(BITFIELD_MASK(num_rb_per_se) << (se * num_rb_per_se));
1421          }
1422       }
1423    } else {
1424       /* GFX10 and older always enable all SEs because they don't support SE harvesting. */
1425       info->num_se = info->max_se;
1426    }
1427 
1428    info->num_rb = util_bitcount64(info->enabled_rb_mask);
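   /* Worked example (illustration only): with max_se = 4 and CUs present in
    * SE0, SE1 and SE3 only, enabled_se_mask = 0b1011 and num_se = 3. On GFX12
    * with max_render_backends = 16, num_rb_per_se = 4, so the RB bits of the
    * harvested SE2 (bits 8-11) are cleared from enabled_rb_mask.
    */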
1429 
1430    /* On GFX10+, only whole WGPs (in units of 2 CUs) can be disabled,
1431     * and max - min <= 2.
1432     */
1433    unsigned cu_group = info->gfx_level >= GFX10 ? 2 : 1;
1434    info->max_good_cu_per_sa =
1435       DIV_ROUND_UP(info->num_cu, (info->num_se * info->max_sa_per_se * cu_group)) *
1436       cu_group;
1437    info->min_good_cu_per_sa =
1438       (info->num_cu / (info->num_se * info->max_sa_per_se * cu_group)) * cu_group;
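   /* Worked example (illustration only): num_cu = 34, num_se = 3 and
    * max_sa_per_se = 2 on GFX10 give cu_group = 2, so
    * max_good_cu_per_sa = DIV_ROUND_UP(34, 12) * 2 = 6 and
    * min_good_cu_per_sa = (34 / 12) * 2 = 4.
    */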
1439 
1440    memcpy(info->si_tile_mode_array, amdinfo.gb_tile_mode, sizeof(amdinfo.gb_tile_mode));
1441    memcpy(info->cik_macrotile_mode_array, amdinfo.gb_macro_tile_mode,
1442           sizeof(amdinfo.gb_macro_tile_mode));
1443 
1444    info->pte_fragment_size = alignment_info.size_local;
1445    info->gart_page_size = alignment_info.size_remote;
1446 
1447    info->gfx_ib_pad_with_type2 = info->gfx_level == GFX6;
1448    /* CDNA starting with GFX940 shouldn't use CP DMA. */
1449    info->has_cp_dma = info->has_graphics || info->family < CHIP_GFX940;
1450 
1451    if (info->gfx_level >= GFX11 && info->gfx_level < GFX12) {
1452       /* Measured power on GFX11 with num_cu = 4 (idle and video playback)
1453        * showed savings, hence enable DCC with retiling when num_cu >= 4.
1454        */
1455       info->use_display_dcc_with_retile_blit = info->num_cu >= 4;
1456    } else if (info->gfx_level == GFX10_3) {
1457       /* Displayable DCC with retiling is known to increase power consumption on Raphael
1458        * and Mendocino, so disable it on the smallest APUs. We still need proof that
1459        * displayable DCC doesn't regress bigger chips in the same way.
1460        */
1461       info->use_display_dcc_with_retile_blit = info->num_cu > 4;
1462    } else if (info->gfx_level == GFX9 && !info->has_dedicated_vram &&
1463               info->drm_minor >= 31) {
1464       if (info->max_render_backends == 1) {
1465          info->use_display_dcc_unaligned = true;
1466       } else {
1467          /* Power consumption may increase on small APUs with fewer CUs. */
1468          info->use_display_dcc_with_retile_blit = info->num_cu > 4;
1469       }
1470    }
1471 
1472    info->has_stable_pstate = info->drm_minor >= 45;
1473 
1474    if (info->gfx_level >= GFX12) {
1475       /* Gfx12 doesn't use pc_lines and pbb_max_alloc_count. */
1476    } else if (info->gfx_level >= GFX11) {
1477       info->pc_lines = 1024;
1478       info->pbb_max_alloc_count = 16; /* minimum is 2, maximum is 256 */
1479    } else if (info->gfx_level >= GFX9 && info->has_graphics) {
1480       unsigned pc_lines = 0;
1481 
1482       switch (info->family) {
1483       case CHIP_VEGA10:
1484       case CHIP_VEGA12:
1485       case CHIP_VEGA20:
1486          pc_lines = 2048;
1487          break;
1488       case CHIP_RAVEN:
1489       case CHIP_RAVEN2:
1490       case CHIP_RENOIR:
1491       case CHIP_NAVI10:
1492       case CHIP_NAVI12:
1493       case CHIP_NAVI21:
1494       case CHIP_NAVI22:
1495       case CHIP_NAVI23:
1496          pc_lines = 1024;
1497          break;
1498       case CHIP_NAVI14:
1499       case CHIP_NAVI24:
1500          pc_lines = 512;
1501          break;
1502       case CHIP_VANGOGH:
1503       case CHIP_REMBRANDT:
1504       case CHIP_RAPHAEL_MENDOCINO:
1505          pc_lines = 256;
1506          break;
1507       default:
1508          assert(0);
1509       }
1510 
1511       info->pc_lines = pc_lines;
1512 
1513       if (info->gfx_level >= GFX10) {
1514          info->pbb_max_alloc_count = pc_lines / 3;
1515       } else {
1516          info->pbb_max_alloc_count = MIN2(128, pc_lines / (4 * info->max_se));
1517       }
1518    }
1519 
1520    if (info->gfx_level >= GFX10_3)
1521       info->max_waves_per_simd = 16;
1522    else if (info->gfx_level == GFX10)
1523       info->max_waves_per_simd = 20;
1524    else if (info->family >= CHIP_POLARIS10 && info->family <= CHIP_VEGAM)
1525       info->max_waves_per_simd = 8;
1526    else
1527       info->max_waves_per_simd = 10;
1528 
1529    if (info->gfx_level >= GFX10) {
1530       info->num_physical_sgprs_per_simd = 128 * info->max_waves_per_simd;
1531       info->min_sgpr_alloc = 128;
1532       info->sgpr_alloc_granularity = 128;
1533    } else if (info->gfx_level >= GFX8) {
1534       info->num_physical_sgprs_per_simd = 800;
1535       info->min_sgpr_alloc = 16;
1536       info->sgpr_alloc_granularity = 16;
1537    } else {
1538       info->num_physical_sgprs_per_simd = 512;
1539       info->min_sgpr_alloc = 8;
1540       info->sgpr_alloc_granularity = 8;
1541    }
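   /* Example (illustration only): on GFX10, max_waves_per_simd = 20, so
    * num_physical_sgprs_per_simd = 128 * 20 = 2560, allocated in fixed
    * blocks of 128 SGPRs per wave.
    */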
1542 
1543    info->has_3d_cube_border_color_mipmap = info->has_graphics || info->family == CHIP_MI100;
1544    info->has_image_opcodes = debug_get_bool_option("AMD_IMAGE_OPCODES",
1545                                                    info->has_graphics || info->family < CHIP_GFX940);
1546    info->never_stop_sq_perf_counters = info->gfx_level == GFX10 ||
1547                                        info->gfx_level == GFX10_3;
1548    info->never_send_perfcounter_stop = info->gfx_level == GFX11;
1549    info->has_sqtt_rb_harvest_bug = (info->family == CHIP_NAVI23 ||
1550                                     info->family == CHIP_NAVI24 ||
1551                                     info->family == CHIP_REMBRANDT ||
1552                                     info->family == CHIP_VANGOGH) &&
1553                                    util_bitcount64(info->enabled_rb_mask) !=
1554                                    info->max_render_backends;
1555 
1556    /* On GFX10.3, the polarity of AUTO_FLUSH_MODE is inverted. */
1557    info->has_sqtt_auto_flush_mode_bug = info->gfx_level == GFX10_3;
1558 
1559    info->max_sgpr_alloc = info->family == CHIP_TONGA || info->family == CHIP_ICELAND ? 96 : 104;
1560 
1561    if (!info->has_graphics && info->family >= CHIP_MI200) {
1562       info->min_wave64_vgpr_alloc = 8;
1563       info->max_vgpr_alloc = 512;
1564       info->wave64_vgpr_alloc_granularity = 8;
1565    } else {
1566       info->min_wave64_vgpr_alloc = 4;
1567       info->max_vgpr_alloc = 256;
1568       info->wave64_vgpr_alloc_granularity = 4;
1569    }
1570 
1571    /* Some GPU info was broken before DRM 3.45.0. */
1572    if (info->drm_minor >= 45 && device_info.num_shader_visible_vgprs) {
1573       /* The Gfx10 VGPR count is in Wave32, so divide it by 2 for Wave64.
1574        * Gfx6-9 numbers are in Wave64.
1575        */
1576       if (info->gfx_level >= GFX10)
1577          info->num_physical_wave64_vgprs_per_simd = device_info.num_shader_visible_vgprs / 2;
1578       else
1579          info->num_physical_wave64_vgprs_per_simd = device_info.num_shader_visible_vgprs;
1580    } else if (info->gfx_level >= GFX10) {
1581       info->num_physical_wave64_vgprs_per_simd = 512;
1582    } else {
1583       info->num_physical_wave64_vgprs_per_simd = 256;
1584    }
1585 
1586    info->num_simd_per_compute_unit = info->gfx_level >= GFX10 ? 2 : 4;
1587 
1588    /* BIG_PAGE is supported since gfx10.3 and requires VRAM. VRAM is only guaranteed
1589     * with AMDGPU_GEM_CREATE_DISCARDABLE. DISCARDABLE was added in DRM 3.47.0.
1590     */
1591    info->discardable_allows_big_page = info->gfx_level >= GFX10_3 && info->gfx_level < GFX12 &&
1592                                        info->has_dedicated_vram &&
1593                                        info->drm_minor >= 47;
1594 
1595    /* The maximum number of scratch waves. The number is only a function of the number of CUs.
1596     * It should be large enough to hold at least 1 threadgroup. Use the minimum per-SA CU count.
1597     *
1598     * We can decrease the number to make it fit into the infinity cache.
1599     */
1600    const unsigned max_waves_per_tg = 32; /* 1024 threads in Wave32 */
1601    info->max_scratch_waves = MAX2(32 * info->min_good_cu_per_sa * info->max_sa_per_se * info->num_se,
1602                                   max_waves_per_tg);
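   /* Worked example (illustration only): min_good_cu_per_sa = 8,
    * max_sa_per_se = 2 and num_se = 4 give
    * max_scratch_waves = MAX2(32 * 8 * 2 * 4, 32) = 2048.
    */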
1603    info->has_scratch_base_registers = info->gfx_level >= GFX11 ||
1604                                       (!info->has_graphics && info->family >= CHIP_GFX940);
1605    info->max_gflops = (info->gfx_level >= GFX11 ? 256 : 128) * info->num_cu * info->max_gpu_freq_mhz / 1000;
1606    info->memory_bandwidth_gbps = DIV_ROUND_UP(info->memory_freq_mhz_effective * info->memory_bus_width / 8, 1000);
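   /* Worked example (illustration only): a GFX11 part with num_cu = 48 and
    * max_gpu_freq_mhz = 2500 gets 256 * 48 * 2500 / 1000 = 30720 GFLOPS;
    * memory_freq_mhz_effective = 18000 on a 256-bit bus gets
    * DIV_ROUND_UP(18000 * 256 / 8, 1000) = 576 GB/s.
    */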
1607    info->has_pcie_bandwidth_info = info->drm_minor >= 51;
1608 
1609    if (info->has_pcie_bandwidth_info) {
1610       info->pcie_gen = device_info.pcie_gen;
1611       info->pcie_num_lanes = device_info.pcie_num_lanes;
1612 
1613       /* Source: https://en.wikipedia.org/wiki/PCI_Express#History_and_revisions */
1614       switch (info->pcie_gen) {
1615       case 1:
1616          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 0.25 * 1024;
1617          break;
1618       case 2:
1619          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 0.5 * 1024;
1620          break;
1621       case 3:
1622          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 0.985 * 1024;
1623          break;
1624       case 4:
1625          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 1.969 * 1024;
1626          break;
1627       case 5:
1628          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 3.938 * 1024;
1629          break;
1630       case 6:
1631          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 7.563 * 1024;
1632          break;
1633       case 7:
1634          info->pcie_bandwidth_mbps = info->pcie_num_lanes * 15.125 * 1024;
1635          break;
1636       }
1637    }
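   /* Worked example (illustration only): PCIe gen4 x16 gives
    * 16 * 1.969 * 1024 = ~32260 MB/s, printed as ~31.5 GB/s by
    * ac_print_gpu_info().
    */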
1638 
1639    /* The number of IBs per submit isn't infinite; it depends on the IP type
1640     * (i.e. some initial setup is needed for a submit) and the packet size.
1641     * It can be calculated according to the kernel source code as:
1642     * (ring->max_dw - emit_frame_size) / emit_ib_size
1643     */
1644    r = amdgpu_query_info(dev, AMDGPU_INFO_MAX_IBS,
1645                          sizeof(info->max_submitted_ibs), info->max_submitted_ibs);
1646    if (r) {
1647       /* When the number of IBs can't be queried from the kernel, we choose a
1648        * rough estimate that should work well (as of kernel 6.3).
1649        */
1650       for (unsigned i = 0; i < AMD_NUM_IP_TYPES; ++i)
1651          info->max_submitted_ibs[i] = 50;
1652 
1653       info->max_submitted_ibs[AMD_IP_GFX] = info->gfx_level >= GFX7 ? 192 : 144;
1654       info->max_submitted_ibs[AMD_IP_COMPUTE] = 124;
1655       info->max_submitted_ibs[AMD_IP_VCN_JPEG] = 16;
1656       for (unsigned i = 0; i < AMD_NUM_IP_TYPES; ++i) {
1657          /* Clear out max submitted IB count for IPs that have no queues. */
1658          if (!info->ip[i].num_queues)
1659             info->max_submitted_ibs[i] = 0;
1660       }
1661    }
1662 
1663    if (info->gfx_level >= GFX11) {
1664       unsigned num_prim_exports = 0, num_pos_exports = 0;
1665 
1666       if (info->gfx_level >= GFX12) {
1667          info->attribute_ring_size_per_se = 1024 * 1024;
1668          num_prim_exports = 16368; /* also includes gs_alloc_req */
1669          num_pos_exports = 16384;
1670       } else if (info->l3_cache_size_mb) {
1671          info->attribute_ring_size_per_se = 1400 * 1024;
1672       } else {
1673          assert(info->num_se == 1);
1674 
1675          if (info->l2_cache_size >= 2 * 1024 * 1024)
1676             info->attribute_ring_size_per_se = 768 * 1024;
1677          else
1678             info->attribute_ring_size_per_se = info->l2_cache_size / 2;
1679       }
1680 
1681       /* The size must be aligned to 64K per SE and must be at most 16M in total. */
1682       info->attribute_ring_size_per_se = align(info->attribute_ring_size_per_se, 64 * 1024);
1683       assert(info->attribute_ring_size_per_se * info->max_se <= 16 * 1024 * 1024);
1684 
1685       /* Compute the pos and prim ring sizes and offsets. */
1686       info->pos_ring_size_per_se = align(num_pos_exports * 16, 32);
1687       info->prim_ring_size_per_se = align(num_prim_exports * 4, 32);
1688       assert(info->gfx_level >= GFX12 ||
1689              (!info->pos_ring_size_per_se && !info->prim_ring_size_per_se));
1690 
1691       uint32_t max_se_squared = info->max_se * info->max_se;
1692       uint32_t attribute_ring_size = info->attribute_ring_size_per_se * info->max_se;
1693       uint32_t pos_ring_size = align(info->pos_ring_size_per_se * max_se_squared, 64 * 1024);
1694       uint32_t prim_ring_size = align(info->prim_ring_size_per_se * max_se_squared, 64 * 1024);
1695 
1696       info->pos_ring_offset = attribute_ring_size;
1697       info->prim_ring_offset = info->pos_ring_offset + pos_ring_size;
1698       info->total_attribute_pos_prim_ring_size = info->prim_ring_offset + prim_ring_size;
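
      /* Worked example (illustration only): a single-SE GFX12 part with
       * attribute_ring_size_per_se = 1 MiB has
       * pos_ring_size_per_se = 16384 * 16 = 262144 and
       * prim_ring_size_per_se = 16368 * 4 = 65472, so pos_ring_offset =
       * 1048576, prim_ring_offset = 1048576 + 262144 = 1310720 and
       * total_attribute_pos_prim_ring_size = 1310720 + align(65472, 65536) =
       * 1376256 bytes.
       */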
1699 
1700       info->conformant_trunc_coord =
1701          info->drm_minor >= 52 &&
1702          device_info.ids_flags & AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
1703    }
1704 
1705    if (info->gfx_level >= GFX11 && device_info.shadow_size > 0) {
1706       info->has_fw_based_shadowing = true;
1707       info->fw_based_mcbp.shadow_size = device_info.shadow_size;
1708       info->fw_based_mcbp.shadow_alignment = device_info.shadow_alignment;
1709       info->fw_based_mcbp.csa_size = device_info.csa_size;
1710       info->fw_based_mcbp.csa_alignment = device_info.csa_alignment;
1711    }
1712 
1713    /* WARNING: Register shadowing decreases performance by up to 50% on GFX11 with current FW. */
1714    info->register_shadowing_required = device_info.ids_flags & AMDGPU_IDS_FLAGS_PREEMPTION &&
1715                                        info->gfx_level < GFX11;
1716 
1717    if (info->gfx_level >= GFX12) {
1718       info->has_set_context_pairs = true;
1719       info->has_set_sh_pairs = true;
1720       info->has_set_uconfig_pairs = true;
1721    } else if (info->gfx_level >= GFX11 && info->has_dedicated_vram) {
1722       info->has_set_context_pairs_packed = true;
1723       info->has_set_sh_pairs_packed = info->register_shadowing_required;
1724    }
1725 
1726    set_custom_cu_en_mask(info);
1727 
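   /* Debug path: if AMD_PARSE_IB names a readable file containing a raw IB
    * dump, it's parsed and printed to stdout and the process exits. Example
    * invocation (hypothetical path): AMD_PARSE_IB=/tmp/ib.bin <program>.
    */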
1728    const char *ib_filename = debug_get_option("AMD_PARSE_IB", NULL);
1729    if (ib_filename) {
1730       FILE *f = fopen(ib_filename, "r");
1731       if (f) {
1732          fseek(f, 0, SEEK_END);
1733          size_t size = ftell(f);
1734          uint32_t *ib = (uint32_t *)malloc(size);
1735          fseek(f, 0, SEEK_SET);
1736          size_t n_read = fread(ib, 1, size, f);
1737          fclose(f);
1738 
1739          if (n_read != size) {
1740             fprintf(stderr, "failed to read %zu bytes from '%s'\n", size, ib_filename);
1741             exit(1);
1742          }
1743 
1744          struct ac_ib_parser ib_parser = {
1745             .f = stdout,
1746             .ib = ib,
1747             .num_dw = size / 4,
1748             .gfx_level = info->gfx_level,
1749             .family = info->family,
1750             .ip_type = AMD_IP_GFX,
1751          };
1752 
1753          ac_parse_ib(&ib_parser, "IB");
1754          free(ib);
1755          exit(0);
1756       }
1757    }
1758    return true;
1759 }
1760 
1761 void ac_compute_driver_uuid(char *uuid, size_t size)
1762 {
1763    char amd_uuid[] = "AMD-MESA-DRV";
1764 
1765    assert(size >= sizeof(amd_uuid));
1766 
1767    memset(uuid, 0, size);
1768    strncpy(uuid, amd_uuid, size);
1769 }
1770 
1771 void ac_compute_device_uuid(const struct radeon_info *info, char *uuid, size_t size)
1772 {
1773    uint32_t *uint_uuid = (uint32_t *)uuid;
1774 
1775    assert(size >= sizeof(uint32_t) * 4);
1776 
1777    /* Use the device info directly instead of a SHA-1. GL/VK UUIDs
1778     * are 16 bytes vs 20 bytes for SHA-1, and the truncation that
1779     * would be required would discard part of the little entropy
1780     * we have.
1781     */
1782    memset(uuid, 0, size);
1783    if (!info->pci.valid) {
1784       fprintf(stderr,
1785               "ac_compute_device_uuid's output is based on invalid pci bus info.\n");
1786    }
1787    uint_uuid[0] = info->pci.domain;
1788    uint_uuid[1] = info->pci.bus;
1789    uint_uuid[2] = info->pci.dev;
1790    uint_uuid[3] = info->pci.func;
1791 }
1792 
1793 void ac_print_gpu_info(const struct radeon_info *info, FILE *f)
1794 {
1795    fprintf(f, "Device info:\n");
1796    fprintf(f, "    name = %s\n", info->name);
1797    fprintf(f, "    marketing_name = %s\n", info->marketing_name);
1798    fprintf(f, "    dev_filename = %s\n", info->dev_filename);
1799    fprintf(f, "    num_se = %i\n", info->num_se);
1800    fprintf(f, "    num_rb = %i\n", info->num_rb);
1801    fprintf(f, "    num_cu = %i\n", info->num_cu);
1802    fprintf(f, "    max_gpu_freq = %i MHz\n", info->max_gpu_freq_mhz);
1803    fprintf(f, "    max_gflops = %u GFLOPS\n", info->max_gflops);
1804 
1805    if (info->sqc_inst_cache_size) {
1806       fprintf(f, "    sqc_inst_cache_size = %i KB (%u per WGP)\n",
1807               DIV_ROUND_UP(info->sqc_inst_cache_size, 1024), info->num_sqc_per_wgp);
1808    }
1809    if (info->sqc_scalar_cache_size) {
1810       fprintf(f, "    sqc_scalar_cache_size = %i KB (%u per WGP)\n",
1811               DIV_ROUND_UP(info->sqc_scalar_cache_size, 1024), info->num_sqc_per_wgp);
1812    }
1813 
1814    fprintf(f, "    tcp_cache_size = %i KB\n", DIV_ROUND_UP(info->tcp_cache_size, 1024));
1815 
1816    if (info->gfx_level >= GFX10 && info->gfx_level < GFX12)
1817       fprintf(f, "    l1_cache_size = %i KB\n", DIV_ROUND_UP(info->l1_cache_size, 1024));
1818 
1819    fprintf(f, "    l2_cache_size = %i KB\n", DIV_ROUND_UP(info->l2_cache_size, 1024));
1820 
1821    if (info->l3_cache_size_mb)
1822       fprintf(f, "    l3_cache_size = %i MB\n", info->l3_cache_size_mb);
1823 
1824    fprintf(f, "    memory_channels = %u (TCC blocks)\n", info->num_tcc_blocks);
1825    fprintf(f, "    memory_size = %u GB (%u MB)\n",
1826            DIV_ROUND_UP(info->vram_size_kb, (1024 * 1024)),
1827            DIV_ROUND_UP(info->vram_size_kb, 1024));
1828    fprintf(f, "    memory_freq = %u GHz\n", DIV_ROUND_UP(info->memory_freq_mhz_effective, 1000));
1829    fprintf(f, "    memory_bus_width = %u bits\n", info->memory_bus_width);
1830    fprintf(f, "    memory_bandwidth = %u GB/s\n", info->memory_bandwidth_gbps);
1831    fprintf(f, "    pcie_gen = %u\n", info->pcie_gen);
1832    fprintf(f, "    pcie_num_lanes = %u\n", info->pcie_num_lanes);
1833    fprintf(f, "    pcie_bandwidth = %1.1f GB/s\n", info->pcie_bandwidth_mbps / 1024.0);
1834    fprintf(f, "    clock_crystal_freq = %i KHz\n", info->clock_crystal_freq);
1835 
1836    for (unsigned i = 0; i < AMD_NUM_IP_TYPES; i++) {
1837       if (info->ip[i].num_queues) {
1838          fprintf(f, "    IP %-7s %2u.%u \tqueues:%u \talign:%u \tpad_dw:0x%x\n",
1839                  ac_get_ip_type_string(info, i),
1840                  info->ip[i].ver_major, info->ip[i].ver_minor, info->ip[i].num_queues,
1841                  info->ip[i].ib_alignment, info->ip[i].ib_pad_dw_mask);
1842       }
1843    }
1844 
1845    fprintf(f, "Identification:\n");
1846    if (info->pci.valid)
1847       fprintf(f, "    pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n", info->pci.domain, info->pci.bus,
1848               info->pci.dev, info->pci.func);
1849    else
1850       fprintf(f, "    pci (domain:bus:dev.func): unknown\n");
1851    fprintf(f, "    pci_id = 0x%x\n", info->pci_id);
1852    fprintf(f, "    pci_rev_id = 0x%x\n", info->pci_rev_id);
1853    fprintf(f, "    family = %i\n", info->family);
1854    fprintf(f, "    gfx_level = %i\n", info->gfx_level);
1855    fprintf(f, "    family_id = %i\n", info->family_id);
1856    fprintf(f, "    chip_external_rev = %i\n", info->chip_external_rev);
1857    fprintf(f, "    chip_rev = %i\n", info->chip_rev);
1858 
1859    fprintf(f, "Flags:\n");
1860    fprintf(f, "    family_overridden = %u\n", info->family_overridden);
1861    fprintf(f, "    is_pro_graphics = %u\n", info->is_pro_graphics);
1862    fprintf(f, "    has_graphics = %i\n", info->has_graphics);
1863    fprintf(f, "    has_clear_state = %u\n", info->has_clear_state);
1864    fprintf(f, "    has_distributed_tess = %u\n", info->has_distributed_tess);
1865    fprintf(f, "    has_dcc_constant_encode = %u\n", info->has_dcc_constant_encode);
1866    fprintf(f, "    has_rbplus = %u\n", info->has_rbplus);
1867    fprintf(f, "    rbplus_allowed = %u\n", info->rbplus_allowed);
1868    fprintf(f, "    has_load_ctx_reg_pkt = %u\n", info->has_load_ctx_reg_pkt);
1869    fprintf(f, "    has_out_of_order_rast = %u\n", info->has_out_of_order_rast);
1870    fprintf(f, "    cpdma_prefetch_writes_memory = %u\n", info->cpdma_prefetch_writes_memory);
1871    fprintf(f, "    has_gfx9_scissor_bug = %i\n", info->has_gfx9_scissor_bug);
1872    fprintf(f, "    has_tc_compat_zrange_bug = %i\n", info->has_tc_compat_zrange_bug);
1873    fprintf(f, "    has_small_prim_filter_sample_loc_bug = %i\n", info->has_small_prim_filter_sample_loc_bug);
1874    fprintf(f, "    has_ls_vgpr_init_bug = %i\n", info->has_ls_vgpr_init_bug);
1875    fprintf(f, "    has_pops_missed_overlap_bug = %i\n", info->has_pops_missed_overlap_bug);
1876    fprintf(f, "    has_32bit_predication = %i\n", info->has_32bit_predication);
1877    fprintf(f, "    has_3d_cube_border_color_mipmap = %i\n", info->has_3d_cube_border_color_mipmap);
1878    fprintf(f, "    has_image_opcodes = %i\n", info->has_image_opcodes);
1879    fprintf(f, "    never_stop_sq_perf_counters = %i\n", info->never_stop_sq_perf_counters);
1880    fprintf(f, "    has_sqtt_rb_harvest_bug = %i\n", info->has_sqtt_rb_harvest_bug);
1881    fprintf(f, "    has_sqtt_auto_flush_mode_bug = %i\n", info->has_sqtt_auto_flush_mode_bug);
1882    fprintf(f, "    never_send_perfcounter_stop = %i\n", info->never_send_perfcounter_stop);
1883    fprintf(f, "    discardable_allows_big_page = %i\n", info->discardable_allows_big_page);
1884    fprintf(f, "    has_taskmesh_indirect0_bug = %i\n", info->has_taskmesh_indirect0_bug);
1885    fprintf(f, "    has_set_context_pairs = %i\n", info->has_set_context_pairs);
1886    fprintf(f, "    has_set_context_pairs_packed = %i\n", info->has_set_context_pairs_packed);
1887    fprintf(f, "    has_set_sh_pairs = %i\n", info->has_set_sh_pairs);
1888    fprintf(f, "    has_set_sh_pairs_packed = %i\n", info->has_set_sh_pairs_packed);
1889    fprintf(f, "    has_set_uconfig_pairs = %i\n", info->has_set_uconfig_pairs);
1890    fprintf(f, "    conformant_trunc_coord = %i\n", info->conformant_trunc_coord);
1891 
1892    if (info->gfx_level < GFX12) {
1893       fprintf(f, "Display features:\n");
1894       fprintf(f, "    use_display_dcc_unaligned = %u\n", info->use_display_dcc_unaligned);
1895       fprintf(f, "    use_display_dcc_with_retile_blit = %u\n", info->use_display_dcc_with_retile_blit);
1896    }
1897 
1898    fprintf(f, "Memory info:\n");
1899    fprintf(f, "    pte_fragment_size = %u\n", info->pte_fragment_size);
1900    fprintf(f, "    gart_page_size = %u\n", info->gart_page_size);
1901    fprintf(f, "    gart_size = %i MB\n", (int)DIV_ROUND_UP(info->gart_size_kb, 1024));
1902    fprintf(f, "    vram_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_size_kb, 1024));
1903    fprintf(f, "    vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_vis_size_kb, 1024));
1904    fprintf(f, "    vram_type = %i\n", info->vram_type);
1905    fprintf(f, "    max_heap_size_kb = %i MB\n", (int)DIV_ROUND_UP(info->max_heap_size_kb, 1024));
1906    fprintf(f, "    min_alloc_size = %u\n", info->min_alloc_size);
1907    fprintf(f, "    address32_hi = 0x%x\n", info->address32_hi);
1908    fprintf(f, "    has_dedicated_vram = %u\n", info->has_dedicated_vram);
1909    fprintf(f, "    all_vram_visible = %u\n", info->all_vram_visible);
1910    fprintf(f, "    max_tcc_blocks = %i\n", info->max_tcc_blocks);
1911    fprintf(f, "    tcc_cache_line_size = %u\n", info->tcc_cache_line_size);
1912    fprintf(f, "    tcc_rb_non_coherent = %u\n", info->tcc_rb_non_coherent);
1913    fprintf(f, "    cp_sdma_ge_use_system_memory_scope = %u\n", info->cp_sdma_ge_use_system_memory_scope);
1914    fprintf(f, "    pc_lines = %u\n", info->pc_lines);
1915    fprintf(f, "    lds_size_per_workgroup = %u\n", info->lds_size_per_workgroup);
1916    fprintf(f, "    lds_alloc_granularity = %i\n", info->lds_alloc_granularity);
1917    fprintf(f, "    lds_encode_granularity = %i\n", info->lds_encode_granularity);
1918    fprintf(f, "    max_memory_clock = %i MHz\n", info->memory_freq_mhz);
1919 
1920    fprintf(f, "CP info:\n");
1921    fprintf(f, "    gfx_ib_pad_with_type2 = %i\n", info->gfx_ib_pad_with_type2);
1922    fprintf(f, "    has_cp_dma = %i\n", info->has_cp_dma);
1923    fprintf(f, "    me_fw_version = %i\n", info->me_fw_version);
1924    fprintf(f, "    me_fw_feature = %i\n", info->me_fw_feature);
1925    fprintf(f, "    mec_fw_version = %i\n", info->mec_fw_version);
1926    fprintf(f, "    mec_fw_feature = %i\n", info->mec_fw_feature);
1927    fprintf(f, "    pfp_fw_version = %i\n", info->pfp_fw_version);
1928    fprintf(f, "    pfp_fw_feature = %i\n", info->pfp_fw_feature);
1929 
1930    fprintf(f, "Multimedia info:\n");
1931    if (info->ip[AMD_IP_VCN_DEC].num_queues || info->ip[AMD_IP_VCN_UNIFIED].num_queues) {
1932       if (info->family >= CHIP_NAVI31 || info->family == CHIP_GFX940)
1933          fprintf(f, "    vcn_unified = %u\n", info->ip[AMD_IP_VCN_UNIFIED].num_instances);
1934       else {
1935          fprintf(f, "    vcn_decode = %u\n", info->ip[AMD_IP_VCN_DEC].num_instances);
1936          fprintf(f, "    vcn_encode = %u\n", info->ip[AMD_IP_VCN_ENC].num_instances);
1937       }
1938       fprintf(f, "    vcn_enc_major_version = %u\n", info->vcn_enc_major_version);
1939       fprintf(f, "    vcn_enc_minor_version = %u\n", info->vcn_enc_minor_version);
1940       fprintf(f, "    vcn_dec_version = %u\n", info->vcn_dec_version);
1941    } else if (info->ip[AMD_IP_VCE].num_queues) {
1942       fprintf(f, "    vce_encode = %u\n", info->ip[AMD_IP_VCE].num_queues);
1943       fprintf(f, "    vce_fw_version = %u\n", info->vce_fw_version);
1944       fprintf(f, "    vce_harvest_config = %i\n", info->vce_harvest_config);
1945    } else if (info->ip[AMD_IP_UVD].num_queues)
1946       fprintf(f, "    uvd_fw_version = %u\n", info->uvd_fw_version);
1947 
1948    if (info->ip[AMD_IP_VCN_JPEG].num_queues)
1949       fprintf(f, "    jpeg_decode = %u\n", info->ip[AMD_IP_VCN_JPEG].num_instances);
1950 
1951    if ((info->drm_minor >= 41) &&
1952        (info->ip[AMD_IP_VCN_DEC].num_queues || info->ip[AMD_IP_VCN_UNIFIED].num_queues
1953        || info->ip[AMD_IP_VCE].num_queues || info->ip[AMD_IP_UVD].num_queues)) {
1954       char max_res_dec[64] = {0}, max_res_enc[64] = {0};
1955       char codec_str[][8] = {
1956          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2] = "mpeg2",
1957          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4] = "mpeg4",
1958          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1] = "vc1",
1959          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC] = "h264",
1960          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC] = "hevc",
1961          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG] = "jpeg",
1962          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9] = "vp9",
1963          [AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1] = "av1",
1964       };
1965       fprintf(f, "    %-8s %-4s %-16s %-4s %-16s\n",
1966               "codec", "dec", "max_resolution", "enc", "max_resolution");
1967       for (unsigned i = 0; i < AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT; i++) {
1968          if (info->dec_caps.codec_info[i].valid)
1969             sprintf(max_res_dec, "%ux%u", info->dec_caps.codec_info[i].max_width,
1970                     info->dec_caps.codec_info[i].max_height);
1971          else
1972             sprintf(max_res_dec, "%s", "-");
1973          if (info->enc_caps.codec_info[i].valid)
1974             sprintf(max_res_enc, "%ux%u", info->enc_caps.codec_info[i].max_width,
1975                     info->enc_caps.codec_info[i].max_height);
1976          else
1977             sprintf(max_res_enc, "%s", "-");
1978          fprintf(f, "    %-8s %-4s %-16s %-4s %-16s\n", codec_str[i],
1979                  info->dec_caps.codec_info[i].valid ? "*" : "-", max_res_dec,
1980                  info->enc_caps.codec_info[i].valid ? "*" : "-", max_res_enc);
1981       }
1982    }
1983 
1984    fprintf(f, "Kernel & winsys capabilities:\n");
1985    fprintf(f, "    drm = %i.%i.%i\n", info->drm_major, info->drm_minor, info->drm_patchlevel);
1986    fprintf(f, "    has_userptr = %i\n", info->has_userptr);
1987    fprintf(f, "    has_timeline_syncobj = %u\n", info->has_timeline_syncobj);
1988    fprintf(f, "    has_local_buffers = %u\n", info->has_local_buffers);
1989    fprintf(f, "    has_bo_metadata = %u\n", info->has_bo_metadata);
1990    fprintf(f, "    has_eqaa_surface_allocator = %u\n", info->has_eqaa_surface_allocator);
1991    fprintf(f, "    has_sparse_vm_mappings = %u\n", info->has_sparse_vm_mappings);
1992    fprintf(f, "    has_stable_pstate = %u\n", info->has_stable_pstate);
1993    fprintf(f, "    has_scheduled_fence_dependency = %u\n", info->has_scheduled_fence_dependency);
1994    fprintf(f, "    has_gang_submit = %u\n", info->has_gang_submit);
1995    fprintf(f, "    has_gpuvm_fault_query = %u\n", info->has_gpuvm_fault_query);
1996    fprintf(f, "    register_shadowing_required = %u\n", info->register_shadowing_required);
1997    fprintf(f, "    has_fw_based_shadowing = %u\n", info->has_fw_based_shadowing);
1998    if (info->has_fw_based_shadowing) {
1999       fprintf(f, "        * shadow size: %u (alignment: %u)\n",
2000          info->fw_based_mcbp.shadow_size,
2001          info->fw_based_mcbp.shadow_alignment);
2002       fprintf(f, "        * csa size: %u (alignment: %u)\n",
2003          info->fw_based_mcbp.csa_size,
2004          info->fw_based_mcbp.csa_alignment);
2005    }
2006 
2007    fprintf(f, "    has_tmz_support = %u\n", info->has_tmz_support);
2008    for (unsigned i = 0; i < AMD_NUM_IP_TYPES; i++) {
2009       if (info->max_submitted_ibs[i]) {
2010          fprintf(f, "    IP %-7s max_submitted_ibs = %u\n", ac_get_ip_type_string(info, i),
2011                  info->max_submitted_ibs[i]);
2012       }
2013    }
2014    fprintf(f, "    kernel_has_modifiers = %u\n", info->kernel_has_modifiers);
2015    fprintf(f, "    uses_kernel_cu_mask = %u\n", info->uses_kernel_cu_mask);
2016 
2017    fprintf(f, "Shader core info:\n");
2018    for (unsigned i = 0; i < info->max_se; i++) {
2019       for (unsigned j = 0; j < info->max_sa_per_se; j++) {
2020          fprintf(f, "    cu_mask[SE%u][SA%u] = 0x%x \t(%u)\tCU_EN = 0x%x\n", i, j,
2021                  info->cu_mask[i][j], util_bitcount(info->cu_mask[i][j]),
2022                  info->spi_cu_en & BITFIELD_MASK(util_bitcount(info->cu_mask[i][j])));
2023       }
2024    }
2025    fprintf(f, "    spi_cu_en_has_effect = %i\n", info->spi_cu_en_has_effect);
2026    fprintf(f, "    max_good_cu_per_sa = %i\n", info->max_good_cu_per_sa);
2027    fprintf(f, "    min_good_cu_per_sa = %i\n", info->min_good_cu_per_sa);
2028    fprintf(f, "    max_se = %i\n", info->max_se);
2029    fprintf(f, "    max_sa_per_se = %i\n", info->max_sa_per_se);
2030    fprintf(f, "    num_cu_per_sh = %i\n", info->num_cu_per_sh);
2031    fprintf(f, "    max_waves_per_simd = %i\n", info->max_waves_per_simd);
2032    fprintf(f, "    num_physical_sgprs_per_simd = %i\n", info->num_physical_sgprs_per_simd);
2033    fprintf(f, "    num_physical_wave64_vgprs_per_simd = %i\n",
2034            info->num_physical_wave64_vgprs_per_simd);
2035    fprintf(f, "    num_simd_per_compute_unit = %i\n", info->num_simd_per_compute_unit);
2036    fprintf(f, "    min_sgpr_alloc = %i\n", info->min_sgpr_alloc);
2037    fprintf(f, "    max_sgpr_alloc = %i\n", info->max_sgpr_alloc);
2038    fprintf(f, "    sgpr_alloc_granularity = %i\n", info->sgpr_alloc_granularity);
2039    fprintf(f, "    min_wave64_vgpr_alloc = %i\n", info->min_wave64_vgpr_alloc);
2040    fprintf(f, "    max_vgpr_alloc = %i\n", info->max_vgpr_alloc);
2041    fprintf(f, "    wave64_vgpr_alloc_granularity = %i\n", info->wave64_vgpr_alloc_granularity);
2042    fprintf(f, "    max_scratch_waves = %i\n", info->max_scratch_waves);
2043    fprintf(f, "    has_scratch_base_registers = %i\n", info->has_scratch_base_registers);
2044    fprintf(f, "Ring info:\n");
2045    fprintf(f, "    attribute_ring_size_per_se = %u KB\n",
2046            DIV_ROUND_UP(info->attribute_ring_size_per_se, 1024));
2047    if (info->gfx_level >= GFX12) {
2048       fprintf(f, "    pos_ring_size_per_se = %u KB\n", DIV_ROUND_UP(info->pos_ring_size_per_se, 1024));
2049       fprintf(f, "    prim_ring_size_per_se = %u KB\n", DIV_ROUND_UP(info->prim_ring_size_per_se, 1024));
2050    }
2051    fprintf(f, "    total_attribute_pos_prim_ring_size = %u KB\n",
2052            DIV_ROUND_UP(info->total_attribute_pos_prim_ring_size, 1024));
2053 
2054    fprintf(f, "Render backend info:\n");
2055    fprintf(f, "    pa_sc_tile_steering_override = 0x%x\n", info->pa_sc_tile_steering_override);
2056    fprintf(f, "    max_render_backends = %i\n", info->max_render_backends);
2057    fprintf(f, "    num_tile_pipes = %i\n", info->num_tile_pipes);
2058    fprintf(f, "    pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
2059    fprintf(f, "    enabled_rb_mask = 0x%" PRIx64 "\n", info->enabled_rb_mask);
2060    fprintf(f, "    max_alignment = %u\n", (unsigned)info->max_alignment);
2061    fprintf(f, "    pbb_max_alloc_count = %u\n", info->pbb_max_alloc_count);
2062 
2063    fprintf(f, "GB_ADDR_CONFIG: 0x%08x\n", info->gb_addr_config);
2064    if (info->gfx_level >= GFX12) {
2065       fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
2066       fprintf(f, "    pipe_interleave_size = %u\n",
2067               256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
2068       fprintf(f, "    num_pkrs = %u\n", 1 << G_0098F8_NUM_PKRS(info->gb_addr_config));
2069    } else if (info->gfx_level >= GFX10) {
2070       fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
2071       fprintf(f, "    pipe_interleave_size = %u\n",
2072               256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
2073       fprintf(f, "    max_compressed_frags = %u\n",
2074               1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
2075       if (info->gfx_level >= GFX10_3)
2076          fprintf(f, "    num_pkrs = %u\n", 1 << G_0098F8_NUM_PKRS(info->gb_addr_config));
2077    } else if (info->gfx_level == GFX9) {
2078       fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
2079       fprintf(f, "    pipe_interleave_size = %u\n",
2080               256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
2081       fprintf(f, "    max_compressed_frags = %u\n",
2082               1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
2083       fprintf(f, "    bank_interleave_size = %u\n",
2084               1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
2085       fprintf(f, "    num_banks = %u\n", 1 << G_0098F8_NUM_BANKS(info->gb_addr_config));
2086       fprintf(f, "    shader_engine_tile_size = %u\n",
2087               16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
2088       fprintf(f, "    num_shader_engines = %u\n",
2089               1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config));
2090       fprintf(f, "    num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX9(info->gb_addr_config));
2091       fprintf(f, "    multi_gpu_tile_size = %u (raw)\n",
2092               G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
2093       fprintf(f, "    num_rb_per_se = %u\n", 1 << G_0098F8_NUM_RB_PER_SE(info->gb_addr_config));
2094       fprintf(f, "    row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
2095       fprintf(f, "    num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
2096       fprintf(f, "    se_enable = %u (raw)\n", G_0098F8_SE_ENABLE(info->gb_addr_config));
2097    } else {
2098       fprintf(f, "    num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
2099       fprintf(f, "    pipe_interleave_size = %u\n",
2100               256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config));
2101       fprintf(f, "    bank_interleave_size = %u\n",
2102               1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
2103       fprintf(f, "    num_shader_engines = %u\n",
2104               1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info->gb_addr_config));
2105       fprintf(f, "    shader_engine_tile_size = %u\n",
2106               16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
2107       fprintf(f, "    num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX6(info->gb_addr_config));
2108       fprintf(f, "    multi_gpu_tile_size = %u (raw)\n",
2109               G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
2110       fprintf(f, "    row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
2111       fprintf(f, "    num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
2112    }
2113 }
2114 
2115 int ac_get_gs_table_depth(enum amd_gfx_level gfx_level, enum radeon_family family)
2116 {
2117    if (gfx_level >= GFX9)
2118       return -1;
2119 
2120    switch (family) {
2121    case CHIP_OLAND:
2122    case CHIP_HAINAN:
2123    case CHIP_KAVERI:
2124    case CHIP_KABINI:
2125    case CHIP_ICELAND:
2126    case CHIP_CARRIZO:
2127    case CHIP_STONEY:
2128       return 16;
2129    case CHIP_TAHITI:
2130    case CHIP_PITCAIRN:
2131    case CHIP_VERDE:
2132    case CHIP_BONAIRE:
2133    case CHIP_HAWAII:
2134    case CHIP_TONGA:
2135    case CHIP_FIJI:
2136    case CHIP_POLARIS10:
2137    case CHIP_POLARIS11:
2138    case CHIP_POLARIS12:
2139    case CHIP_VEGAM:
2140       return 32;
2141    default:
2142       unreachable("Unknown GPU");
2143    }
2144 }
2145 
2146 void ac_get_raster_config(const struct radeon_info *info, uint32_t *raster_config_p,
2147                           uint32_t *raster_config_1_p, uint32_t *se_tile_repeat_p)
2148 {
2149    unsigned raster_config, raster_config_1, se_tile_repeat;
2150 
2151    switch (info->family) {
2152    /* 1 SE / 1 RB */
2153    case CHIP_HAINAN:
2154    case CHIP_KABINI:
2155    case CHIP_STONEY:
2156       raster_config = 0x00000000;
2157       raster_config_1 = 0x00000000;
2158       break;
2159    /* 1 SE / 4 RBs */
2160    case CHIP_VERDE:
2161       raster_config = 0x0000124a;
2162       raster_config_1 = 0x00000000;
2163       break;
2164    /* 1 SE / 2 RBs (Oland is special) */
2165    case CHIP_OLAND:
2166       raster_config = 0x00000082;
2167       raster_config_1 = 0x00000000;
2168       break;
2169    /* 1 SE / 2 RBs */
2170    case CHIP_KAVERI:
2171    case CHIP_ICELAND:
2172    case CHIP_CARRIZO:
2173       raster_config = 0x00000002;
2174       raster_config_1 = 0x00000000;
2175       break;
2176    /* 2 SEs / 4 RBs */
2177    case CHIP_BONAIRE:
2178    case CHIP_POLARIS11:
2179    case CHIP_POLARIS12:
2180       raster_config = 0x16000012;
2181       raster_config_1 = 0x00000000;
2182       break;
2183    /* 2 SEs / 8 RBs */
2184    case CHIP_TAHITI:
2185    case CHIP_PITCAIRN:
2186       raster_config = 0x2a00126a;
2187       raster_config_1 = 0x00000000;
2188       break;
2189    /* 4 SEs / 8 RBs */
2190    case CHIP_TONGA:
2191    case CHIP_POLARIS10:
2192       raster_config = 0x16000012;
2193       raster_config_1 = 0x0000002a;
2194       break;
2195    /* 4 SEs / 16 RBs */
2196    case CHIP_HAWAII:
2197    case CHIP_FIJI:
2198    case CHIP_VEGAM:
2199       raster_config = 0x3a00161a;
2200       raster_config_1 = 0x0000002e;
2201       break;
2202    default:
2203       fprintf(stderr, "ac: Unknown GPU, using 0 for raster_config\n");
2204       raster_config = 0x00000000;
2205       raster_config_1 = 0x00000000;
2206       break;
2207    }
2208 
2209    /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
2210     * This decreases performance by up to 50% when the RB is the bottleneck.
2211     */
2212    if (info->family == CHIP_KAVERI && !info->is_amdgpu)
2213       raster_config = 0x00000000;
2214 
2215    /* Fiji: Old kernels have incorrect tiling config. This decreases
2216     * RB performance by 25%. (it disables 1 RB in the second packer)
2217     */
2218    if (info->family == CHIP_FIJI && info->cik_macrotile_mode_array[0] == 0x000000e8) {
2219       raster_config = 0x16000012;
2220       raster_config_1 = 0x0000002a;
2221    }
2222 
2223    unsigned se_width = 8 << G_028350_SE_XSEL_GFX6(raster_config);
2224    unsigned se_height = 8 << G_028350_SE_YSEL_GFX6(raster_config);
2225 
2226    /* I don't know how to calculate this, though this is probably a good guess. */
2227    se_tile_repeat = MAX2(se_width, se_height) * info->max_se;
2228 
2229    *raster_config_p = raster_config;
2230    *raster_config_1_p = raster_config_1;
2231    if (se_tile_repeat_p)
2232       *se_tile_repeat_p = se_tile_repeat;
2233 }
2234 
2235 void ac_get_harvested_configs(const struct radeon_info *info, unsigned raster_config,
2236                               unsigned *cik_raster_config_1_p, unsigned *raster_config_se)
2237 {
2238    unsigned sh_per_se = MAX2(info->max_sa_per_se, 1);
2239    unsigned num_se = MAX2(info->max_se, 1);
2240    unsigned rb_mask = info->enabled_rb_mask;
2241    unsigned num_rb = MIN2(info->max_render_backends, 16);
2242    unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
2243    unsigned rb_per_se = num_rb / num_se;
2244    unsigned se_mask[4];
2245    unsigned se;
2246 
2247    se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
2248    se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
2249    se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
2250    se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
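   /* Worked example (illustration only): rb_mask = 0xf, num_se = 2 and
    * rb_per_se = 2 give se_mask[0] = 0x3 and se_mask[1] = 0xc, i.e. both SEs
    * have all of their RBs enabled.
    */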
2251 
2252    assert(num_se == 1 || num_se == 2 || num_se == 4);
2253    assert(sh_per_se == 1 || sh_per_se == 2);
2254    assert(rb_per_pkr == 1 || rb_per_pkr == 2);
2255 
2256    if (info->gfx_level >= GFX7) {
2257       unsigned raster_config_1 = *cik_raster_config_1_p;
2258       if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || (!se_mask[2] && !se_mask[3]))) {
2259          raster_config_1 &= C_028354_SE_PAIR_MAP;
2260 
2261          if (!se_mask[0] && !se_mask[1]) {
2262             raster_config_1 |= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
2263          } else {
2264             raster_config_1 |= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
2265          }
2266          *cik_raster_config_1_p = raster_config_1;
2267       }
2268    }
2269 
2270    for (se = 0; se < num_se; se++) {
2271       unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
2272       unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
2273       int idx = (se / 2) * 2;
2274 
2275       raster_config_se[se] = raster_config;
2276       if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
2277          raster_config_se[se] &= C_028350_SE_MAP;
2278 
2279          if (!se_mask[idx]) {
2280             raster_config_se[se] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
2281          } else {
2282             raster_config_se[se] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
2283          }
2284       }
2285 
2286       pkr0_mask &= rb_mask;
2287       pkr1_mask &= rb_mask;
2288       if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
2289          raster_config_se[se] &= C_028350_PKR_MAP;
2290 
2291          if (!pkr0_mask) {
2292             raster_config_se[se] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
2293          } else {
2294             raster_config_se[se] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
2295          }
2296       }
2297 
2298       if (rb_per_se >= 2) {
2299          unsigned rb0_mask = 1 << (se * rb_per_se);
2300          unsigned rb1_mask = rb0_mask << 1;
2301 
2302          rb0_mask &= rb_mask;
2303          rb1_mask &= rb_mask;
2304          if (!rb0_mask || !rb1_mask) {
2305             raster_config_se[se] &= C_028350_RB_MAP_PKR0;
2306 
2307             if (!rb0_mask) {
2308                raster_config_se[se] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
2309             } else {
2310                raster_config_se[se] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
2311             }
2312          }
2313 
2314          if (rb_per_se > 2) {
2315             rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
2316             rb1_mask = rb0_mask << 1;
2317             rb0_mask &= rb_mask;
2318             rb1_mask &= rb_mask;
2319             if (!rb0_mask || !rb1_mask) {
2320                raster_config_se[se] &= C_028350_RB_MAP_PKR1;
2321 
2322                if (!rb0_mask) {
2323                   raster_config_se[se] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
2324                } else {
2325                   raster_config_se[se] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
2326                }
2327             }
2328          }
2329       }
2330    }
2331 }
2332 
2333 unsigned
2334 ac_get_compute_resource_limits(const struct radeon_info *info, unsigned waves_per_threadgroup,
2335                                unsigned max_waves_per_sh, unsigned threadgroups_per_cu)
2336 {
2337    unsigned compute_resource_limits = S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
2338 
2339    if (info->gfx_level >= GFX7) {
2340       unsigned num_cu_per_se = info->num_cu / info->num_se;
2341 
2342       /* Gfx9 should set the limit to max instead of 0 to fix high priority compute. */
2343       if (info->gfx_level == GFX9 && !max_waves_per_sh) {
2344          max_waves_per_sh = info->max_good_cu_per_sa * info->num_simd_per_compute_unit *
2345                             info->max_waves_per_simd;
2346       }
2347 
2348       /* On GFX12+, WAVES_PER_SH means waves per SE. */
2349       if (info->gfx_level >= GFX12)
2350          max_waves_per_sh *= info->max_sa_per_se;
2351 
2352       /* Force even distribution on all SIMDs in a CU if the workgroup
2353        * size is 64. This has shown good improvements when the number of
2354        * CUs per SE is not a multiple of 4.
2355        */
2356       if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
2357          compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
2358 
2359       assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8);
2360       compute_resource_limits |=
2361          S_00B854_WAVES_PER_SH(max_waves_per_sh) | S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1);
2362    } else {
2363       /* GFX6 */
2364       if (max_waves_per_sh) {
2365          unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16);
2366          compute_resource_limits |= S_00B854_WAVES_PER_SH_GFX6(limit_div16);
2367       }
2368    }
2369    return compute_resource_limits;
2370 }
2371 
2372 void ac_get_hs_info(const struct radeon_info *info,
2373                     struct ac_hs_info *hs)
2374 {
2375    bool double_offchip_buffers = info->gfx_level >= GFX7 &&
2376                                  info->family != CHIP_CARRIZO &&
2377                                  info->family != CHIP_STONEY;
2378    unsigned max_offchip_buffers_per_se;
2379    unsigned max_offchip_buffers;
2380    unsigned offchip_granularity;
2381    unsigned hs_offchip_param;
2382 
2383    hs->tess_offchip_block_dw_size =
2384       info->family == CHIP_HAWAII ? 4096 : 8192;
2385 
2386    /*
2387     * Per RadeonSI:
2388     * This must be one less than the maximum number due to a hw limitation.
2389     * Various hardware bugs need this.
2390     *
2391     * Per AMDVLK:
2392     * Vega10 should limit max_offchip_buffers to 508 (4 * 127).
2393     * Gfx7 should limit max_offchip_buffers to 508
2394     * Gfx6 should limit max_offchip_buffers to 126 (2 * 63)
2395     *
2396     * Follow AMDVLK here.
2397     */
2398    if (info->gfx_level >= GFX11) {
2399       max_offchip_buffers_per_se = 256; /* TODO: we could decrease this to reduce memory/cache usage */
2400    } else if (info->gfx_level >= GFX10) {
2401       max_offchip_buffers_per_se = 128;
2402    } else if (info->family == CHIP_VEGA12 || info->family == CHIP_VEGA20) {
2403       /* Only certain chips can use the maximum value. */
2404       max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
2405    } else {
2406       max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
2407    }
2408 
2409    max_offchip_buffers = max_offchip_buffers_per_se * info->max_se;
2410 
2411    /* Hawaii has a bug with offchip buffers > 256 that can be worked
2412     * around by setting 4K granularity.
2413     */
2414    if (hs->tess_offchip_block_dw_size == 4096) {
2415       assert(info->family == CHIP_HAWAII);
2416       offchip_granularity = V_03093C_X_4K_DWORDS;
2417    } else {
2418       assert(hs->tess_offchip_block_dw_size == 8192);
2419       offchip_granularity = V_03093C_X_8K_DWORDS;
2420    }
2421 
2422    switch (info->gfx_level) {
2423    case GFX6:
2424       max_offchip_buffers = MIN2(max_offchip_buffers, 126);
2425       break;
2426    case GFX7:
2427    case GFX8:
2428    case GFX9:
2429       max_offchip_buffers = MIN2(max_offchip_buffers, 508);
2430       break;
2431    case GFX10:
2432       break;
2433    default:
2434       break;
2435    }
2436 
2437    hs->max_offchip_buffers = max_offchip_buffers;
2438 
2439    if (info->gfx_level >= GFX11) {
2440       /* OFFCHIP_BUFFERING is per SE. */
2441       hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX103(max_offchip_buffers_per_se - 1) |
2442                          S_03093C_OFFCHIP_GRANULARITY_GFX103(offchip_granularity);
2443    } else if (info->gfx_level >= GFX10_3) {
2444       hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX103(max_offchip_buffers - 1) |
2445                          S_03093C_OFFCHIP_GRANULARITY_GFX103(offchip_granularity);
2446    } else if (info->gfx_level >= GFX7) {
2447       if (info->gfx_level >= GFX8)
2448          --max_offchip_buffers;
2449       hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX7(max_offchip_buffers) |
2450                          S_03093C_OFFCHIP_GRANULARITY_GFX7(offchip_granularity);
2451    } else {
2452       hs_offchip_param = S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
2453    }
2454 
2455    hs->hs_offchip_param = hs_offchip_param;
2456 
2457    hs->tess_factor_ring_size = 48 * 1024 * info->max_se;
2458    hs->tess_offchip_ring_offset = align(hs->tess_factor_ring_size, 64 * 1024);
2459    hs->tess_offchip_ring_size = hs->max_offchip_buffers * hs->tess_offchip_block_dw_size * 4;
2460 }
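/* Worked example for ac_get_hs_info (illustration only): a GFX11 part with
 * max_se = 2 gets tess_factor_ring_size = 48 * 1024 * 2 = 98304 bytes,
 * tess_offchip_ring_offset = align(98304, 65536) = 131072,
 * max_offchip_buffers = 256 * 2 = 512 and
 * tess_offchip_ring_size = 512 * 8192 * 4 = 16 MiB.
 */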
2461 
2462 static uint16_t get_task_num_entries(enum radeon_family fam)
2463 {
2464    /* Number of task shader ring entries. Needs to be a power of two.
2465     * Use a low number on smaller chips so we don't waste space,
2466     * but keep it high on bigger chips so it doesn't inhibit parallelism.
2467     *
2468     * This number is compiled into task/mesh shaders as a constant.
2469     * In order to ensure this works fine with the shader cache, we must
2470     * base this decision on the chip family, not the number of CUs in
2471     * the current GPU. (So, the cache remains consistent for all
2472     * chips in the same family.)
2473     */
2474    switch (fam) {
2475    case CHIP_VANGOGH:
2476    case CHIP_NAVI24:
2477    case CHIP_REMBRANDT:
2478       return 256;
2479    case CHIP_NAVI21:
2480    case CHIP_NAVI22:
2481    case CHIP_NAVI23:
2482    default:
2483       return 1024;
2484    }
2485 }
2486 
2487 void ac_get_task_info(const struct radeon_info *info,
2488                       struct ac_task_info *task_info)
2489 {
2490    const uint16_t num_entries = get_task_num_entries(info->family);
2491    const uint32_t draw_ring_bytes = num_entries * AC_TASK_DRAW_ENTRY_BYTES;
2492    const uint32_t payload_ring_bytes = num_entries * AC_TASK_PAYLOAD_ENTRY_BYTES;
2493 
2494    /* Ensure that the address of each ring is 256-byte aligned. */
2495    task_info->num_entries = num_entries;
2496    task_info->draw_ring_offset = ALIGN(AC_TASK_CTRLBUF_BYTES, 256);
2497    task_info->payload_ring_offset = ALIGN(task_info->draw_ring_offset + draw_ring_bytes, 256);
2498    task_info->bo_size_bytes = task_info->payload_ring_offset + payload_ring_bytes;
2499 }
2500 
2501 uint32_t ac_memory_ops_per_clock(uint32_t vram_type)
2502 {
2503    /* Based on MemoryOpsPerClockTable from PAL. */
2504    switch (vram_type) {
2505    case AMDGPU_VRAM_TYPE_GDDR1:
2506    case AMDGPU_VRAM_TYPE_GDDR3: /* last in low-end Evergreen */
2507    case AMDGPU_VRAM_TYPE_GDDR4: /* last in R7xx, not used much */
2508    case AMDGPU_VRAM_TYPE_UNKNOWN:
2509    default:
2510       return 0;
2511    case AMDGPU_VRAM_TYPE_DDR2:
2512    case AMDGPU_VRAM_TYPE_DDR3:
2513    case AMDGPU_VRAM_TYPE_DDR4:
2514    case AMDGPU_VRAM_TYPE_LPDDR4:
2515    case AMDGPU_VRAM_TYPE_HBM: /* same for HBM2 and HBM3 */
2516       return 2;
2517    case AMDGPU_VRAM_TYPE_DDR5:
2518    case AMDGPU_VRAM_TYPE_LPDDR5:
2519    case AMDGPU_VRAM_TYPE_GDDR5: /* last in Polaris and low-end Navi14 */
2520       return 4;
2521    case AMDGPU_VRAM_TYPE_GDDR6:
2522       return 16;
2523    }
2524 }
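/* Example (illustration only): GDDR6 performs 16 memory ops per clock, so a
 * 1250 MHz memory clock corresponds to an effective rate of
 * 1250 * 16 = 20000 MT/s.
 */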
2525 
2526 uint32_t ac_gfx103_get_cu_mask_ps(const struct radeon_info *info)
2527 {
2528    /* It's wasteful to enable all CUs for PS if shader arrays have a different
2529     * number of CUs. The reason is that the hardware sends the same number of PS
2530     * waves to each shader array, so the slowest shader array limits the performance.
2531     * Disable the extra CUs for PS in other shader arrays to save power and thus
2532     * increase clocks for busy CUs. In the future, we might disable or enable this
2533     * tweak only for certain apps.
2534     */
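   /* E.g. min_good_cu_per_sa = 8 yields 0xff, i.e. CUs 0-7 enabled. */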
2535    return u_bit_consecutive(0, info->min_good_cu_per_sa);
2536 }
2537