1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 *
5 * SPDX-License-Identifier: MIT
6 */
7
8 #define AC_SURFACE_INCLUDE_NIR
9 #include "ac_surface.h"
10
11 #include "ac_drm_fourcc.h"
12 #include "ac_gpu_info.h"
13 #include "addrlib/inc/addrinterface.h"
14 #include "addrlib/src/amdgpu_asic_addr.h"
15 #include "amd_family.h"
16 #include "sid.h"
17 #include "util/hash_table.h"
18 #include "util/macros.h"
19 #include "util/simple_mtx.h"
20 #include "util/u_atomic.h"
21 #include "util/format/u_format.h"
22 #include "util/u_math.h"
23 #include "util/u_memory.h"
24
25 #include <errno.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28
29 #ifdef _WIN32
30 #define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
31 #define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
32 #define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
33 #define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
34 #define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
35 #define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
36 #define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
37 #define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
38 #define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
39 #define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
40 #define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
41 #define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
42 #define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
43 #define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
44 #define AMDGPU_TILING_NUM_BANKS_SHIFT 21
45 #define AMDGPU_TILING_NUM_BANKS_MASK 0x3
46 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
47 #define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
48 #define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
49 #define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
50 #define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
51 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
52 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
53 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
54 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
55 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
56 #define AMDGPU_TILING_SCANOUT_SHIFT 63
57 #define AMDGPU_TILING_SCANOUT_MASK 0x1
58 #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
59 #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
60 #define AMDGPU_TILING_GFX12_SCANOUT_SHIFT 63
61 #define AMDGPU_TILING_GFX12_SCANOUT_MASK 0x1
62 #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
63 #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
64 #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
65 #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7
66 #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
67 #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f
68 #define AMDGPU_TILING_SET(field, value) \
69 (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
70 #define AMDGPU_TILING_GET(value, field) \
71 (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
72 #else
73 #include "drm-uapi/amdgpu_drm.h"
74 #endif
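/* Illustrative sketch (not compiled): the SET/GET helpers above pack per-BO tiling metadata
 * into the 64-bit value exchanged with the kernel. The field values here are made up.
 */
#if 0
uint64_t md = AMDGPU_TILING_SET(SWIZZLE_MODE, 25) |
              AMDGPU_TILING_SET(DCC_OFFSET_256B, 0x1000) |
              AMDGPU_TILING_SET(SCANOUT, 1);

unsigned swizzle = AMDGPU_TILING_GET(md, SWIZZLE_MODE);  /* 25 */
bool scanout = AMDGPU_TILING_GET(md, SCANOUT);           /* true */
#endif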
75
76 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
77 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
78 #endif
79
80 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
81 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
82 #endif
83
84 struct ac_addrlib {
85 ADDR_HANDLE handle;
86 simple_mtx_t lock;
87 };
88
89 unsigned ac_pipe_config_to_num_pipes(unsigned pipe_config)
90 {
91 switch (pipe_config) {
92 case V_009910_ADDR_SURF_P2:
93 return 2;
94 case V_009910_ADDR_SURF_P4_8x16:
95 case V_009910_ADDR_SURF_P4_16x16:
96 case V_009910_ADDR_SURF_P4_16x32:
97 case V_009910_ADDR_SURF_P4_32x32:
98 return 4;
99 case V_009910_ADDR_SURF_P8_16x16_8x16:
100 case V_009910_ADDR_SURF_P8_16x32_8x16:
101 case V_009910_ADDR_SURF_P8_32x32_8x16:
102 case V_009910_ADDR_SURF_P8_16x32_16x16:
103 case V_009910_ADDR_SURF_P8_32x32_16x16:
104 case V_009910_ADDR_SURF_P8_32x32_16x32:
105 case V_009910_ADDR_SURF_P8_32x64_32x32:
106 return 8;
107 case V_009910_ADDR_SURF_P16_32x32_8x16:
108 case V_009910_ADDR_SURF_P16_32x32_16x16:
109 return 16;
110 default:
111 unreachable("invalid pipe_config");
112 }
113 }
114
115 bool ac_modifier_has_dcc(uint64_t modifier)
116 {
117 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
118 }
119
120 bool ac_modifier_has_dcc_retile(uint64_t modifier)
121 {
122 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC_RETILE, modifier);
123 }
124
125 bool ac_modifier_supports_dcc_image_stores(enum amd_gfx_level gfx_level, uint64_t modifier)
126 {
127 if (!ac_modifier_has_dcc(modifier))
128 return false;
129
130 if (gfx_level >= GFX12)
131 return true;
132
133 return (!AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
134 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
135 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_128B) ||
136 (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && /* gfx10.3 */
137 AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
138 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
139 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_64B) ||
140 (gfx_level >= GFX11_5 &&
141 AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX11 &&
142 !AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
143 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
144 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_256B);
145
146 }
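/* Sketch (not compiled): one gfx10.3-style modifier accepted by the second clause above
 * (INDEPENDENT_64B = 1, INDEPENDENT_128B = 1, MAX_COMPRESSED_BLOCK = 64B). The TILE choice is
 * just an example; PIPE_XOR_BITS/PACKERS are omitted for brevity.
 */
#if 0
uint64_t mod = AMD_FMT_MOD |
               AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
               AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
               AMD_FMT_MOD_SET(DCC, 1) |
               AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
               AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
               AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);

assert(ac_modifier_supports_dcc_image_stores(GFX10_3, mod));
#endif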
147
148
149 bool ac_surface_supports_dcc_image_stores(enum amd_gfx_level gfx_level,
150 const struct radeon_surf *surf)
151 {
152 /* DCC image stores is only available for GFX10+. */
153 if (gfx_level < GFX10)
154 return false;
155
156 if (gfx_level >= GFX12)
157 return true;
158
159 /* DCC image stores support the following settings:
160 * - INDEPENDENT_64B_BLOCKS = 0
161 * - INDEPENDENT_128B_BLOCKS = 1
162 * - MAX_COMPRESSED_BLOCK_SIZE = 128B
163 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
164 *
165 * gfx10.3 also supports the following setting:
166 * - INDEPENDENT_64B_BLOCKS = 1
167 * - INDEPENDENT_128B_BLOCKS = 1
168 * - MAX_COMPRESSED_BLOCK_SIZE = 64B
169 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
170 *
171 * gfx11.5 also supports the following:
172 * - INDEPENDENT_64B_BLOCKS = 0
173 * - INDEPENDENT_128B_BLOCKS = 1
174 * - MAX_COMPRESSED_BLOCK_SIZE = 256B
175 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
176 *
177 * The compressor only looks at MAX_COMPRESSED_BLOCK_SIZE to determine
178 * the INDEPENDENT_xx_BLOCKS settings. 128B implies INDEP_128B, while 64B
179 * implies INDEP_64B && INDEP_128B.
180 *
181 * The same limitations apply to SDMA compressed stores because
182 * SDMA uses the same DCC codec.
183 */
184 return (!surf->u.gfx9.color.dcc.independent_64B_blocks &&
185 surf->u.gfx9.color.dcc.independent_128B_blocks &&
186 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B) ||
187 (gfx_level >= GFX10_3 && /* gfx10.3 - old 64B compression */
188 surf->u.gfx9.color.dcc.independent_64B_blocks &&
189 surf->u.gfx9.color.dcc.independent_128B_blocks &&
190 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) ||
191 (gfx_level >= GFX11_5 && /* gfx11.5 - new 256B compression */
192 !surf->u.gfx9.color.dcc.independent_64B_blocks &&
193 surf->u.gfx9.color.dcc.independent_128B_blocks &&
194 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B);
195 }
196
197 static unsigned ac_get_modifier_swizzle_mode(enum amd_gfx_level gfx_level, uint64_t modifier)
198 {
199 if (modifier == DRM_FORMAT_MOD_LINEAR)
200 return ADDR_SW_LINEAR;
201
202 if (gfx_level >= GFX12 &&
203 AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX11) {
204 /* The Gfx11 swizzle mode needs to be translated to Gfx12. */
205 if (AMD_FMT_MOD_GET(TILE, modifier) == AMD_FMT_MOD_TILE_GFX9_64K_D)
206 return AMD_FMT_MOD_TILE_GFX12_64K_2D;
207
208 assert(0);
209 return ADDR_SW_MAX_TYPE; /* can't translate */
210 }
211
212 return AMD_FMT_MOD_GET(TILE, modifier);
213 }
214
215 static void
216 ac_modifier_fill_dcc_params(uint64_t modifier, struct radeon_surf *surf,
217 ADDR2_COMPUTE_SURFACE_INFO_INPUT *surf_info)
218 {
219 assert(ac_modifier_has_dcc(modifier));
220 assert(AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX12);
221
222 if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
223 surf_info->flags.metaPipeUnaligned = 0;
224 } else {
225 surf_info->flags.metaPipeUnaligned = !AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
226 }
227
228 /* metaPipeUnaligned is not strictly necessary here, but this ensures we don't set metaRbUnaligned
229 * on non-displayable DCC surfaces just because num_render_backends == 1. */
230 surf_info->flags.metaRbUnaligned = AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
231 AMD_FMT_MOD_GET(RB, modifier) == 0 &&
232 surf_info->flags.metaPipeUnaligned;
233
234 surf->u.gfx9.color.dcc.independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
235 surf->u.gfx9.color.dcc.independent_128B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
236 surf->u.gfx9.color.dcc.max_compressed_block_size = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
237 }
238
239 bool ac_is_modifier_supported(const struct radeon_info *info,
240 const struct ac_modifier_options *options,
241 enum pipe_format format,
242 uint64_t modifier)
243 {
244
245 if (util_format_is_compressed(format) ||
246 util_format_is_depth_or_stencil(format) ||
247 util_format_get_blocksizebits(format) > 64)
248 return false;
249
250 if (info->gfx_level < GFX9)
251 return false;
252
253 if (modifier == DRM_FORMAT_MOD_LINEAR)
254 return true;
255
256 /* GFX8 may need a different modifier for each plane */
257 if (info->gfx_level < GFX9 && util_format_get_num_planes(format) > 1)
258 return false;
259
260 uint32_t allowed_swizzles = 0xFFFFFFFF;
261 switch (info->gfx_level) {
262 case GFX9:
263 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x06000000 : 0x06660660;
264 break;
265 case GFX10:
266 case GFX10_3:
267 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x08000000 : 0x0E660660;
268 break;
269 case GFX11:
270 case GFX11_5:
271 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x88000000 : 0xCC440440;
272 break;
273 case GFX12:
274 allowed_swizzles = 0x1E; /* all 2D swizzle modes */
275 break;
276 default:
277 return false;
278 }
279
280 if (!((1u << ac_get_modifier_swizzle_mode(info->gfx_level, modifier)) & allowed_swizzles))
281 return false;
282
283 if (ac_modifier_has_dcc(modifier)) {
284 /* TODO: support multi-planar formats with DCC */
285 if (util_format_get_num_planes(format) > 1)
286 return false;
287
288 if (!info->has_graphics)
289 return false;
290
291 if (!options->dcc)
292 return false;
293
294 if (ac_modifier_has_dcc_retile(modifier)) {
295 /* radeonsi and radv retiling shaders only support bpe == 32. */
296 if (util_format_get_blocksizebits(format) != 32)
297 return false;
298 if (!info->use_display_dcc_with_retile_blit || !options->dcc_retile)
299 return false;
300 }
301 }
302
303 return true;
304 }
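/* Worked example for the mask check above: on GFX9 with DCC, allowed_swizzles is 0x06000000,
 * i.e. only TILE values 25 and 26 (64K_S_X / 64K_D_X in the fourcc encoding) are accepted:
 * (1u << 25) & 0x06000000 == 0x02000000, whereas the plain 64K_S / 64K_D modes (bits 9 / 10)
 * are rejected when DCC is requested.
 */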
305
306 bool ac_get_supported_modifiers(const struct radeon_info *info,
307 const struct ac_modifier_options *options,
308 enum pipe_format format,
309 unsigned *mod_count,
310 uint64_t *mods)
311 {
312 unsigned current_mod = 0;
313
314 #define ADD_MOD(name) \
315 if (ac_is_modifier_supported(info, options, format, (name))) { \
316 if (mods && current_mod < *mod_count) \
317 mods[current_mod] = (name); \
318 ++current_mod; \
319 }
320
321 /* The modifiers have to be added in descending order of estimated
322 * performance. The drivers will prefer modifiers that come earlier
323 * in the list. */
324 switch (info->gfx_level) {
325 case GFX9: {
326 unsigned pipe_xor_bits = MIN2(G_0098F8_NUM_PIPES(info->gb_addr_config) +
327 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config), 8);
328 unsigned bank_xor_bits = MIN2(G_0098F8_NUM_BANKS(info->gb_addr_config), 8 - pipe_xor_bits);
329 unsigned pipes = G_0098F8_NUM_PIPES(info->gb_addr_config);
330 unsigned rb = G_0098F8_NUM_RB_PER_SE(info->gb_addr_config) +
331 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config);
332
333 uint64_t common_dcc = AMD_FMT_MOD_SET(DCC, 1) |
334 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
335 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
336 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, info->has_dcc_constant_encode) |
337 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
338 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits);
339
340 ADD_MOD(AMD_FMT_MOD |
341 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
342 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
343 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
344 common_dcc |
345 AMD_FMT_MOD_SET(PIPE, pipes) |
346 AMD_FMT_MOD_SET(RB, rb))
347
348 ADD_MOD(AMD_FMT_MOD |
349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
351 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
352 common_dcc |
353 AMD_FMT_MOD_SET(PIPE, pipes) |
354 AMD_FMT_MOD_SET(RB, rb))
355
356 if (util_format_get_blocksizebits(format) == 32) {
357 if (info->max_render_backends == 1) {
358 ADD_MOD(AMD_FMT_MOD |
359 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
360 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
361 common_dcc);
362 }
363
364
365 ADD_MOD(AMD_FMT_MOD |
366 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
367 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
368 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
369 common_dcc |
370 AMD_FMT_MOD_SET(PIPE, pipes) |
371 AMD_FMT_MOD_SET(RB, rb))
372 }
373
374
375 ADD_MOD(AMD_FMT_MOD |
376 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
377 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
378 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
379 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
380
381 ADD_MOD(AMD_FMT_MOD |
382 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
383 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
384 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
385 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
386
387 ADD_MOD(AMD_FMT_MOD |
388 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
389 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
390
391 ADD_MOD(AMD_FMT_MOD |
392 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
393 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
394
395 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
396 break;
397 }
398 case GFX10:
399 case GFX10_3: {
400 bool rbplus = info->gfx_level >= GFX10_3;
401 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
402 unsigned pkrs = rbplus ? G_0098F8_NUM_PKRS(info->gb_addr_config) : 0;
403
404 unsigned version = rbplus ? AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS : AMD_FMT_MOD_TILE_VER_GFX10;
405 uint64_t common_dcc = AMD_FMT_MOD_SET(TILE_VERSION, version) |
406 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
407 AMD_FMT_MOD_SET(DCC, 1) |
408 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
409 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
410 AMD_FMT_MOD_SET(PACKERS, pkrs);
411
412 ADD_MOD(AMD_FMT_MOD | common_dcc |
413 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
414 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
415 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
416
417 if (info->gfx_level >= GFX10_3) {
418 ADD_MOD(AMD_FMT_MOD | common_dcc |
419 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
420 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
421 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
422
423 ADD_MOD(AMD_FMT_MOD | common_dcc |
424 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
425 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
426 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
427 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B))
428 }
429
430 ADD_MOD(AMD_FMT_MOD |
431 AMD_FMT_MOD_SET(TILE_VERSION, version) |
432 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
433 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
434 AMD_FMT_MOD_SET(PACKERS, pkrs))
435
436 ADD_MOD(AMD_FMT_MOD |
437 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
438 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
439 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits))
440
441 if (util_format_get_blocksizebits(format) != 32) {
442 ADD_MOD(AMD_FMT_MOD |
443 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
444 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
445 }
446
447 ADD_MOD(AMD_FMT_MOD |
448 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
449 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
450
451 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
452 break;
453 }
454 case GFX11:
455 case GFX11_5: {
456 /* GFX11 has a new microblock organization. There are no S modes for 2D. */
457 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
458 unsigned pkrs = G_0098F8_NUM_PKRS(info->gb_addr_config);
459 unsigned num_pipes = 1 << pipe_xor_bits;
460
461 /* R_X swizzle modes are the best for rendering and DCC requires them. */
462 for (unsigned i = 0; i < 2; i++) {
463 unsigned swizzle_r_x;
464
465 /* Insert the best one first. */
466 if (num_pipes > 16)
467 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
468 else
469 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
470
471 /* Disable 256K on APUs because it doesn't work with DAL. */
472 if (!info->has_dedicated_vram && swizzle_r_x == AMD_FMT_MOD_TILE_GFX11_256K_R_X)
473 continue;
474
475 uint64_t modifier_r_x = AMD_FMT_MOD |
476 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
477 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
478 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
479 AMD_FMT_MOD_SET(PACKERS, pkrs);
480
481 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
482 uint64_t modifier_dcc_best_gfx11_5 = modifier_r_x |
483 AMD_FMT_MOD_SET(DCC, 1) |
484 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
485 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
486 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_256B);
487
488 uint64_t modifier_dcc_best = modifier_r_x |
489 AMD_FMT_MOD_SET(DCC, 1) |
490 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
491 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
492 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
493
494 /* DCC settings for 4K and greater resolutions. (required by display hw) */
495 uint64_t modifier_dcc_4k = modifier_r_x |
496 AMD_FMT_MOD_SET(DCC, 1) |
497 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
498 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
499 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
500
501 /* Modifiers have to be sorted from best to worst.
502 *
503 * Top level order:
504 * 1. The best chip-specific modifiers with DCC, potentially non-displayable.
505 * 2. Chip-specific displayable modifiers with DCC.
506 * 3. Chip-specific displayable modifiers without DCC.
507 * 4. Chip-independent modifiers without DCC.
508 * 5. Linear.
509 */
510
511 /* Add the best non-displayable modifier first. */
512 if (info->gfx_level == GFX11_5)
513 ADD_MOD(modifier_dcc_best_gfx11_5 | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
514
515 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
516
517 /* Displayable modifiers are next. */
518 /* Add other displayable DCC settings. (DCC_RETILE implies displayable on all chips) */
519 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1))
520 ADD_MOD(modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1))
521
522 /* Add one without DCC that is displayable (it's also optimal for non-displayable cases). */
523 ADD_MOD(modifier_r_x)
524 }
525
526 /* Add one that is compatible with other gfx11 chips. */
527 ADD_MOD(AMD_FMT_MOD |
528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
529 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D))
530
531 /* Linear must be last. */
532 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
533 break;
534 }
535 case GFX12: {
536 /* Chip properties no longer affect tiling, and there is no distinction between displayable
537 * and non-displayable anymore. (DCC settings may affect displayability though)
538 *
539 * Only declare 64K modifiers for now.
540 */
541 uint64_t mod_64K_2D = AMD_FMT_MOD |
542 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
543 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
544
545 /* This is identical to GFX12_64K_2D, but expressed in terms of VER_GFX11. */
546 uint64_t mod_64K_2D_as_gfx11 = AMD_FMT_MOD |
547 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
548 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D);
549
550 /* Expose both 128B and 64B compressed blocks. */
551 uint64_t dcc_128B = AMD_FMT_MOD_SET(DCC, 1) |
552 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
553 uint64_t dcc_64B = AMD_FMT_MOD_SET(DCC, 1) |
554 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
555
556 uint64_t mod_256B_2D = AMD_FMT_MOD |
557 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
558 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
559
560 /* Modifiers must be sorted from best to worst. */
561 ADD_MOD(mod_64K_2D | dcc_128B) /* 64K with DCC and 128B compressed blocks */
562 ADD_MOD(mod_64K_2D | dcc_64B) /* 64K with DCC and 64B compressed blocks */
563 ADD_MOD(mod_64K_2D) /* 64K without DCC */
564 ADD_MOD(mod_64K_2D_as_gfx11) /* the same as above, but for gfx11 interop */
565 ADD_MOD(mod_256B_2D)
566 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
567 break;
568 }
569 default:
570 break;
571 }
572
573 #undef ADD_MOD
574
575 if (!mods) {
576 *mod_count = current_mod;
577 return true;
578 }
579
580 bool complete = current_mod <= *mod_count;
581 *mod_count = MIN2(*mod_count, current_mod);
582 return complete;
583 }
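/* Sketch (not compiled): the usual two-call pattern for this query, matching how *mod_count and
 * the return value behave above. "info" and "options" are assumed to be filled by the caller.
 */
#if 0
unsigned count = 0;
ac_get_supported_modifiers(&info, &options, PIPE_FORMAT_R8G8B8A8_UNORM, &count, NULL);

uint64_t *mods = malloc(count * sizeof(uint64_t));
bool complete = ac_get_supported_modifiers(&info, &options, PIPE_FORMAT_R8G8B8A8_UNORM,
                                           &count, mods);
/* "complete" is false only if the caller-provided count was smaller than the full list. */
#endif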
584
585 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
586 {
587 return malloc(pInput->sizeInBytes);
588 }
589
590 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT *pInput)
591 {
592 free(pInput->pVirtAddr);
593 return ADDR_OK;
594 }
595
596 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
597 uint64_t *max_alignment)
598 {
599 ADDR_CREATE_INPUT addrCreateInput = {0};
600 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
601 ADDR_REGISTER_VALUE regValue = {0};
602 ADDR_CREATE_FLAGS createFlags = {{0}};
603 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
604 ADDR_E_RETURNCODE addrRet;
605
606 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
607 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
608
609 regValue.gbAddrConfig = info->gb_addr_config;
610 createFlags.value = 0;
611
612 addrCreateInput.chipFamily = info->family_id;
613 addrCreateInput.chipRevision = info->chip_external_rev;
614
615 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
616 return NULL;
617
618 if (addrCreateInput.chipFamily >= FAMILY_AI) {
619 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
620 } else {
621 regValue.noOfBanks = info->mc_arb_ramcfg & 0x3;
622 regValue.noOfRanks = (info->mc_arb_ramcfg & 0x4) >> 2;
623
624 regValue.backendDisables = info->enabled_rb_mask;
625 regValue.pTileConfig = info->si_tile_mode_array;
626 regValue.noOfEntries = ARRAY_SIZE(info->si_tile_mode_array);
627 if (addrCreateInput.chipFamily == FAMILY_SI) {
628 regValue.pMacroTileConfig = NULL;
629 regValue.noOfMacroEntries = 0;
630 } else {
631 regValue.pMacroTileConfig = info->cik_macrotile_mode_array;
632 regValue.noOfMacroEntries = ARRAY_SIZE(info->cik_macrotile_mode_array);
633 }
634
635 createFlags.useTileIndex = 1;
636 createFlags.useHtileSliceAlign = 1;
637
638 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
639 }
640
641 addrCreateInput.callbacks.allocSysMem = allocSysMem;
642 addrCreateInput.callbacks.freeSysMem = freeSysMem;
643 addrCreateInput.callbacks.debugPrint = 0;
644 addrCreateInput.createFlags = createFlags;
645 addrCreateInput.regValue = regValue;
646
647 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
648 if (addrRet != ADDR_OK)
649 return NULL;
650
651 if (max_alignment) {
652 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
653 if (addrRet == ADDR_OK) {
654 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
655 }
656 }
657
658 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
659 if (!addrlib) {
660 AddrDestroy(addrCreateOutput.hLib);
661 return NULL;
662 }
663
664 addrlib->handle = addrCreateOutput.hLib;
665 simple_mtx_init(&addrlib->lock, mtx_plain);
666 return addrlib;
667 }
668
669 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
670 {
671 simple_mtx_destroy(&addrlib->lock);
672 AddrDestroy(addrlib->handle);
673 free(addrlib);
674 }
675
676 void *ac_addrlib_get_handle(struct ac_addrlib *addrlib)
677 {
678 return addrlib->handle;
679 }
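/* Sketch (not compiled): typical lifecycle around the helpers above. The radeon_info is assumed
 * to have been populated by the winsys beforehand.
 */
#if 0
uint64_t max_alignment = 0;
struct ac_addrlib *addrlib = ac_addrlib_create(&info, &max_alignment);
if (!addrlib)
   return false; /* FAMILY_UNKNOWN or AddrCreate failure */

/* ... compute surface layouts ... */

ac_addrlib_destroy(addrlib);
#endif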
680
681 static int surf_config_sanity(const struct ac_surf_config *config, unsigned flags)
682 {
683 /* FMASK is allocated together with the color surface and can't be
684 * allocated separately.
685 */
686 assert(!(flags & RADEON_SURF_FMASK));
687 if (flags & RADEON_SURF_FMASK)
688 return -EINVAL;
689
690 /* All dimensions must be at least 1. */
691 if (!config->info.width || !config->info.height || !config->info.depth ||
692 !config->info.array_size || !config->info.levels)
693 return -EINVAL;
694
695 switch (config->info.samples) {
696 case 0:
697 case 1:
698 case 2:
699 case 4:
700 case 8:
701 break;
702 case 16:
703 if (flags & RADEON_SURF_Z_OR_SBUFFER)
704 return -EINVAL;
705 break;
706 default:
707 return -EINVAL;
708 }
709
710 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
711 switch (config->info.storage_samples) {
712 case 0:
713 case 1:
714 case 2:
715 case 4:
716 case 8:
717 break;
718 default:
719 return -EINVAL;
720 }
721 }
722
723 if (config->is_3d && config->info.array_size > 1)
724 return -EINVAL;
725 if (config->is_cube && config->info.depth > 1)
726 return -EINVAL;
727
728 return 0;
729 }
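/* Sketch (not compiled): a minimal single-sample 2D config that passes the checks above. Field
 * names follow struct ac_surf_config as used in this file.
 */
#if 0
struct ac_surf_config config = {0};
config.info.width = 1920;
config.info.height = 1080;
config.info.depth = 1;
config.info.array_size = 1;
config.info.levels = 1;
config.info.samples = 1;
config.info.storage_samples = 1;
assert(surf_config_sanity(&config, 0) == 0);
#endif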
730
731 static unsigned bpe_to_format(struct radeon_surf *surf)
732 {
733 if (surf->blk_w != 1 || surf->blk_h != 1) {
734 if (surf->blk_w == 4 && surf->blk_h == 4) {
735 switch (surf->bpe) {
736 case 8:
737 return ADDR_FMT_BC1;
738 case 16:
739 /* BC3 and ASTC 4x4 have the same block dimensions and bpe, so ASTC 4x4 is also reported as
740 * BC3. This matching is fine because addrlib only needs blk_w, blk_h and bpe to compute
741 * surface properties.
742 * TODO: If compress_type could be passed to this function, this ugly BC3/ASTC 4x4 matching
743 * could be avoided.
744 */
745 return ADDR_FMT_BC3;
746 default:
747 unreachable("invalid compressed bpe");
748 }
749 } else if (surf->blk_w == 5 && surf->blk_h == 4)
750 return ADDR_FMT_ASTC_5x4;
751 else if (surf->blk_w == 5 && surf->blk_h == 5)
752 return ADDR_FMT_ASTC_5x5;
753 else if (surf->blk_w == 6 && surf->blk_h == 5)
754 return ADDR_FMT_ASTC_6x5;
755 else if (surf->blk_w == 6 && surf->blk_h == 6)
756 return ADDR_FMT_ASTC_6x6;
757 else if (surf->blk_w == 8 && surf->blk_h == 5)
758 return ADDR_FMT_ASTC_8x5;
759 else if (surf->blk_w == 8 && surf->blk_h == 6)
760 return ADDR_FMT_ASTC_8x6;
761 else if (surf->blk_w == 8 && surf->blk_h == 8)
762 return ADDR_FMT_ASTC_8x8;
763 else if (surf->blk_w == 10 && surf->blk_h == 5)
764 return ADDR_FMT_ASTC_10x5;
765 else if (surf->blk_w == 10 && surf->blk_h == 6)
766 return ADDR_FMT_ASTC_10x6;
767 else if (surf->blk_w == 10 && surf->blk_h == 8)
768 return ADDR_FMT_ASTC_10x8;
769 else if (surf->blk_w == 10 && surf->blk_h == 10)
770 return ADDR_FMT_ASTC_10x10;
771 else if (surf->blk_w == 12 && surf->blk_h == 10)
772 return ADDR_FMT_ASTC_12x10;
773 else if (surf->blk_w == 12 && surf->blk_h == 12)
774 return ADDR_FMT_ASTC_12x12;
775 } else {
776 switch (surf->bpe) {
777 case 1:
778 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
779 return ADDR_FMT_8;
780 case 2:
781 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
782 return ADDR_FMT_16;
783 case 4:
784 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
785 return ADDR_FMT_32;
786 case 8:
787 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
788 return ADDR_FMT_32_32;
789 case 12:
790 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
791 return ADDR_FMT_32_32_32;
792 case 16:
793 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
794 return ADDR_FMT_32_32_32_32;
795 default:
796 unreachable("invalid bpe");
797 }
798 }
799 return ADDR_FMT_INVALID;
800 }
801
802 /* The addrlib pitch alignment is forced to this number for all chips to support interop
803 * between any 2 chips.
804 */
805 #define LINEAR_PITCH_ALIGNMENT 256
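/* Worked example: with bpe = 4 (e.g. RGBA8), the width passed to addrlib for a single-level
 * linear surface is padded to a multiple of 256 / 4 = 64 pixels, which yields a 256-byte pitch
 * on every chip.
 */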
806
807 static int gfx6_compute_level(ADDR_HANDLE addrlib, const struct ac_surf_config *config,
808 struct radeon_surf *surf, bool is_stencil, unsigned level,
809 bool compressed, ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
810 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
811 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
812 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
813 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
814 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
815 {
816 struct legacy_surf_level *surf_level;
817 struct legacy_surf_dcc_level *dcc_level;
818 ADDR_E_RETURNCODE ret;
819
820 AddrSurfInfoIn->mipLevel = level;
821 AddrSurfInfoIn->width = u_minify(config->info.width, level);
822 AddrSurfInfoIn->height = u_minify(config->info.height, level);
823
824 /* Make GFX6 linear surfaces compatible with all chips for multi-GPU interop. */
825 if (config->info.levels == 1 && AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
826 AddrSurfInfoIn->bpp && util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
827 unsigned alignment = LINEAR_PITCH_ALIGNMENT / surf->bpe;
828
829 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
830 }
831
832 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
833 * true for r32g32b32 formats. */
834 if (AddrSurfInfoIn->bpp == 96) {
835 assert(config->info.levels == 1);
836 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
837
838 /* The least common multiple of 64 bytes and 12 bytes/pixel is
839 * 192 bytes, or 16 pixels. */
840 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
841 }
842
843 if (config->is_3d)
844 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
845 else if (config->is_cube)
846 AddrSurfInfoIn->numSlices = 6;
847 else
848 AddrSurfInfoIn->numSlices = config->info.array_size;
849
850 if (level > 0) {
851 /* Set the base level pitch. This is needed for calculation
852 * of non-zero levels. */
853 if (is_stencil)
854 AddrSurfInfoIn->basePitch = surf->u.legacy.zs.stencil_level[0].nblk_x;
855 else
856 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
857
858 /* Convert blocks to pixels for compressed formats. */
859 if (compressed)
860 AddrSurfInfoIn->basePitch *= surf->blk_w;
861 }
862
863 ret = AddrComputeSurfaceInfo(addrlib, AddrSurfInfoIn, AddrSurfInfoOut);
864 if (ret != ADDR_OK) {
865 return ret;
866 }
867
868 surf_level = is_stencil ? &surf->u.legacy.zs.stencil_level[level] : &surf->u.legacy.level[level];
869 dcc_level = &surf->u.legacy.color.dcc_level[level];
870 surf_level->offset_256B = align64(surf->surf_size, AddrSurfInfoOut->baseAlign) / 256;
871 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
872 surf_level->nblk_x = AddrSurfInfoOut->pitch;
873 surf_level->nblk_y = AddrSurfInfoOut->height;
874
875 switch (AddrSurfInfoOut->tileMode) {
876 case ADDR_TM_LINEAR_ALIGNED:
877 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
878 break;
879 case ADDR_TM_1D_TILED_THIN1:
880 case ADDR_TM_1D_TILED_THICK:
881 case ADDR_TM_PRT_TILED_THIN1:
882 surf_level->mode = RADEON_SURF_MODE_1D;
883 break;
884 default:
885 surf_level->mode = RADEON_SURF_MODE_2D;
886 break;
887 }
888
889 if (is_stencil)
890 surf->u.legacy.zs.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
891 else
892 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
893
894 if (AddrSurfInfoIn->flags.prt) {
895 if (level == 0) {
896 surf->prt_tile_width = AddrSurfInfoOut->pitchAlign;
897 surf->prt_tile_height = AddrSurfInfoOut->heightAlign;
898 surf->prt_tile_depth = AddrSurfInfoOut->depthAlign;
899 }
900 if (surf_level->nblk_x >= surf->prt_tile_width &&
901 surf_level->nblk_y >= surf->prt_tile_height) {
902 /* +1 because the current level is not in the miptail */
903 surf->first_mip_tail_level = level + 1;
904 }
905 }
906
907 surf->surf_size = (uint64_t)surf_level->offset_256B * 256 + AddrSurfInfoOut->surfSize;
908
909 /* Clear DCC fields at the beginning. */
910 if (!AddrSurfInfoIn->flags.depth && !AddrSurfInfoIn->flags.stencil)
911 dcc_level->dcc_offset = 0;
912
913 /* The previous level's flag tells us if we can use DCC for this level. */
914 if (AddrSurfInfoIn->flags.dccCompatible && (level == 0 || AddrDccOut->subLvlCompressible)) {
915 bool prev_level_clearable = level == 0 || AddrDccOut->dccRamSizeAligned;
916
917 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
918 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
919 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
920 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
921 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
922
923 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
924
925 if (ret == ADDR_OK) {
926 dcc_level->dcc_offset = surf->meta_size;
927 surf->num_meta_levels = level + 1;
928 surf->meta_size = dcc_level->dcc_offset + AddrDccOut->dccRamSize;
929 surf->meta_alignment_log2 = MAX2(surf->meta_alignment_log2, util_logbase2(AddrDccOut->dccRamBaseAlign));
930
931 /* If the DCC size of a subresource (1 mip level or 1 slice)
932 * is not aligned, the DCC memory layout is not contiguous for
933 * that subresource, which means we can't use fast clear.
934 *
935 * We only do fast clears for whole mipmap levels. If we did
936 * per-slice fast clears, the same restriction would apply.
937 * (i.e. only compute the slice size and see if it's aligned)
938 *
939 * The last level can be non-contiguous and still be clearable
940 * if it's interleaved with the next level that doesn't exist.
941 */
942 if (AddrDccOut->dccRamSizeAligned ||
943 (prev_level_clearable && level == config->info.levels - 1))
944 dcc_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
945 else
946 dcc_level->dcc_fast_clear_size = 0;
947
948 /* Compute the DCC slice size because addrlib doesn't
949 * provide this info. As DCC memory is linear (each
950 * slice is the same size) it's easy to compute.
951 */
952 surf->meta_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
953
954 /* For arrays, we have to compute the DCC info again
955 * with one slice size to get a correct fast clear
956 * size.
957 */
958 if (config->info.array_size > 1) {
959 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
960 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
961 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
962 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
963 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
964
965 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
966 if (ret == ADDR_OK) {
967 /* If the DCC memory isn't properly
968 * aligned, the data are interleaved
969 * across slices.
970 */
971 if (AddrDccOut->dccRamSizeAligned)
972 dcc_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
973 else
974 dcc_level->dcc_slice_fast_clear_size = 0;
975 }
976
977 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
978 surf->meta_slice_size != dcc_level->dcc_slice_fast_clear_size) {
979 surf->meta_size = 0;
980 surf->num_meta_levels = 0;
981 AddrDccOut->subLvlCompressible = false;
982 }
983 } else {
984 dcc_level->dcc_slice_fast_clear_size = dcc_level->dcc_fast_clear_size;
985 }
986 }
987 }
988
989 /* HTILE. */
990 if (!is_stencil && AddrSurfInfoIn->flags.depth && surf_level->mode == RADEON_SURF_MODE_2D &&
991 level == 0 && !(surf->flags & RADEON_SURF_NO_HTILE)) {
992 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
993 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
994 AddrHtileIn->height = AddrSurfInfoOut->height;
995 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
996 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
997 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
998 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
999 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
1000 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
1001
1002 ret = AddrComputeHtileInfo(addrlib, AddrHtileIn, AddrHtileOut);
1003
1004 if (ret == ADDR_OK) {
1005 surf->meta_size = AddrHtileOut->htileBytes;
1006 surf->meta_slice_size = AddrHtileOut->sliceSize;
1007 surf->meta_alignment_log2 = util_logbase2(AddrHtileOut->baseAlign);
1008 surf->meta_pitch = AddrHtileOut->pitch;
1009 surf->num_meta_levels = level + 1;
1010 }
1011 }
1012
1013 return 0;
1014 }
1015
1016 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf, const struct radeon_info *info)
1017 {
1018 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
1019
1020 if (info->gfx_level >= GFX7)
1021 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
1022 else
1023 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
1024 }
1025
1026 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
1027 {
1028 unsigned index, tileb;
1029
1030 tileb = 8 * 8 * surf->bpe;
1031 tileb = MIN2(surf->u.legacy.tile_split, tileb);
1032
1033 for (index = 0; tileb > 64; index++)
1034 tileb >>= 1;
1035
1036 assert(index < 16);
1037 return index;
1038 }
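/* Worked example: with bpe = 4 and tile_split >= 256, tileb starts at 8 * 8 * 4 = 256 bytes and
 * is halved twice before the loop stops at 64, so the returned macro tile index is 2.
 */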
1039
1040 static bool get_display_flag(const struct ac_surf_config *config, const struct radeon_surf *surf)
1041 {
1042 unsigned num_channels = config->info.num_channels;
1043 unsigned bpe = surf->bpe;
1044
1045 /* With modifiers the kernel is in charge of whether it is displayable.
1046 * We need to ensure at least 32 pixels pitch alignment, but this is
1047 * always the case when the blocksize >= 4K.
1048 */
1049 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
1050 return false;
1051
1052 if (!config->is_1d && !config->is_3d && !config->is_cube &&
1053 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
1054 surf->flags & RADEON_SURF_SCANOUT && config->info.samples <= 1 && surf->blk_w <= 2 &&
1055 surf->blk_h == 1) {
1056 /* subsampled */
1057 if (surf->blk_w == 2 && surf->blk_h == 1)
1058 return true;
1059
1060 if (/* RGBA8 or RGBA16F */
1061 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
1062 /* R5G6B5 or R5G5B5A1 */
1063 (bpe == 2 && num_channels >= 3) ||
1064 /* C8 palette */
1065 (bpe == 1 && num_channels == 1))
1066 return true;
1067 }
1068 return false;
1069 }
1070
1071 /**
1072 * This must be called after the first level is computed.
1073 *
1074 * Copy surface-global settings like pipe/bank config from level 0 surface
1075 * computation, and compute tile swizzle.
1076 */
1077 static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *info,
1078 const struct ac_surf_config *config,
1079 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio, struct radeon_surf *surf)
1080 {
1081 surf->surf_alignment_log2 = util_logbase2(csio->baseAlign);
1082 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
1083 gfx6_set_micro_tile_mode(surf, info);
1084
1085 /* For 2D modes only. */
1086 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
1087 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
1088 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
1089 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
1090 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
1091 surf->u.legacy.num_banks = csio->pTileInfo->banks;
1092 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
1093 } else {
1094 surf->u.legacy.macro_tile_index = 0;
1095 }
1096
1097 /* Compute tile swizzle. */
1098 /* TODO: fix tile swizzle with mipmapping for GFX6 */
1099 if ((info->gfx_level >= GFX7 || config->info.levels == 1) && config->info.surf_index &&
1100 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
1101 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
1102 !get_display_flag(config, surf)) {
1103 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
1104 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
1105
1106 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1107 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1108
1109 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1110 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
1111 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
1112 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
1113 AddrBaseSwizzleIn.tileMode = csio->tileMode;
1114
1115 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn, &AddrBaseSwizzleOut);
1116 if (r != ADDR_OK)
1117 return r;
1118
1119 assert(AddrBaseSwizzleOut.tileSwizzle <=
1120 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1121 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
1122 }
1123 return 0;
1124 }
1125
1126 static void ac_compute_cmask(const struct radeon_info *info, const struct ac_surf_config *config,
1127 struct radeon_surf *surf)
1128 {
1129 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
1130 unsigned num_pipes = info->num_tile_pipes;
1131 unsigned cl_width, cl_height;
1132
1133 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
1134 (config->info.samples >= 2 && !surf->fmask_size))
1135 return;
1136
1137 assert(info->gfx_level <= GFX8);
1138
1139 switch (num_pipes) {
1140 case 2:
1141 cl_width = 32;
1142 cl_height = 16;
1143 break;
1144 case 4:
1145 cl_width = 32;
1146 cl_height = 32;
1147 break;
1148 case 8:
1149 cl_width = 64;
1150 cl_height = 32;
1151 break;
1152 case 16: /* Hawaii */
1153 cl_width = 64;
1154 cl_height = 64;
1155 break;
1156 default:
1157 assert(0);
1158 return;
1159 }
1160
1161 unsigned base_align = num_pipes * pipe_interleave_bytes;
1162
1163 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width * 8);
1164 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height * 8);
1165 unsigned slice_elements = (width * height) / (8 * 8);
1166
1167 /* Each element of CMASK is a nibble. */
1168 unsigned slice_bytes = slice_elements / 2;
1169
1170 surf->u.legacy.color.cmask_slice_tile_max = (width * height) / (128 * 128);
1171 if (surf->u.legacy.color.cmask_slice_tile_max)
1172 surf->u.legacy.color.cmask_slice_tile_max -= 1;
1173
1174 unsigned num_layers;
1175 if (config->is_3d)
1176 num_layers = config->info.depth;
1177 else if (config->is_cube)
1178 num_layers = 6;
1179 else
1180 num_layers = config->info.array_size;
1181
1182 surf->cmask_alignment_log2 = util_logbase2(MAX2(256, base_align));
1183 surf->cmask_slice_size = align(slice_bytes, base_align);
1184 surf->cmask_size = surf->cmask_slice_size * num_layers;
1185 }
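/* Worked example (representative, made-up numbers): with num_pipes = 4 (cl_width = cl_height =
 * 32) and pipe_interleave_bytes = 256, base_align = 1024. A 1920x1080 level 0 is padded to
 * 2048x1280, giving 2048 * 1280 / 64 = 40960 CMASK nibbles = 20480 bytes per slice, which is
 * already 1024-byte aligned, so cmask_slice_size = 20480.
 */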
1186
1187 /**
1188 * Fill in the tiling information in \p surf based on the given surface config.
1189 *
1190 * The following fields of \p surf must be initialized by the caller:
1191 * blk_w, blk_h, bpe, flags.
1192 */
1193 static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
1194 const struct ac_surf_config *config, enum radeon_surf_mode mode,
1195 struct radeon_surf *surf)
1196 {
1197 unsigned level;
1198 bool compressed;
1199 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1200 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
1201 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
1202 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
1203 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
1204 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
1205 ADDR_TILEINFO AddrTileInfoIn = {0};
1206 ADDR_TILEINFO AddrTileInfoOut = {0};
1207 int r;
1208
1209 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
1210 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
1211 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
1212 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
1213 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
1214 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
1215 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
1216
1217 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1218
1219 /* MSAA requires 2D tiling. */
1220 if (config->info.samples > 1)
1221 mode = RADEON_SURF_MODE_2D;
1222
1223 /* DB doesn't support linear layouts. */
1224 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) && mode < RADEON_SURF_MODE_1D)
1225 mode = RADEON_SURF_MODE_1D;
1226
1227 /* Set the requested tiling mode. */
1228 switch (mode) {
1229 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1230 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
1231 break;
1232 case RADEON_SURF_MODE_1D:
1233 if (surf->flags & RADEON_SURF_PRT)
1234 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_TILED_THIN1;
1235 else if (config->is_3d)
1236 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THICK;
1237 else
1238 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
1239 break;
1240 case RADEON_SURF_MODE_2D:
1241 if (surf->flags & RADEON_SURF_PRT) {
1242 if (config->is_3d && surf->bpe < 8) {
1243 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THICK;
1244 } else {
1245 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THIN1;
1246 }
1247 } else {
1248 if (config->is_3d) {
1249 /* GFX6 doesn't have 3D_TILED_XTHICK. */
1250 if (info->gfx_level >= GFX7)
1251 AddrSurfInfoIn.tileMode = ADDR_TM_3D_TILED_XTHICK;
1252 else
1253 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_XTHICK;
1254 } else {
1255 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
1256 }
1257 }
1258 break;
1259 default:
1260 assert(0);
1261 }
1262
1263 AddrSurfInfoIn.format = bpe_to_format(surf);
1264 if (!compressed)
1265 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
1266
1267 /* Setting ADDR_FMT_32_32_32 breaks gfx6-8, while INVALID works. */
1268 if (AddrSurfInfoIn.format == ADDR_FMT_32_32_32)
1269 AddrSurfInfoIn.format = ADDR_FMT_INVALID;
1270
1271 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1272 AddrSurfInfoIn.tileIndex = -1;
1273
1274 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
1275 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1276 }
1277
1278 /* Set the micro tile type. */
1279 if (surf->flags & RADEON_SURF_SCANOUT)
1280 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
1281 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
1282 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
1283 else
1284 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
1285
1286 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1287 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1288 AddrSurfInfoIn.flags.cube = config->is_cube;
1289 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1290 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
1291 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
1292 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
1293
1294 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
1295 * requested, because TC-compatible HTILE requires 2D tiling.
1296 */
1297 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible && !config->is_3d &&
1298 !AddrSurfInfoIn.flags.fmask && config->info.samples <= 1 &&
1299 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
1300
1301 /* DCC notes:
1302 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
1303 * with samples >= 4.
1304 * - Mipmapped array textures have low performance (discovered by a closed
1305 * driver team).
1306 */
1307 AddrSurfInfoIn.flags.dccCompatible =
1308 info->gfx_level >= GFX8 && info->has_graphics && /* disable DCC on compute-only chips */
1309 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1310 !compressed &&
1311 ((config->info.array_size == 1 && config->info.depth == 1) || config->info.levels == 1);
1312
1313 AddrSurfInfoIn.flags.noStencil =
1314 !(surf->flags & RADEON_SURF_SBUFFER) || (surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1315
1316 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1317
1318 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
1319 * for Z and stencil. This can cause a number of problems which we work
1320 * around here:
1321 *
1322 * - a depth part that is incompatible with mipmapped texturing
1323 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
1324 * incorrect tiling applied to the stencil part, stencil buffer
1325 * memory accesses that go out of bounds) even without mipmapping
1326 *
1327 * Some piglit tests that are prone to different types of related
1328 * failures:
1329 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
1330 * ./bin/framebuffer-blit-levels {draw,read} stencil
1331 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
1332 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
1333 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
1334 */
1335 int stencil_tile_idx = -1;
1336
1337 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
1338 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
1339 /* Compute stencilTileIdx that is compatible with the (depth)
1340 * tileIdx. This degrades the depth surface if necessary to
1341 * ensure that a matching stencilTileIdx exists. */
1342 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
1343
1344 /* Keep the depth mip-tail compatible with texturing. */
1345 if (config->info.levels > 1 && !(surf->flags & RADEON_SURF_NO_STENCIL_ADJUST))
1346 AddrSurfInfoIn.flags.noStencil = 1;
1347 }
1348
1349 /* Set preferred macrotile parameters. This is usually required
1350 * for shared resources. This is for 2D tiling only. */
1351 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
1352 AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 && surf->u.legacy.bankw &&
1353 surf->u.legacy.bankh && surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
1354 /* If any of these parameters are incorrect, the calculation
1355 * will fail. */
1356 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
1357 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
1358 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
1359 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
1360 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
1361 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
1362 AddrSurfInfoIn.flags.opt4Space = 0;
1363 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
1364
1365 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
1366 * the tile index, because we are expected to know it if
1367 * we know the other parameters.
1368 *
1369 * This is something that can easily be fixed in Addrlib.
1370 * For now, just figure it out here.
1371 * Note that only 2D_TILE_THIN1 is handled here.
1372 */
1373 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1374 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
1375
1376 if (info->gfx_level == GFX6) {
1377 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
1378 if (surf->bpe == 2)
1379 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
1380 else
1381 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
1382 } else {
1383 if (surf->bpe == 1)
1384 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
1385 else if (surf->bpe == 2)
1386 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
1387 else if (surf->bpe == 4)
1388 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
1389 else
1390 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
1391 }
1392 } else {
1393 /* GFX7 - GFX8 */
1394 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
1395 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
1396 else
1397 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
1398
1399 /* Addrlib doesn't set this if tileIndex is forced like above. */
1400 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
1401 }
1402 }
1403
1404 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1405 surf->num_meta_levels = 0;
1406 surf->surf_size = 0;
1407 surf->meta_size = 0;
1408 surf->meta_slice_size = 0;
1409 surf->meta_alignment_log2 = 0;
1410
1411 const bool only_stencil =
1412 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
1413
1414 /* Calculate texture layout information. */
1415 if (!only_stencil) {
1416 for (level = 0; level < config->info.levels; level++) {
1417 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed, &AddrSurfInfoIn,
1418 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, &AddrHtileIn,
1419 &AddrHtileOut);
1420 if (r)
1421 return r;
1422
1423 if (level > 0)
1424 continue;
1425
1426 if (!AddrSurfInfoOut.tcCompatible) {
1427 AddrSurfInfoIn.flags.tcCompatible = 0;
1428 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1429 }
1430
1431 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
1432 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
1433 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
1434 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
1435
1436 assert(stencil_tile_idx >= 0);
1437 }
1438
1439 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1440 if (r)
1441 return r;
1442 }
1443 }
1444
1445 /* Calculate texture layout information for stencil. */
1446 if (surf->flags & RADEON_SURF_SBUFFER) {
1447 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
1448 AddrSurfInfoIn.bpp = 8;
1449 AddrSurfInfoIn.format = ADDR_FMT_8;
1450 AddrSurfInfoIn.flags.depth = 0;
1451 AddrSurfInfoIn.flags.stencil = 1;
1452 AddrSurfInfoIn.flags.tcCompatible = 0;
1453 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1454 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
1455
1456 for (level = 0; level < config->info.levels; level++) {
1457 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed, &AddrSurfInfoIn,
1458 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, NULL, NULL);
1459 if (r)
1460 return r;
1461
1462 /* DB uses the depth pitch for both stencil and depth. */
1463 if (!only_stencil) {
1464 if (surf->u.legacy.zs.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
1465 surf->u.legacy.stencil_adjusted = true;
1466 } else {
1467 surf->u.legacy.level[level].nblk_x = surf->u.legacy.zs.stencil_level[level].nblk_x;
1468 }
1469
1470 if (level == 0) {
1471 if (only_stencil) {
1472 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1473 if (r)
1474 return r;
1475 }
1476
1477 /* For 2D modes only. */
1478 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1479 surf->u.legacy.stencil_tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1480 }
1481 }
1482 }
1483 }
1484
1485 /* Compute FMASK. */
1486 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color && info->has_graphics &&
1487 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1488 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1489 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1490 ADDR_TILEINFO fmask_tile_info = {0};
1491
1492 fin.size = sizeof(fin);
1493 fout.size = sizeof(fout);
1494
1495 fin.tileMode = AddrSurfInfoOut.tileMode;
1496 fin.pitch = AddrSurfInfoOut.pitch;
1497 fin.height = config->info.height;
1498 fin.numSlices = AddrSurfInfoIn.numSlices;
1499 fin.numSamples = AddrSurfInfoIn.numSamples;
1500 fin.numFrags = AddrSurfInfoIn.numFrags;
1501 fin.tileIndex = -1;
1502 fout.pTileInfo = &fmask_tile_info;
1503
1504 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1505 if (r)
1506 return r;
1507
1508 surf->fmask_size = fout.fmaskBytes;
1509 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
1510 surf->fmask_slice_size = fout.sliceSize;
1511 surf->fmask_tile_swizzle = 0;
1512
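/* slice_tile_max is expressed in 8x8-pixel tiles (64 pixels each), minus one. */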
1513 surf->u.legacy.color.fmask.slice_tile_max = (fout.pitch * fout.height) / 64;
1514 if (surf->u.legacy.color.fmask.slice_tile_max)
1515 surf->u.legacy.color.fmask.slice_tile_max -= 1;
1516
1517 surf->u.legacy.color.fmask.tiling_index = fout.tileIndex;
1518 surf->u.legacy.color.fmask.bankh = fout.pTileInfo->bankHeight;
1519 surf->u.legacy.color.fmask.pitch_in_pixels = fout.pitch;
1520
1521 /* Compute tile swizzle for FMASK. */
1522 if (config->info.fmask_surf_index && !(surf->flags & RADEON_SURF_SHAREABLE)) {
1523 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1524 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1525
1526 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1527 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1528
1529 /* This counter starts from 1 instead of 0. */
1530 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1531 xin.tileIndex = fout.tileIndex;
1532 xin.macroModeIndex = fout.macroModeIndex;
1533 xin.pTileInfo = fout.pTileInfo;
1534 xin.tileMode = fin.tileMode;
1535
1536 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1537 if (r != ADDR_OK)
1538 return r;
1539
1540 assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1541 surf->fmask_tile_swizzle = xout.tileSwizzle;
1542 }
1543 }
1544
1545 /* Recalculate the whole DCC miptree size including disabled levels.
1546 * This is what addrlib does, but calling addrlib would be a lot more
1547 * complicated.
1548 */
1549 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_size && config->info.levels > 1) {
1550 /* The smallest miplevels that are never compressed by DCC
1551 * still read the DCC buffer from memory if the base level uses DCC,
1552 * and for some reason the DCC buffer needs to be larger if
1553 * the miptree uses non-zero tile_swizzle. Otherwise there are
1554 * VM faults.
1555 *
1556 * "dcc_alignment * 4" was determined by trial and error.
1557 */
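/* surf_size >> 8 divides by 256, matching legacy DCC's 1 byte of metadata per 256 bytes of color data. */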
1558 surf->meta_size = align64(surf->surf_size >> 8, (1ull << surf->meta_alignment_log2) * 4);
1559 }
1560
1561 /* Make sure HTILE covers the whole miptree, because the shader reads
1562 * TC-compatible HTILE even for levels where it's disabled by DB.
1563 */
1564 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_TC_COMPATIBLE_HTILE) &&
1565 surf->meta_size && config->info.levels > 1) {
1566 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1567 const unsigned total_pixels = surf->surf_size / surf->bpe;
1568 const unsigned htile_block_size = 8 * 8;
1569 const unsigned htile_element_size = 4;
1570
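/* Example: with surf_size = 32 MiB and bpe = 4, total_pixels = 8M, so meta_size = 8M / 64 * 4 = 512 KiB before alignment. */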
1571 surf->meta_size = (total_pixels / htile_block_size) * htile_element_size;
1572 surf->meta_size = align(surf->meta_size, 1 << surf->meta_alignment_log2);
1573 } else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && !surf->meta_size) {
1574 /* Unset this if HTILE is not present. */
1575 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1576 }
1577
1578 surf->is_linear = (only_stencil ? surf->u.legacy.zs.stencil_level[0].mode :
1579 surf->u.legacy.level[0].mode) == RADEON_SURF_MODE_LINEAR_ALIGNED;
1580
1581 surf->is_displayable = surf->is_linear || surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1582 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1583
1584 surf->thick_tiling = AddrSurfInfoOut.tileMode == ADDR_TM_1D_TILED_THICK ||
1585 AddrSurfInfoOut.tileMode == ADDR_TM_2D_TILED_THICK ||
1586 AddrSurfInfoOut.tileMode == ADDR_TM_2B_TILED_THICK ||
1587 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_THICK ||
1588 AddrSurfInfoOut.tileMode == ADDR_TM_3B_TILED_THICK ||
1589 AddrSurfInfoOut.tileMode == ADDR_TM_2D_TILED_XTHICK ||
1590 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_XTHICK ||
1591 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_TILED_THICK ||
1592 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_2D_TILED_THICK ||
1593 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_3D_TILED_THICK ||
1594 /* Not thick per se, but these also benefit from the 3D access pattern
1595 * due to pipe rotation between slices.
1596 */
1597 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_THIN1 ||
1598 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_3D_TILED_THIN1;
1599
1600 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1601 * used at the same time. This case is not currently expected to occur
1602 * because we don't use rotated. Enforce this restriction on all chips
1603 * to facilitate testing.
1604 */
1605 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1606 assert(!"rotate micro tile mode is unsupported");
1607 return ADDR_ERROR;
1608 }
1609
1610 ac_compute_cmask(info, config, surf);
1611 return 0;
1612 }
1613
1614 /* This is only called when expecting a tiled layout. */
1615 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib, const struct radeon_info *info,
1616 struct radeon_surf *surf,
1617 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in, bool is_fmask,
1618 AddrSwizzleMode *swizzle_mode)
1619 {
1620 ADDR_E_RETURNCODE ret;
1621 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1622 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1623
1624 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1625 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1626
1627 sin.flags = in->flags;
1628 sin.resourceType = in->resourceType;
1629 sin.format = in->format;
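/* Note: "resourceLoction" below is the field's actual (misspelled) name in addrlib. */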
1630 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1631
1632 /* TODO: We could allow some of these: */
1633 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1634
1635 if (info->gfx_level >= GFX11) {
1636 /* Disable 256K on APUs because it doesn't work with DAL. */
1637 if (!info->has_dedicated_vram) {
1638 sin.forbiddenBlock.gfx11.thin256KB = 1;
1639 sin.forbiddenBlock.gfx11.thick256KB = 1;
1640 }
1641 } else {
1642 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1643 }
1644
1645 sin.bpp = in->bpp;
1646 sin.width = in->width;
1647 sin.height = in->height;
1648 sin.numSlices = in->numSlices;
1649 sin.numMipLevels = in->numMipLevels;
1650 sin.numSamples = in->numSamples;
1651 sin.numFrags = in->numFrags;
1652
1653 if (is_fmask) {
1654 sin.flags.display = 0;
1655 sin.flags.color = 0;
1656 sin.flags.fmask = 1;
1657 }
1658
1659 /* With PRT images, force a 64 KiB block size so that the created image
1660 * is consistent with the format properties that Vulkan reports
1661 * independently of any particular image. */
1662 if (sin.flags.prt) {
1663 sin.forbiddenBlock.macroThin4KB = 1;
1664 sin.forbiddenBlock.macroThick4KB = 1;
1665 if (info->gfx_level >= GFX11) {
1666 sin.forbiddenBlock.gfx11.thin256KB = 1;
1667 sin.forbiddenBlock.gfx11.thick256KB = 1;
1668 }
1669 sin.forbiddenBlock.linear = 1;
1670 } else if (surf->flags & RADEON_SURF_PREFER_4K_ALIGNMENT) {
1671 sin.forbiddenBlock.macroThin64KB = 1;
1672 sin.forbiddenBlock.macroThick64KB = 1;
1673 }
1674
1675 if (surf->flags & (RADEON_SURF_PREFER_64K_ALIGNMENT | RADEON_SURF_PREFER_4K_ALIGNMENT)) {
1676 if (info->gfx_level >= GFX11) {
1677 sin.forbiddenBlock.gfx11.thin256KB = 1;
1678 sin.forbiddenBlock.gfx11.thick256KB = 1;
1679 }
1680 }
1681
1682 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1683 sin.forbiddenBlock.linear = 1;
1684
1685 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1686 sin.preferredSwSet.sw_D = 1;
1687 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1688 sin.preferredSwSet.sw_S = 1;
1689 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1690 sin.preferredSwSet.sw_Z = 1;
1691 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1692 sin.preferredSwSet.sw_R = 1;
1693 }
1694
1695 if (info->gfx_level >= GFX10 && in->resourceType == ADDR_RSRC_TEX_3D && in->numSlices > 1) {
1696 /* 3D textures should use S swizzle modes for the best performance.
1697 * The only exception is 3D render targets, which prefer 64KB_D_X.
1698 *
1699 * 3D texture sampler performance with a very large 3D texture:
1700 * ADDR_SW_64KB_R_X = 19 FPS (DCC on), 26 FPS (DCC off)
1701 * ADDR_SW_64KB_Z_X = 25 FPS
1702 * ADDR_SW_64KB_D_X = 53 FPS
1703 * ADDR_SW_4KB_S = 53 FPS
1704 * ADDR_SW_64KB_S = 53 FPS
1705 * ADDR_SW_64KB_S_T = 61 FPS
1706 * ADDR_SW_4KB_S_X = 63 FPS
1707 * ADDR_SW_64KB_S_X = 62 FPS
1708 */
1709 sin.preferredSwSet.sw_S = 1;
1710 }
1711
1712 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1713 if (ret != ADDR_OK)
1714 return ret;
1715
1716 *swizzle_mode = sout.swizzleMode;
1717 return 0;
1718 }
1719
1720 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1721 {
1722 switch (info->gfx_level) {
1723 case GFX9:
1724 return sw_mode != ADDR_SW_LINEAR;
1725
1726 case GFX10:
1727 case GFX10_3:
1728 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1729
1730 case GFX11:
1731 case GFX11_5:
1732 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X ||
1733 sw_mode == ADDR_SW_256KB_Z_X || sw_mode == ADDR_SW_256KB_R_X;
1734
1735 default:
1736 unreachable("invalid gfx_level");
1737 }
1738 }
1739
1740 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1741 const struct radeon_surf *surf)
1742 {
1743 assert(info->gfx_level < GFX12);
1744
1745 bool single_indep = surf->u.gfx9.color.dcc.independent_64B_blocks !=
1746 surf->u.gfx9.color.dcc.independent_128B_blocks;
1747 bool valid_64b = surf->u.gfx9.color.dcc.independent_64B_blocks &&
1748 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1749 bool valid_128b = surf->u.gfx9.color.dcc.independent_128B_blocks &&
1750 (surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B ||
1751 (info->gfx_level >= GFX11_5 &&
1752 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B));
1753
1754 if (info->gfx_level <= GFX9) {
1755 /* Only independent 64B blocks are supported. */
1756 return single_indep && valid_64b;
1757 }
1758
1759 if (info->family == CHIP_NAVI10) {
1760 /* Only independent 128B blocks are supported. */
1761 return single_indep && valid_128b;
1762 }
1763
1764 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1765 /* Either 64B or 128B can be used, but the INDEPENDENT_*_BLOCKS setting must match.
1766 * If 64B is used, DCC image stores are unsupported.
1767 */
1768 return single_indep && (valid_64b || valid_128b);
1769 }
1770
1771 /* Valid settings are the same as NAVI14 + (64B && 128B && max_compressed_block_size == 64B) */
1772 return (single_indep && (valid_64b || valid_128b)) || valid_64b;
1773 }
1774
1775 static bool gfx10_DCN_requires_independent_64B_blocks(const struct radeon_info *info,
1776 const struct ac_surf_config *config)
1777 {
1778 assert(info->gfx_level >= GFX10);
1779
1780 /* Older kernels have buggy DAL. */
1781 if (info->drm_minor <= 43)
1782 return true;
1783
1784 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1785 return config->info.width > 2560 || config->info.height > 2560;
1786 }
1787
1788 void ac_modifier_max_extent(const struct radeon_info *info,
1789 uint64_t modifier, uint32_t *width, uint32_t *height)
1790 {
1791 /* DCC is supported with any size. The maximum width per display pipe is 5760, but multiple
1792 * display pipes can be used to drive the display.
1793 */
1794 *width = 16384;
1795 *height = 16384;
1796
1797 if (info->gfx_level < GFX12 && ac_modifier_has_dcc(modifier)) {
1798 bool independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
1799
1800 if (info->gfx_level >= GFX10 && !independent_64B_blocks) {
1801 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1802 *width = 2560;
1803 *height = 2560;
1804 }
1805 }
1806 }
1807
1808 static bool gfx9_is_dcc_supported_by_DCN(const struct radeon_info *info,
1809 const struct ac_surf_config *config,
1810 const struct radeon_surf *surf, bool rb_aligned,
1811 bool pipe_aligned)
1812 {
1813 if (!info->use_display_dcc_unaligned && !info->use_display_dcc_with_retile_blit)
1814 return false;
1815
1816 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1817 if (surf->bpe != 4)
1818 return false;
1819
1820 /* Handle unaligned DCC. */
1821 if (info->use_display_dcc_unaligned && (rb_aligned || pipe_aligned))
1822 return false;
1823
1824 switch (info->gfx_level) {
1825 case GFX9:
1826 /* There are more constraints, but we always set
1827 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1828 * which always works.
1829 */
1830 assert(surf->u.gfx9.color.dcc.independent_64B_blocks &&
1831 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1832 return true;
1833 case GFX10:
1834 case GFX10_3:
1835 case GFX11:
1836 case GFX11_5:
1837 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1838 if (info->gfx_level == GFX10 && surf->u.gfx9.color.dcc.independent_128B_blocks)
1839 return false;
1840
1841 return (!gfx10_DCN_requires_independent_64B_blocks(info, config) ||
1842 (surf->u.gfx9.color.dcc.independent_64B_blocks &&
1843 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1844 default:
1845 unreachable("unhandled chip");
1846 return false;
1847 }
1848 }
1849
1850 static void ac_copy_dcc_equation(const struct radeon_info *info,
1851 ADDR2_COMPUTE_DCCINFO_OUTPUT *dcc,
1852 struct gfx9_meta_equation *equation)
1853 {
1854 assert(info->gfx_level < GFX12);
1855
1856 equation->meta_block_width = dcc->metaBlkWidth;
1857 equation->meta_block_height = dcc->metaBlkHeight;
1858 equation->meta_block_depth = dcc->metaBlkDepth;
1859
1860 if (info->gfx_level >= GFX10) {
1861 /* gfx9_meta_equation doesn't store the first 4 and the last 8 elements. They must be 0. */
1862 for (unsigned i = 0; i < 4; i++)
1863 assert(dcc->equation.gfx10_bits[i] == 0);
1864
1865 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 4; i < 68; i++)
1866 assert(dcc->equation.gfx10_bits[i] == 0);
1867
1868 memcpy(equation->u.gfx10_bits, dcc->equation.gfx10_bits + 4,
1869 sizeof(equation->u.gfx10_bits));
1870 } else {
1871 assert(dcc->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1872
1873 equation->u.gfx9.num_bits = dcc->equation.gfx9.num_bits;
1874 equation->u.gfx9.num_pipe_bits = dcc->equation.gfx9.numPipeBits;
1875 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1876 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1877 equation->u.gfx9.bit[b].coord[c].dim = dcc->equation.gfx9.bit[b].coord[c].dim;
1878 equation->u.gfx9.bit[b].coord[c].ord = dcc->equation.gfx9.bit[b].coord[c].ord;
1879 }
1880 }
1881 }
1882 }
1883
1884 static void ac_copy_cmask_equation(const struct radeon_info *info,
1885 ADDR2_COMPUTE_CMASK_INFO_OUTPUT *cmask,
1886 struct gfx9_meta_equation *equation)
1887 {
1888 assert(info->gfx_level < GFX11);
1889
1890 equation->meta_block_width = cmask->metaBlkWidth;
1891 equation->meta_block_height = cmask->metaBlkHeight;
1892 equation->meta_block_depth = 1;
1893
1894 if (info->gfx_level == GFX9) {
1895 assert(cmask->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1896
1897 equation->u.gfx9.num_bits = cmask->equation.gfx9.num_bits;
1898 equation->u.gfx9.num_pipe_bits = cmask->equation.gfx9.numPipeBits;
1899 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1900 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1901 equation->u.gfx9.bit[b].coord[c].dim = cmask->equation.gfx9.bit[b].coord[c].dim;
1902 equation->u.gfx9.bit[b].coord[c].ord = cmask->equation.gfx9.bit[b].coord[c].ord;
1903 }
1904 }
1905 }
1906 }
1907
1908 static void ac_copy_htile_equation(const struct radeon_info *info,
1909 ADDR2_COMPUTE_HTILE_INFO_OUTPUT *htile,
1910 struct gfx9_meta_equation *equation)
1911 {
1912 assert(info->gfx_level < GFX12);
1913
1914 equation->meta_block_width = htile->metaBlkWidth;
1915 equation->meta_block_height = htile->metaBlkHeight;
1916
1917 /* gfx9_meta_equation doesn't store the first 8 and the last 4 elements. They must be 0. */
1918 for (unsigned i = 0; i < 8; i++)
1919 assert(htile->equation.gfx10_bits[i] == 0);
1920
1921 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 8; i < 72; i++)
1922 assert(htile->equation.gfx10_bits[i] == 0);
1923
1924 memcpy(equation->u.gfx10_bits, htile->equation.gfx10_bits + 8,
1925 sizeof(equation->u.gfx10_bits));
1926 }
1927
1928 static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
1929 const struct ac_surf_config *config, struct radeon_surf *surf,
1930 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1931 {
1932 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1933 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1934 ADDR_E_RETURNCODE ret;
1935
1936 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1937 out.pMipInfo = mip_info;
1938
1939 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1940 if (ret != ADDR_OK)
1941 return ret;
1942
1943 if (in->flags.prt) {
1944 surf->prt_tile_width = out.blockWidth;
1945 surf->prt_tile_height = out.blockHeight;
1946 surf->prt_tile_depth = out.blockSlices;
1947
1948 surf->first_mip_tail_level = out.firstMipIdInTail;
1949
1950 for (unsigned i = 0; i < in->numMipLevels; i++) {
1951 surf->u.gfx9.prt_level_offset[i] = mip_info[i].macroBlockOffset + mip_info[i].mipTailOffset;
1952
1953 if (info->gfx_level >= GFX10)
1954 surf->u.gfx9.prt_level_pitch[i] = mip_info[i].pitch;
1955 else
1956 surf->u.gfx9.prt_level_pitch[i] = out.mipChainPitch;
1957 }
1958 }
1959
1960 surf->thick_tiling = out.blockSlices > 1; /* should be 0 for depth and stencil */
1961
1962 if (in->flags.stencil) {
1963 surf->u.gfx9.zs.stencil_swizzle_mode = in->swizzleMode;
1964 surf->u.gfx9.zs.stencil_epitch =
1965 out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1966 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2, util_logbase2(out.baseAlign));
1967 surf->u.gfx9.zs.stencil_offset = align(surf->surf_size, out.baseAlign);
1968 surf->surf_size = surf->u.gfx9.zs.stencil_offset + out.surfSize;
1969 return 0;
1970 }
1971
1972 surf->u.gfx9.swizzle_mode = in->swizzleMode;
1973 surf->u.gfx9.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1974
1975 /* CMASK fast clear uses these even if FMASK isn't allocated.
1976 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1977 */
1978 if (!in->flags.depth) {
1979 surf->u.gfx9.color.fmask_swizzle_mode = surf->u.gfx9.swizzle_mode & ~0x3;
1980 surf->u.gfx9.color.fmask_epitch = surf->u.gfx9.epitch;
1981 }
1982
1983 surf->u.gfx9.surf_slice_size = out.sliceSize;
1984 surf->u.gfx9.surf_pitch = out.pitch;
1985 surf->u.gfx9.surf_height = out.height;
1986 surf->surf_size = out.surfSize;
1987 surf->surf_alignment_log2 = util_logbase2(out.baseAlign);
1988
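/* Convert the byte-based LINEAR_PITCH_ALIGNMENT into an element-count alignment for the linear pitch adjustments below. */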
1989 const int linear_alignment =
1990 util_next_power_of_two(LINEAR_PITCH_ALIGNMENT / surf->bpe);
1991
1992 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1993 surf->u.gfx9.swizzle_mode == ADDR_SW_LINEAR &&
1994 in->numMipLevels == 1) {
1995 /* Divide surf_pitch (= pitch in pixels) by blk_w to get a
1996 * pitch in elements instead because that's what the hardware needs
1997 * in resource descriptors.
1998 * See the comment in si_descriptors.c.
1999 */
2000 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w,
2001 linear_alignment);
2002 surf->u.gfx9.epitch = surf->u.gfx9.surf_pitch - 1;
2003 /* Adjust surf_slice_size and surf_size to reflect the change made to surf_pitch. */
2004 surf->u.gfx9.surf_slice_size = (uint64_t)surf->u.gfx9.surf_pitch * out.height * surf->bpe;
2005 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
2006
2007 for (unsigned i = 0; i < in->numMipLevels; i++) {
2008 surf->u.gfx9.offset[i] = mip_info[i].offset;
2009 /* Adjust pitch like we did for surf_pitch */
2010 surf->u.gfx9.pitch[i] = align(mip_info[i].pitch / surf->blk_w,
2011 linear_alignment);
2012 }
2013 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
2014 } else if (in->swizzleMode == ADDR_SW_LINEAR) {
2015 for (unsigned i = 0; i < in->numMipLevels; i++) {
2016 surf->u.gfx9.offset[i] = mip_info[i].offset;
2017 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
2018 }
2019 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
2020 } else {
2021 surf->u.gfx9.base_mip_width = mip_info[0].pitch;
2022 }
2023
2024 surf->u.gfx9.base_mip_height = mip_info[0].height;
2025
2026 if (in->flags.depth) {
2027 assert(in->swizzleMode != ADDR_SW_LINEAR);
2028
2029 if (surf->flags & RADEON_SURF_NO_HTILE)
2030 return 0;
2031
2032 /* HTILE */
2033 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
2034 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
2035 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2036
2037 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
2038 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
2039 hout.pMipInfo = meta_mip_info;
2040
2041 assert(in->flags.metaPipeUnaligned == 0);
2042 assert(in->flags.metaRbUnaligned == 0);
2043
2044 hin.hTileFlags.pipeAligned = 1;
2045 hin.hTileFlags.rbAligned = 1;
2046 hin.depthFlags = in->flags;
2047 hin.swizzleMode = in->swizzleMode;
2048 hin.unalignedWidth = in->width;
2049 hin.unalignedHeight = in->height;
2050 hin.numSlices = in->numSlices;
2051 hin.numMipLevels = in->numMipLevels;
2052 hin.firstMipIdInTail = out.firstMipIdInTail;
2053
2054 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
2055 if (ret != ADDR_OK)
2056 return ret;
2057
2058 surf->meta_size = hout.htileBytes;
2059 surf->meta_slice_size = hout.sliceSize;
2060 surf->meta_alignment_log2 = util_logbase2(hout.baseAlign);
2061 surf->meta_pitch = hout.pitch;
2062 surf->num_meta_levels = in->numMipLevels;
2063
2064 for (unsigned i = 0; i < in->numMipLevels; i++) {
2065 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
2066 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
2067
2068 if (meta_mip_info[i].inMiptail) {
2069 /* GFX10 can only compress the first level
2070 * in the mip tail.
2071 */
2072 surf->num_meta_levels = i + 1;
2073 break;
2074 }
2075 }
2076
2077 if (!surf->num_meta_levels)
2078 surf->meta_size = 0;
2079
2080 if (info->gfx_level >= GFX10)
2081 ac_copy_htile_equation(info, &hout, &surf->u.gfx9.zs.htile_equation);
2082 return 0;
2083 }
2084
2085 {
2086 /* Compute tile swizzle for the color surface.
2087 * All *_X and *_T modes can use the swizzle.
2088 */
2089 if (config->info.surf_index && in->swizzleMode >= ADDR_SW_64KB_Z_T && !out.mipChainInTail &&
2090 !(surf->flags & RADEON_SURF_SHAREABLE) && !in->flags.display) {
2091 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
2092 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
2093
2094 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
2095 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
2096
2097 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
2098 xin.flags = in->flags;
2099 xin.swizzleMode = in->swizzleMode;
2100 xin.resourceType = in->resourceType;
2101 xin.format = in->format;
2102 xin.numSamples = in->numSamples;
2103 xin.numFrags = in->numFrags;
2104
2105 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
2106 if (ret != ADDR_OK)
2107 return ret;
2108
2109 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
2110 surf->tile_swizzle = xout.pipeBankXor;
2111
2112 /* Gfx11 should shift it by 10 bits instead of 8, and drivers already shift it by 8 bits,
2113 * so shift it by 2 bits here.
2114 */
2115 if (info->gfx_level >= GFX11)
2116 surf->tile_swizzle <<= 2;
2117 }
2118
2119 bool use_dcc = false;
2120 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2121 use_dcc = ac_modifier_has_dcc(surf->modifier);
2122 } else {
2123 use_dcc = info->has_graphics && !(surf->flags & RADEON_SURF_DISABLE_DCC) && !compressed &&
2124 !config->is_3d &&
2125 is_dcc_supported_by_CB(info, in->swizzleMode) &&
2126 (!in->flags.display ||
2127 gfx9_is_dcc_supported_by_DCN(info, config, surf, !in->flags.metaRbUnaligned,
2128 !in->flags.metaPipeUnaligned));
2129 }
2130
2131 /* DCC */
2132 if (use_dcc) {
2133 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
2134 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
2135 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2136
2137 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
2138 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
2139 dout.pMipInfo = meta_mip_info;
2140
2141 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
2142 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
2143 din.resourceType = in->resourceType;
2144 din.swizzleMode = in->swizzleMode;
2145 din.bpp = in->bpp;
2146 din.unalignedWidth = in->width;
2147 din.unalignedHeight = in->height;
2148 din.numSlices = in->numSlices;
2149 din.numFrags = in->numFrags;
2150 din.numMipLevels = in->numMipLevels;
2151 din.dataSurfaceSize = out.surfSize;
2152 din.firstMipIdInTail = out.firstMipIdInTail;
2153
2154 if (info->gfx_level == GFX9)
2155 simple_mtx_lock(&addrlib->lock);
2156 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2157 if (info->gfx_level == GFX9)
2158 simple_mtx_unlock(&addrlib->lock);
2159
2160 if (ret != ADDR_OK)
2161 return ret;
2162
2163 surf->u.gfx9.color.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
2164 surf->u.gfx9.color.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
2165 surf->u.gfx9.color.dcc_block_width = dout.compressBlkWidth;
2166 surf->u.gfx9.color.dcc_block_height = dout.compressBlkHeight;
2167 surf->u.gfx9.color.dcc_block_depth = dout.compressBlkDepth;
2168 surf->u.gfx9.color.dcc_pitch_max = dout.pitch - 1;
2169 surf->u.gfx9.color.dcc_height = dout.height;
2170 surf->meta_size = dout.dccRamSize;
2171 surf->meta_slice_size = dout.dccRamSliceSize;
2172 surf->meta_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2173 surf->num_meta_levels = in->numMipLevels;
2174
2175 /* Disable DCC for levels that are in the mip tail.
2176 *
2177 * There are two issues that this is intended to
2178 * address:
2179 *
2180 * 1. Multiple mip levels may share a cache line. This
2181 * can lead to corruption when switching between
2182 * rendering to different mip levels because the
2183 * RBs don't maintain coherency.
2184 *
2185 * 2. Texturing with metadata after rendering sometimes
2186 * fails with corruption, probably for a similar
2187 * reason.
2188 *
2189 * Working around these issues for all levels in the
2190 * mip tail may be overly conservative, but it's what
2191 * Vulkan does.
2192 *
2193 * Alternative solutions that also work but are worse:
2194 * - Disable DCC entirely.
2195 * - Flush the L2 cache after rendering.
2196 */
2197 for (unsigned i = 0; i < in->numMipLevels; i++) {
2198 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
2199 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
2200
2201 if (meta_mip_info[i].inMiptail) {
2202 /* GFX10 can only compress the first level
2203 * in the mip tail.
2204 *
2205 * TODO: Try to do the same thing for gfx9
2206 * if there are no regressions.
2207 */
2208 if (info->gfx_level >= GFX10)
2209 surf->num_meta_levels = i + 1;
2210 else
2211 surf->num_meta_levels = i;
2212 break;
2213 }
2214 }
2215
2216 if (!surf->num_meta_levels)
2217 surf->meta_size = 0;
2218
2219 surf->u.gfx9.color.display_dcc_size = surf->meta_size;
2220 surf->u.gfx9.color.display_dcc_alignment_log2 = surf->meta_alignment_log2;
2221 surf->u.gfx9.color.display_dcc_pitch_max = surf->u.gfx9.color.dcc_pitch_max;
2222 surf->u.gfx9.color.display_dcc_height = surf->u.gfx9.color.dcc_height;
2223
2224 if (in->resourceType == ADDR_RSRC_TEX_2D)
2225 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.dcc_equation);
2226
2227 /* Compute displayable DCC. */
2228 if (((in->flags.display && info->use_display_dcc_with_retile_blit) ||
2229 ac_modifier_has_dcc_retile(surf->modifier)) && surf->num_meta_levels) {
2230 /* Compute displayable DCC info. */
2231 din.dccKeyFlags.pipeAligned = 0;
2232 din.dccKeyFlags.rbAligned = 0;
2233
2234 assert(din.numSlices == 1);
2235 assert(din.numMipLevels == 1);
2236 assert(din.numFrags == 1);
2237 assert(surf->tile_swizzle == 0);
2238 assert(surf->u.gfx9.color.dcc.pipe_aligned || surf->u.gfx9.color.dcc.rb_aligned);
2239
2240 if (info->gfx_level == GFX9)
2241 simple_mtx_lock(&addrlib->lock);
2242 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2243 if (info->gfx_level == GFX9)
2244 simple_mtx_unlock(&addrlib->lock);
2245
2246 if (ret != ADDR_OK)
2247 return ret;
2248
2249 surf->u.gfx9.color.display_dcc_size = dout.dccRamSize;
2250 surf->u.gfx9.color.display_dcc_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2251 surf->u.gfx9.color.display_dcc_pitch_max = dout.pitch - 1;
2252 surf->u.gfx9.color.display_dcc_height = dout.height;
2253 assert(surf->u.gfx9.color.display_dcc_size <= surf->meta_size);
2254
2255 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.display_dcc_equation);
2256 surf->u.gfx9.color.dcc.display_equation_valid = true;
2257 }
2258 }
2259
2260 /* FMASK (it doesn't exist on GFX11) */
2261 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2262 in->numSamples > 1 && !(surf->flags & RADEON_SURF_NO_FMASK)) {
2263 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
2264 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
2265
2266 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
2267 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
2268
2269 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, in, true, &fin.swizzleMode);
2270 if (ret != ADDR_OK)
2271 return ret;
2272
2273 fin.unalignedWidth = in->width;
2274 fin.unalignedHeight = in->height;
2275 fin.numSlices = in->numSlices;
2276 fin.numSamples = in->numSamples;
2277 fin.numFrags = in->numFrags;
2278
2279 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
2280 if (ret != ADDR_OK)
2281 return ret;
2282
2283 surf->u.gfx9.color.fmask_swizzle_mode = fin.swizzleMode;
2284 surf->u.gfx9.color.fmask_epitch = fout.pitch - 1;
2285 surf->fmask_size = fout.fmaskBytes;
2286 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
2287 surf->fmask_slice_size = fout.sliceSize;
2288
2289 /* Compute tile swizzle for the FMASK surface. */
2290 if (config->info.fmask_surf_index && fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
2291 !(surf->flags & RADEON_SURF_SHAREABLE)) {
2292 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
2293 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
2294
2295 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
2296 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
2297
2298 /* This counter starts from 1 instead of 0. */
2299 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
2300 xin.flags = in->flags;
2301 xin.swizzleMode = fin.swizzleMode;
2302 xin.resourceType = in->resourceType;
2303 xin.format = in->format;
2304 xin.numSamples = in->numSamples;
2305 xin.numFrags = in->numFrags;
2306
2307 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
2308 if (ret != ADDR_OK)
2309 return ret;
2310
2311 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
2312 surf->fmask_tile_swizzle = xout.pipeBankXor;
2313 }
2314 }
2315
2316 /* CMASK -- on GFX10 only for FMASK (and it doesn't exist on GFX11) */
2317 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2318 in->swizzleMode != ADDR_SW_LINEAR && in->resourceType == ADDR_RSRC_TEX_2D &&
2319 ((info->gfx_level <= GFX9 && in->numSamples == 1 && in->flags.metaPipeUnaligned == 0 &&
2320 in->flags.metaRbUnaligned == 0) ||
2321 (surf->fmask_size && in->numSamples >= 2))) {
2322 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
2323 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
2324 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2325
2326 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
2327 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
2328 cout.pMipInfo = meta_mip_info;
2329
2330 assert(in->flags.metaPipeUnaligned == 0);
2331 assert(in->flags.metaRbUnaligned == 0);
2332
2333 cin.cMaskFlags.pipeAligned = 1;
2334 cin.cMaskFlags.rbAligned = 1;
2335 cin.resourceType = in->resourceType;
2336 cin.unalignedWidth = in->width;
2337 cin.unalignedHeight = in->height;
2338 cin.numSlices = in->numSlices;
2339 cin.numMipLevels = in->numMipLevels;
2340 cin.firstMipIdInTail = out.firstMipIdInTail;
2341
2342 if (in->numSamples > 1)
2343 cin.swizzleMode = surf->u.gfx9.color.fmask_swizzle_mode;
2344 else
2345 cin.swizzleMode = in->swizzleMode;
2346
2347 if (info->gfx_level == GFX9)
2348 simple_mtx_lock(&addrlib->lock);
2349 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
2350 if (info->gfx_level == GFX9)
2351 simple_mtx_unlock(&addrlib->lock);
2352
2353 if (ret != ADDR_OK)
2354 return ret;
2355
2356 surf->cmask_size = cout.cmaskBytes;
2357 surf->cmask_alignment_log2 = util_logbase2(cout.baseAlign);
2358 surf->cmask_slice_size = cout.sliceSize;
2359 surf->cmask_pitch = cout.pitch;
2360 surf->cmask_height = cout.height;
2361 surf->u.gfx9.color.cmask_level0.offset = meta_mip_info[0].offset;
2362 surf->u.gfx9.color.cmask_level0.size = meta_mip_info[0].sliceSize;
2363
2364 ac_copy_cmask_equation(info, &cout, &surf->u.gfx9.color.cmask_equation);
2365 }
2366 }
2367
2368 return 0;
2369 }
2370
2371 static int gfx9_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2372 const struct ac_surf_config *config, enum radeon_surf_mode mode,
2373 struct radeon_surf *surf)
2374 {
2375 bool compressed;
2376 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
2377 int r;
2378
2379 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
2380
2381 compressed = surf->blk_w == 4 && surf->blk_h == 4;
2382
2383 AddrSurfInfoIn.format = bpe_to_format(surf);
2384 if (!compressed)
2385 AddrSurfInfoIn.bpp = surf->bpe * 8;
2386
2387 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
2388 AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
2389 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
2390 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
2391 /* flags.texture currently refers to TC-compatible HTILE */
2392 AddrSurfInfoIn.flags.texture = (is_color_surface && !(surf->flags & RADEON_SURF_NO_TEXTURE)) ||
2393 (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE);
2394 AddrSurfInfoIn.flags.opt4space = 1;
2395 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
2396
2397 AddrSurfInfoIn.numMipLevels = config->info.levels;
2398 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
2399 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
2400
2401 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
2402 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
2403
2404 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
2405 * as 2D to avoid needing shader variants for 1D vs 2D; as a result,
2406 * all shaders must sample 1D textures as 2D. */
2407 if (config->is_3d)
2408 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
2409 else if (info->gfx_level != GFX9 && config->is_1d)
2410 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
2411 else
2412 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
2413
2414 AddrSurfInfoIn.width = config->info.width;
2415 AddrSurfInfoIn.height = config->info.height;
2416
2417 if (config->is_3d)
2418 AddrSurfInfoIn.numSlices = config->info.depth;
2419 else if (config->is_cube)
2420 AddrSurfInfoIn.numSlices = 6;
2421 else
2422 AddrSurfInfoIn.numSlices = config->info.array_size;
2423
2424 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
2425 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
2426 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
2427
2428 if (ac_modifier_has_dcc(surf->modifier)) {
2429 ac_modifier_fill_dcc_params(surf->modifier, surf, &AddrSurfInfoIn);
2430 } else if (!AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.stencil) {
2431 /* Optimal values for the L2 cache. */
2432 /* Don't change the DCC settings for imported buffers - they might differ. */
2433 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
2434 if (info->gfx_level >= GFX11_5) {
2435 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2436 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2437 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
2438 } else if (info->gfx_level >= GFX10) {
2439 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2440 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2441 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
2442 } else if (info->gfx_level == GFX9) {
2443 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2444 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2445 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2446 }
2447 }
2448
2449 if (AddrSurfInfoIn.flags.display) {
2450 /* The display hardware can only read DCC with RB_ALIGNED=0 and
2451 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
2452 *
2453 * The CB block requires RB_ALIGNED=1 except on chips with a single RB.
2454 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
2455 * after rendering, so PIPE_ALIGNED=1 is recommended.
2456 */
2457 if (info->use_display_dcc_unaligned) {
2458 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
2459 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
2460 }
2461
2462 /* Adjust DCC settings to meet DCN requirements. */
2463 /* Don't change the DCC settings for imported buffers - they might differ. */
2464 if (!(surf->flags & RADEON_SURF_IMPORTED) &&
2465 (info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit)) {
2466 /* Only Navi12/14 support independent 64B blocks in L2,
2467 * but without DCC image stores.
2468 */
2469 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
2470 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2471 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2472 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2473 }
2474
2475 if ((info->gfx_level >= GFX10_3 && info->family <= CHIP_REMBRANDT) ||
2476 /* Newer chips will skip this when possible to get better performance.
2477 * This is also possible for other gfx10.3 chips, but is disabled for
2478 * interoperability between different Mesa versions.
2479 */
2480 (info->family > CHIP_REMBRANDT &&
2481 gfx10_DCN_requires_independent_64B_blocks(info, config))) {
2482 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2483 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2484 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2485 }
2486 }
2487 }
2488 }
2489
2490 if (surf->modifier == DRM_FORMAT_MOD_INVALID) {
2491 switch (mode) {
2492 case RADEON_SURF_MODE_LINEAR_ALIGNED:
2493 assert(config->info.samples <= 1);
2494 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
2495 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
2496 break;
2497
2498 case RADEON_SURF_MODE_1D:
2499 case RADEON_SURF_MODE_2D:
2500 if (surf->flags & RADEON_SURF_IMPORTED ||
2501 (info->gfx_level >= GFX10 && surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
2502 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.swizzle_mode;
2503 break;
2504 }
2505
2506 /* On GFX11, the only allowed swizzle mode for VRS rate images is
2507 * 64KB_R_X.
2508 */
2509 if (info->gfx_level >= GFX11 && surf->flags & RADEON_SURF_VRS_RATE) {
2510 AddrSurfInfoIn.swizzleMode = ADDR_SW_64KB_R_X;
2511 break;
2512 }
2513
2514 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2515 &AddrSurfInfoIn.swizzleMode);
2516 if (r)
2517 return r;
2518 break;
2519
2520 default:
2521 assert(0);
2522 }
2523 } else {
2524 /* We have a valid and required modifier here. */
2525
2526 assert(!compressed);
2527 assert(!ac_modifier_has_dcc(surf->modifier) ||
2528 !(surf->flags & RADEON_SURF_DISABLE_DCC));
2529
2530 AddrSurfInfoIn.swizzleMode = ac_get_modifier_swizzle_mode(info->gfx_level, surf->modifier);
2531 }
2532
2533 surf->u.gfx9.resource_type = (enum gfx9_resource_type)AddrSurfInfoIn.resourceType;
2534 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
2535
2536 surf->num_meta_levels = 0;
2537 surf->surf_size = 0;
2538 surf->fmask_size = 0;
2539 surf->meta_size = 0;
2540 surf->meta_slice_size = 0;
2541 surf->u.gfx9.surf_offset = 0;
2542 if (AddrSurfInfoIn.flags.stencil)
2543 surf->u.gfx9.zs.stencil_offset = 0;
2544 surf->cmask_size = 0;
2545
2546 const bool only_stencil =
2547 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
2548
2549 /* Calculate texture layout information. */
2550 if (!only_stencil) {
2551 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2552 if (r)
2553 return r;
2554 }
2555
2556 /* Calculate texture layout information for stencil. */
2557 if (surf->flags & RADEON_SURF_SBUFFER) {
2558 AddrSurfInfoIn.flags.stencil = 1;
2559 AddrSurfInfoIn.bpp = 8;
2560 AddrSurfInfoIn.format = ADDR_FMT_8;
2561
2562 if (!AddrSurfInfoIn.flags.depth) {
2563 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2564 &AddrSurfInfoIn.swizzleMode);
2565 if (r)
2566 return r;
2567 } else
2568 AddrSurfInfoIn.flags.depth = 0;
2569
2570 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2571 if (r)
2572 return r;
2573 }
2574
2575 surf->is_linear = (only_stencil ? surf->u.gfx9.zs.stencil_swizzle_mode :
2576 surf->u.gfx9.swizzle_mode) == ADDR_SW_LINEAR;
2577
2578 /* Query whether the surface is displayable. */
2579 /* This is only useful for surfaces that are allocated without SCANOUT. */
2580 BOOL_32 displayable = false;
2581 if (!config->is_3d && !config->is_cube) {
2582 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.swizzle_mode,
2583 surf->bpe * 8, &displayable);
2584 if (r)
2585 return r;
2586
2587 /* Display needs unaligned DCC. */
2588 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
2589 surf->num_meta_levels &&
2590 (!gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2591 surf->u.gfx9.color.dcc.pipe_aligned) ||
2592 /* Don't set is_displayable if displayable DCC is missing. */
2593 (info->use_display_dcc_with_retile_blit && !surf->u.gfx9.color.dcc.display_equation_valid)))
2594 displayable = false;
2595 }
2596 surf->is_displayable = displayable;
2597
2598 /* Validate that we allocated a displayable surface if requested. */
2599 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
2600
2601 /* Validate that DCC is set up correctly. */
2602 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->num_meta_levels) {
2603 assert(is_dcc_supported_by_L2(info, surf));
2604 if (AddrSurfInfoIn.flags.color)
2605 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.swizzle_mode));
2606 if (AddrSurfInfoIn.flags.display && surf->modifier == DRM_FORMAT_MOD_INVALID) {
2607 assert(gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2608 surf->u.gfx9.color.dcc.pipe_aligned));
2609 }
2610 }
2611
2612 if (info->has_graphics && !compressed && !config->is_3d && config->info.levels == 1 &&
2613 AddrSurfInfoIn.flags.color && !surf->is_linear &&
2614 (1 << surf->surf_alignment_log2) >= 64 * 1024 && /* 64KB tiling */
2615 !(surf->flags & (RADEON_SURF_DISABLE_DCC | RADEON_SURF_FORCE_SWIZZLE_MODE |
2616 RADEON_SURF_FORCE_MICRO_TILE_MODE)) &&
2617 surf->modifier == DRM_FORMAT_MOD_INVALID &&
2618 gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2619 surf->u.gfx9.color.dcc.pipe_aligned)) {
2620 /* Validate that DCC is enabled if DCN can do it. */
2621 if ((info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) &&
2622 AddrSurfInfoIn.flags.display && surf->bpe == 4) {
2623 assert(surf->num_meta_levels);
2624 }
2625
2626 /* Validate that non-scanout DCC is always enabled. */
2627 if (!AddrSurfInfoIn.flags.display)
2628 assert(surf->num_meta_levels);
2629 }
2630
2631 if (!surf->meta_size) {
2632 /* Unset this if HTILE is not present. */
2633 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
2634 }
2635
2636 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2637 assert((surf->num_meta_levels != 0) == ac_modifier_has_dcc(surf->modifier));
2638 }
2639
2640 switch (surf->u.gfx9.swizzle_mode) {
2641 /* S = standard. */
2642 case ADDR_SW_256B_S:
2643 case ADDR_SW_4KB_S:
2644 case ADDR_SW_64KB_S:
2645 case ADDR_SW_64KB_S_T:
2646 case ADDR_SW_4KB_S_X:
2647 case ADDR_SW_64KB_S_X:
2648 case ADDR_SW_256KB_S_X:
2649 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2650 break;
2651
2652 /* D = display. */
2653 case ADDR_SW_LINEAR:
2654 case ADDR_SW_256B_D:
2655 case ADDR_SW_4KB_D:
2656 case ADDR_SW_64KB_D:
2657 case ADDR_SW_64KB_D_T:
2658 case ADDR_SW_4KB_D_X:
2659 case ADDR_SW_64KB_D_X:
2660 case ADDR_SW_256KB_D_X:
2661 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2662 break;
2663
2664 /* R = rotated (gfx9), render target (gfx10). */
2665 case ADDR_SW_256B_R:
2666 case ADDR_SW_4KB_R:
2667 case ADDR_SW_64KB_R:
2668 case ADDR_SW_64KB_R_T:
2669 case ADDR_SW_4KB_R_X:
2670 case ADDR_SW_64KB_R_X:
2671 case ADDR_SW_256KB_R_X:
2672 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2673 * used at the same time. We currently do not use rotated
2674 * in gfx9.
2675 */
2676 assert(info->gfx_level >= GFX10 || !"rotate micro tile mode is unsupported");
2677 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2678 break;
2679
2680 /* Z = depth. */
2681 case ADDR_SW_4KB_Z:
2682 case ADDR_SW_64KB_Z:
2683 case ADDR_SW_64KB_Z_T:
2684 case ADDR_SW_4KB_Z_X:
2685 case ADDR_SW_64KB_Z_X:
2686 case ADDR_SW_256KB_Z_X:
2687 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2688 break;
2689
2690 default:
2691 assert(0);
2692 }
2693
2694 return 0;
2695 }
2696
2697 static uint64_t gfx12_estimate_size(const ADDR3_COMPUTE_SURFACE_INFO_INPUT *in,
2698 const struct radeon_surf *surf,
2699 unsigned align_width, unsigned align_height,
2700 unsigned align_depth)
2701 {
2702 unsigned blk_w = surf ? surf->blk_w : 1;
2703 unsigned blk_h = surf ? surf->blk_h : 1;
2704 unsigned bpe = in->bpp ? in->bpp / 8 : surf->bpe;
2705 unsigned width = align(in->width, align_width * blk_w);
2706 unsigned height = align(in->height, align_height * blk_h);
2707 unsigned depth = align(in->numSlices, align_depth);
2708 unsigned tile_size = align_width * align_height * align_depth *
2709 in->numSamples * bpe;
2710
2711 if (in->numMipLevels > 1 && align_height > 1) {
2712 width = util_next_power_of_two(width);
2713 height = util_next_power_of_two(height);
2714 }
2715
2716 uint64_t size = 0;
2717
2718 /* Note: This mipmap size computation is inaccurate. */
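/* For block sizes of at least 4 KiB, the loop stops at the first level that no longer fills half a tile, treating it and the remaining levels as the mip tail. */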
2719 for (unsigned i = 0; i < in->numMipLevels; i++) {
2720 uint64_t level_size =
2721 (uint64_t)DIV_ROUND_UP(width, blk_w) * DIV_ROUND_UP(height, blk_h) * depth *
2722 in->numSamples * bpe;
2723
2724 size += level_size;
2725
2726 if (tile_size >= 4096 && level_size <= tile_size / 2) {
2727 /* We are likely in the mip tail, return. */
2728 assert(size);
2729 return size;
2730 }
2731
2732 /* Minify the level. */
2733 width = u_minify(width, 1);
2734 height = u_minify(height, 1);
2735 if (in->resourceType == ADDR_RSRC_TEX_3D)
2736 depth = u_minify(depth, 1);
2737 }
2738
2739 /* TODO: check that this is not too different from the correct value */
2740 assert(size);
2741 return size;
2742 }
2743
2744 static unsigned gfx12_select_swizzle_mode(struct ac_addrlib *addrlib,
2745 const struct radeon_info *info,
2746 const struct radeon_surf *surf,
2747 const ADDR3_COMPUTE_SURFACE_INFO_INPUT *in)
2748 {
2749 ADDR3_GET_POSSIBLE_SWIZZLE_MODE_INPUT get_in = {0};
2750 ADDR3_GET_POSSIBLE_SWIZZLE_MODE_OUTPUT get_out = {0};
2751
2752 get_in.size = sizeof(ADDR3_GET_POSSIBLE_SWIZZLE_MODE_INPUT);
2753 get_out.size = sizeof(ADDR3_GET_POSSIBLE_SWIZZLE_MODE_OUTPUT);
2754
2755 get_in.flags = in->flags;
2756 get_in.resourceType = in->resourceType;
2757 get_in.bpp = in->bpp ? in->bpp : (surf->bpe * 8);
2758 get_in.width = in->width;
2759 get_in.height = in->height;
2760 get_in.numSlices = in->numSlices;
2761 get_in.numMipLevels = in->numMipLevels;
2762 get_in.numSamples = in->numSamples;
2763 get_in.maxAlign = info->has_dedicated_vram ? (256 * 1024) : (64 * 1024);
2764
2765 if (Addr3GetPossibleSwizzleModes(addrlib->handle, &get_in, &get_out) != ADDR_OK) {
2766 assert(!"Addr3GetPossibleSwizzleModes failed");
2767 return ADDR3_MAX_TYPE;
2768 }
2769
2770 /* TODO: Workaround for SW_LINEAR assertion failures in addrlib. This should be fixed in addrlib. */
2771 if (surf && surf->blk_w == 4)
2772 get_out.validModes.swLinear = 0;
2773
2774 assert(get_out.validModes.value);
2775
2776 unsigned bpe = in->bpp ? in->bpp / 8 : surf->bpe;
2777 unsigned log_bpp = util_logbase2(bpe);
2778 unsigned log_samples = util_logbase2(in->numSamples);
2779 uint64_t ideal_size = gfx12_estimate_size(in, surf, 1, 1, 1);
2780
2781 if (in->resourceType == ADDR_RSRC_TEX_3D) {
2782 static unsigned block3d_size_4K[5][3] = {
2783 {16, 16, 16},
2784 {8, 16, 16},
2785 {8, 16, 8},
2786 {8, 8, 8},
2787 {4, 8, 8},
2788 };
2789 static unsigned block3d_size_64K[5][3] = {
2790 {64, 32, 32},
2791 {32, 32, 32},
2792 {32, 32, 16},
2793 {32, 16, 16},
2794 {16, 16, 16},
2795 };
2796 static unsigned block3d_size_256K[5][3] = {
2797 {64, 64, 64},
2798 {32, 64, 64},
2799 {32, 64, 32},
2800 {32, 32, 32},
2801 {16, 32, 32},
2802 };
2803
2804 uint64_t size_4K = gfx12_estimate_size(in, surf, block3d_size_4K[log_bpp][0],
2805 block3d_size_4K[log_bpp][1],
2806 block3d_size_4K[log_bpp][2]);
2807
2808 uint64_t size_64K = gfx12_estimate_size(in, surf, block3d_size_64K[log_bpp][0],
2809 block3d_size_64K[log_bpp][1],
2810 block3d_size_64K[log_bpp][2]);
2811
2812 uint64_t size_256K = gfx12_estimate_size(in, surf, block3d_size_256K[log_bpp][0],
2813 block3d_size_256K[log_bpp][1],
2814 block3d_size_256K[log_bpp][2]);
2815
2816 float max_3d_overalloc_256K = 1.1;
2817 float max_3d_overalloc_64K = 1.2;
2818 float max_3d_overalloc_4K = 2;
2819
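/* Prefer the largest 3D block whose estimated padding stays within the limits above; otherwise fall through to the 2D selection below. */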
2820 if (get_out.validModes.sw3d256kB &&
2821 (size_256K / (double)ideal_size <= max_3d_overalloc_256K || !get_out.validModes.sw3d64kB))
2822 return ADDR3_256KB_3D;
2823
2824 if (get_out.validModes.sw3d64kB &&
2825 (size_64K / (double)ideal_size <= max_3d_overalloc_64K || !get_out.validModes.sw3d4kB))
2826 return ADDR3_64KB_3D;
2827
2828 if (get_out.validModes.sw3d4kB &&
2829 (size_4K / (double)ideal_size <= max_3d_overalloc_4K ||
2830 /* If the image is thick, prefer thick tiling. */
2831 in->numSlices >= block3d_size_4K[log_bpp][2] * 3))
2832 return ADDR3_4KB_3D;
2833
2834 /* Try to select a 2D (planar) swizzle mode to save memory. */
2835 }
2836
2837 static unsigned block_size_LINEAR[5] = {
2838 /* 1xAA (MSAA not supported with LINEAR)
2839 *
2840 * The pitch alignment is 128B, but the slice size is computed as if the pitch alignment
2841 * was 256B.
2842 */
2843 256,
2844 128,
2845 64,
2846 32,
2847 16,
2848 };
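/* The tables below give the block footprint in pixels as {width, height}, indexed by [log2(samples)][log2(bpe)]. */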
2849 static unsigned block_size_256B[4][5][2] = {
2850 { /* 1xAA */
2851 {16, 16},
2852 {16, 8},
2853 {8, 8},
2854 {8, 4},
2855 {4, 4},
2856 },
2857 { /* 2xAA */
2858 {16, 8},
2859 {8, 8},
2860 {8, 4},
2861 {4, 4},
2862 {4, 2},
2863 },
2864 { /* 4xAA */
2865 {8, 8},
2866 {8, 4},
2867 {4, 4},
2868 {4, 2},
2869 {2, 2},
2870 },
2871 { /* 8xAA */
2872 {8, 4},
2873 {4, 4},
2874 {4, 2},
2875 {2, 2},
2876 {2, 1},
2877 },
2878 };
2879 static unsigned block_size_4K[4][5][2] = {
2880 { /* 1xAA */
2881 {64, 64},
2882 {64, 32},
2883 {32, 32},
2884 {32, 16},
2885 {16, 16},
2886 },
2887 { /* 2xAA */
2888 {64, 32},
2889 {32, 32},
2890 {32, 16},
2891 {16, 16},
2892 {16, 8},
2893 },
2894 { /* 4xAA */
2895 {32, 32},
2896 {32, 16},
2897 {16, 16},
2898 {16, 8},
2899 {8, 8},
2900 },
2901 { /* 8xAA */
2902 {32, 16},
2903 {16, 16},
2904 {16, 8},
2905 {8, 8},
2906 {8, 4},
2907 },
2908 };
2909 static unsigned block_size_64K[4][5][2] = {
2910 { /* 1xAA */
2911 {256, 256},
2912 {256, 128},
2913 {128, 128},
2914 {128, 64},
2915 {64, 64},
2916 },
2917 { /* 2xAA */
2918 {256, 128},
2919 {128, 128},
2920 {128, 64},
2921 {64, 64},
2922 {64, 32},
2923 },
2924 { /* 4xAA */
2925 {128, 128},
2926 {128, 64},
2927 {64, 64},
2928 {64, 32},
2929 {32, 32},
2930 },
2931 { /* 8xAA */
2932 {128, 64},
2933 {64, 64},
2934 {64, 32},
2935 {32, 32},
2936 {32, 16},
2937 },
2938 };
2939 static unsigned block_size_256K[4][5][2] = {
2940 { /* 1xAA */
2941 {512, 512},
2942 {512, 256},
2943 {256, 256},
2944 {256, 128},
2945 {128, 128},
2946 },
2947 { /* 2xAA */
2948 {512, 256},
2949 {256, 256},
2950 {256, 128},
2951 {128, 128},
2952 {128, 64},
2953 },
2954 { /* 4xAA */
2955 {256, 256},
2956 {256, 128},
2957 {128, 128},
2958 {128, 64},
2959 {64, 64},
2960 },
2961 { /* 8xAA */
2962 {256, 128},
2963 {128, 128},
2964 {128, 64},
2965 {64, 64},
2966 {64, 32},
2967 },
2968 };
2969
2970 uint64_t size_LINEAR = gfx12_estimate_size(in, surf, block_size_LINEAR[log_bpp], 1, 1);
2971
2972 uint64_t size_256B = gfx12_estimate_size(in, surf, block_size_256B[log_samples][log_bpp][0],
2973 block_size_256B[log_samples][log_bpp][1], 1);
2974
2975 uint64_t size_4K = gfx12_estimate_size(in, surf, block_size_4K[log_samples][log_bpp][0],
2976 block_size_4K[log_samples][log_bpp][1], 1);
2977
2978 uint64_t size_64K = gfx12_estimate_size(in, surf, block_size_64K[log_samples][log_bpp][0],
2979 block_size_64K[log_samples][log_bpp][1], 1);
2980
2981 uint64_t size_256K = gfx12_estimate_size(in, surf, block_size_256K[log_samples][log_bpp][0],
2982 block_size_256K[log_samples][log_bpp][1], 1);
2983
2984 float max_2d_overalloc_256K = 1.1; /* relative to ideal */
2985 float max_2d_overalloc_64K = 1.3; /* relative to ideal */
2986 float max_2d_overalloc_4K = 2; /* relative to ideal */
2987 float max_2d_overalloc_256B = 3; /* relative to LINEAR */
2988
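/* Pick the largest 2D block whose padding overhead stays within the ratios above, falling back to smaller blocks, 256B, or linear. */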
2989 if (get_out.validModes.sw2d256kB &&
2990 (size_256K / (double)ideal_size <= max_2d_overalloc_256K || !get_out.validModes.sw2d64kB))
2991 return ADDR3_256KB_2D;
2992
2993 if (get_out.validModes.sw2d64kB &&
2994 (size_64K / (double)ideal_size <= max_2d_overalloc_64K || !get_out.validModes.sw2d4kB))
2995 return ADDR3_64KB_2D;
2996
2997 if (get_out.validModes.sw2d4kB &&
2998 (size_4K / (double)ideal_size <= max_2d_overalloc_4K ||
2999 (!get_out.validModes.sw2d256B && !get_out.validModes.swLinear)))
3000 return ADDR3_4KB_2D;
3001
3002 assert(get_out.validModes.sw2d256B || get_out.validModes.swLinear);
3003
3004 if (get_out.validModes.sw2d256B && get_out.validModes.swLinear)
3005 return size_256B / (double)size_LINEAR <= max_2d_overalloc_256B ? ADDR3_256B_2D : ADDR3_LINEAR;
3006 else if (get_out.validModes.sw2d256B)
3007 return ADDR3_256B_2D;
3008 else
3009 return ADDR3_LINEAR;
3010 }
3011
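/* Compute the HiZ (depth) or HiS (stencil) metadata layout: one 32-bit (HiZ) or
 * 16-bit (HiS) element per 8x8 pixel tile, with the tile counts padded to an even
 * number. For example, a 1920x1080 depth buffer yields
 * width_in_tiles = align(DIV_ROUND_UP(1920, 8), 2) = 240 and
 * height_in_tiles = align(DIV_ROUND_UP(1080, 8), 2) = 136.
 */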
3012 static bool gfx12_compute_hiz_his_info(struct ac_addrlib *addrlib, const struct radeon_info *info,
3013 struct radeon_surf *surf, struct gfx12_hiz_his_layout *hizs,
3014 const ADDR3_COMPUTE_SURFACE_INFO_INPUT *surf_in)
3015 {
3016 assert(surf_in->flags.depth != surf_in->flags.stencil);
3017
3018 if (surf->flags & RADEON_SURF_NO_HTILE || (info->gfx_level == GFX12 && info->chip_rev == 0))
3019 return true;
3020
3021 ADDR3_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
3022 out.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_OUTPUT);
3023
3024 ADDR3_COMPUTE_SURFACE_INFO_INPUT in = *surf_in;
3025 in.flags.depth = 0;
3026 in.flags.stencil = 0;
3027 in.flags.hiZHiS = 1;
3028
3029 if (surf_in->flags.depth) {
3030 in.format = ADDR_FMT_32;
3031 in.bpp = 32;
3032 } else {
3033 in.format = ADDR_FMT_16;
3034 in.bpp = 16;
3035 }
3036
3037 /* Compute the HiZ/HiS size. */
3038 in.width = align(DIV_ROUND_UP(surf_in->width, 8), 2);
3039 in.height = align(DIV_ROUND_UP(surf_in->height, 8), 2);
3040 in.swizzleMode = gfx12_select_swizzle_mode(addrlib, info, NULL, &in);
3041
3042 int ret = Addr3ComputeSurfaceInfo(addrlib->handle, &in, &out);
3043 if (ret != ADDR_OK)
3044 return false;
3045
3046 hizs->size = out.surfSize;
3047 hizs->width_in_tiles = in.width;
3048 hizs->height_in_tiles = in.height;
3049 hizs->swizzle_mode = in.swizzleMode;
3050    hizs->alignment_log2 = util_logbase2(out.baseAlign);
3051 return true;
3052 }
3053
3054 static bool gfx12_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
3055 const struct ac_surf_config *config, struct radeon_surf *surf,
3056 bool compressed, ADDR3_COMPUTE_SURFACE_INFO_INPUT *in)
3057 {
3058 ADDR3_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
3059 ADDR3_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
3060 ADDR_E_RETURNCODE ret;
3061
3062 out.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_OUTPUT);
3063 out.pMipInfo = mip_info;
3064
3065 ret = Addr3ComputeSurfaceInfo(addrlib->handle, in, &out);
3066 if (ret != ADDR_OK)
3067 return false;
3068
3069 /* TODO: remove this block once addrlib stops giving us 64K pitch for small images, breaking
3070 * modifiers and X.Org.
3071 */
3072 if (in->swizzleMode >= ADDR3_256B_2D && in->swizzleMode <= ADDR3_256KB_2D &&
3073 in->numMipLevels == 1) {
3074 static unsigned block_bits[ADDR3_MAX_TYPE] = {
3075 [ADDR3_256B_2D] = 8,
3076 [ADDR3_4KB_2D] = 12,
3077 [ADDR3_64KB_2D] = 16,
3078 [ADDR3_256KB_2D] = 18,
3079 };
3080 unsigned align_bits = block_bits[in->swizzleMode] - util_logbase2(surf->bpe);
3081 unsigned w_align = 1 << (align_bits / 2 + align_bits % 2);
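      /* E.g. a single-level 64KB_2D surface with bpe = 4: align_bits = 16 - 2 = 14,
       * so w_align = 1 << (7 + 0) = 128, matching the 128-element block width of the
       * 64KB 1xAA 32bpp block used for the swizzle mode size estimates above.
       */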
3082
3083 out.pitch = align(in->width, w_align);
3084 }
3085
3086 if (in->flags.stencil) {
3087 surf->u.gfx9.zs.stencil_swizzle_mode = in->swizzleMode;
3088 surf->u.gfx9.zs.stencil_offset = align(surf->surf_size, out.baseAlign);
3089 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2, util_logbase2(out.baseAlign));
3090 surf->surf_size = surf->u.gfx9.zs.stencil_offset + out.surfSize;
3091
3092 return gfx12_compute_hiz_his_info(addrlib, info, surf, &surf->u.gfx9.zs.his, in);
3093 }
3094
3095 surf->u.gfx9.surf_slice_size = out.sliceSize;
3096 surf->u.gfx9.surf_pitch = out.pitch;
3097 surf->u.gfx9.surf_height = out.height;
3098 surf->surf_size = out.surfSize;
3099 surf->surf_alignment_log2 = util_logbase2(out.baseAlign);
3100
3101 if (surf->flags & RADEON_SURF_PRT) {
3102 surf->prt_tile_width = out.blockExtent.width;
3103 surf->prt_tile_height = out.blockExtent.height;
3104 surf->prt_tile_depth = out.blockExtent.depth;
3105 surf->first_mip_tail_level = out.firstMipIdInTail;
3106
3107 for (unsigned i = 0; i < in->numMipLevels; i++) {
3108 surf->u.gfx9.prt_level_offset[i] = mip_info[i].macroBlockOffset + mip_info[i].mipTailOffset;
3109 surf->u.gfx9.prt_level_pitch[i] = mip_info[i].pitch;
3110 }
3111 }
3112
3113 if (surf->blk_w == 2 && out.pitch == out.pixelPitch &&
3114 surf->u.gfx9.swizzle_mode == ADDR3_LINEAR) {
3115 const unsigned linear_byte_alignment = 128;
3116
3117       /* Adjust surf_pitch to be in element units, not in pixels */
3118 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w,
3119 linear_byte_alignment / surf->bpe);
3120 /* The surface is really a surf->bpe bytes per pixel surface even if we
3121 * use it as a surf->bpe bytes per element one.
3122 * Adjust surf_slice_size and surf_size to reflect the change
3123 * made to surf_pitch.
3124 */
3125 surf->u.gfx9.surf_slice_size =
3126 MAX2(surf->u.gfx9.surf_slice_size,
3127 (uint64_t)surf->u.gfx9.surf_pitch * out.height * surf->bpe * surf->blk_w);
3128 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
3129
3130 int alignment = linear_byte_alignment / surf->bpe;
3131 for (unsigned i = 0; i < in->numMipLevels; i++) {
3132 surf->u.gfx9.offset[i] = mip_info[i].offset;
3133 /* Adjust pitch like we did for surf_pitch */
3134 surf->u.gfx9.pitch[i] = align(mip_info[i].pitch / surf->blk_w, alignment);
3135 }
3136 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
3137 } else if (in->swizzleMode == ADDR3_LINEAR) {
3138 for (unsigned i = 0; i < in->numMipLevels; i++) {
3139 surf->u.gfx9.offset[i] = mip_info[i].offset;
3140 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
3141 }
3142 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
3143 } else {
3144 surf->u.gfx9.base_mip_width = mip_info[0].pitch;
3145 }
3146
3147 surf->u.gfx9.base_mip_height = mip_info[0].height;
3148
3149 if (in->flags.depth) {
3150 assert(in->swizzleMode != ADDR3_LINEAR);
3151
3152 return gfx12_compute_hiz_his_info(addrlib, info, surf, &surf->u.gfx9.zs.hiz, in);
3153 }
3154
3155 /* Compute tile swizzle for the color surface. All swizzle modes >= 4K support it. */
3156 if (surf->modifier == DRM_FORMAT_MOD_INVALID && config->info.surf_index &&
3157 in->swizzleMode >= ADDR3_4KB_2D && !out.mipChainInTail &&
3158 !(surf->flags & RADEON_SURF_SHAREABLE) && !get_display_flag(config, surf)) {
3159 ADDR3_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
3160 ADDR3_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
3161
3162 xin.size = sizeof(ADDR3_COMPUTE_PIPEBANKXOR_INPUT);
3163 xout.size = sizeof(ADDR3_COMPUTE_PIPEBANKXOR_OUTPUT);
3164
3165 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
3166 xin.swizzleMode = in->swizzleMode;
3167
3168 ret = Addr3ComputePipeBankXor(addrlib->handle, &xin, &xout);
3169 if (ret != ADDR_OK)
3170 return false;
3171
3172       assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
3173 surf->tile_swizzle = xout.pipeBankXor;
3174 }
3175
3176 return true;
3177 }
3178
3179 static bool gfx12_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
3180 const struct ac_surf_config *config, enum radeon_surf_mode mode,
3181 struct radeon_surf *surf)
3182 {
3183 bool compressed = surf->blk_w == 4 && surf->blk_h == 4;
3184 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
3185 bool stencil_only = (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
3186 ADDR3_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
3187
3188 AddrSurfInfoIn.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_INPUT);
3189
3190 if (stencil_only) {
3191 AddrSurfInfoIn.bpp = 8;
3192 AddrSurfInfoIn.format = ADDR_FMT_8;
3193 } else {
3194 AddrSurfInfoIn.format = bpe_to_format(surf);
3195 if (!compressed)
3196 AddrSurfInfoIn.bpp = surf->bpe * 8;
3197 }
3198
3199 AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
3200 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
3201 AddrSurfInfoIn.flags.stencil = stencil_only;
3202 AddrSurfInfoIn.flags.texture = !(surf->flags & RADEON_SURF_NO_TEXTURE);
3203 AddrSurfInfoIn.flags.unordered = !(surf->flags & RADEON_SURF_NO_TEXTURE);
3204 AddrSurfInfoIn.flags.blockCompressed = compressed;
3205 AddrSurfInfoIn.flags.isVrsImage = !!(surf->flags & RADEON_SURF_VRS_RATE);
3206
3207 if (config->is_3d)
3208 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
3209 else if (config->is_1d)
3210 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
3211 else
3212 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
3213
3214 AddrSurfInfoIn.width = config->info.width;
3215 AddrSurfInfoIn.height = config->info.height;
3216 AddrSurfInfoIn.numMipLevels = config->info.levels;
3217 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
3218
3219 if (config->is_3d)
3220 AddrSurfInfoIn.numSlices = config->info.depth;
3221 else if (config->is_cube)
3222 AddrSurfInfoIn.numSlices = 6;
3223 else
3224 AddrSurfInfoIn.numSlices = config->info.array_size;
3225
3226 /* Select the swizzle mode. */
3227 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3228 assert(!compressed);
3229 assert(!ac_modifier_has_dcc(surf->modifier) || !(surf->flags & RADEON_SURF_DISABLE_DCC));
3230 AddrSurfInfoIn.swizzleMode = ac_get_modifier_swizzle_mode(info->gfx_level, surf->modifier);
3231 } else if (surf->flags & RADEON_SURF_IMPORTED) {
3232 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.swizzle_mode;
3233 } else if (surf->flags & RADEON_SURF_PRT) {
3234 if (config->is_3d)
3235 AddrSurfInfoIn.swizzleMode = ADDR3_64KB_3D;
3236 else
3237 AddrSurfInfoIn.swizzleMode = ADDR3_64KB_2D;
3238 } else if (mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
3239 assert(config->info.samples <= 1 && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
3240 AddrSurfInfoIn.swizzleMode = ADDR3_LINEAR;
3241 } else if (config->is_1d && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
3242 AddrSurfInfoIn.swizzleMode = ADDR3_LINEAR;
3243 } else {
3244 AddrSurfInfoIn.swizzleMode = gfx12_select_swizzle_mode(addrlib, info, surf, &AddrSurfInfoIn);
3245 }
3246
3247 /* Force the linear pitch from 128B (default) to 256B for multi-GPU interop. This only applies
3248 * to 2D non-MSAA and plain color formats.
3249 */
3250 if (!config->is_1d && !config->is_3d && !config->is_cube && !config->is_array &&
3251 config->info.levels == 1 && config->info.samples <= 1 &&
3252 surf->blk_w == 1 && surf->blk_h == 1 && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3253 util_is_power_of_two_nonzero(surf->bpe) && AddrSurfInfoIn.swizzleMode == ADDR3_LINEAR) {
3254 AddrSurfInfoIn.pitchInElement = align(config->info.width, LINEAR_PITCH_ALIGNMENT / surf->bpe);
3255 surf->u.gfx9.uses_custom_pitch = true;
3256 }
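   /* E.g. a 100x100 RGBA8 image (bpe = 4) gets
    * pitchInElement = align(100, LINEAR_PITCH_ALIGNMENT / 4) = 128 elements
    * (512 bytes), assuming LINEAR_PITCH_ALIGNMENT is 256 as the comment above implies.
    */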
3257
3258 bool supports_display_dcc = info->drm_minor >= 58;
3259 surf->u.gfx9.swizzle_mode = AddrSurfInfoIn.swizzleMode;
3260 surf->u.gfx9.resource_type = (enum gfx9_resource_type)AddrSurfInfoIn.resourceType;
3261 surf->u.gfx9.gfx12_enable_dcc = ac_modifier_has_dcc(surf->modifier) ||
3262 (surf->modifier == DRM_FORMAT_MOD_INVALID &&
3263 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
3264 /* Always enable compression for Z/S and MSAA color by default. */
3265 (surf->flags & RADEON_SURF_Z_OR_SBUFFER ||
3266 config->info.samples > 1 ||
3267 ((supports_display_dcc || !(surf->flags & RADEON_SURF_SCANOUT)) &&
3268 /* These two are not strictly necessary. */
3269 surf->u.gfx9.swizzle_mode != ADDR3_LINEAR &&
3270 surf->surf_size >= 4096)));
3271
3272 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
3273 surf->is_linear = surf->u.gfx9.swizzle_mode == ADDR3_LINEAR;
3274 surf->is_displayable = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3275 surf->u.gfx9.resource_type != RADEON_RESOURCE_3D &&
3276 (supports_display_dcc || !surf->u.gfx9.gfx12_enable_dcc);
3277 surf->thick_tiling = surf->u.gfx9.swizzle_mode >= ADDR3_4KB_3D;
3278
3279 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
3280 surf->u.gfx9.zs.hiz.offset = 0;
3281 surf->u.gfx9.zs.hiz.size = 0;
3282 surf->u.gfx9.zs.his.offset = 0;
3283 surf->u.gfx9.zs.his.size = 0;
3284 }
3285
3286 if (surf->u.gfx9.gfx12_enable_dcc) {
3287 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3288 surf->u.gfx9.color.dcc.max_compressed_block_size =
3289 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, surf->modifier);
3290 } else if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3291 /* Don't change the DCC settings for imported buffers - they might differ. */
3292 !(surf->flags & RADEON_SURF_IMPORTED)) {
3293 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
3294 }
3295 }
3296
3297 /* Calculate texture layout information. */
3298 if (!stencil_only &&
3299 !gfx12_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn))
3300 return false;
3301
3302 /* Calculate texture layout information for stencil. */
3303 if (surf->flags & RADEON_SURF_SBUFFER) {
3304 if (stencil_only) {
3305 assert(!AddrSurfInfoIn.flags.depth);
3306 assert(AddrSurfInfoIn.flags.stencil);
3307 assert(AddrSurfInfoIn.bpp == 8);
3308 assert(AddrSurfInfoIn.format == ADDR_FMT_8);
3309 } else {
3310 AddrSurfInfoIn.flags.depth = 0;
3311 AddrSurfInfoIn.flags.stencil = 1;
3312 AddrSurfInfoIn.bpp = 8;
3313 AddrSurfInfoIn.format = ADDR_FMT_8;
3314 }
3315
3316 if (!gfx12_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn))
3317 return false;
3318 }
3319
3320 return true;
3321 }
3322
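/* Compute the layout of all allocations that share one buffer: the main surface
 * comes first; on gfx12 only HiZ/HiS follow a depth/stencil surface, while on
 * gfx6-11 FMASK, multisample CMASK, displayable DCC and the DCC/HTILE metadata are
 * appended in that order, each at an offset aligned to its own alignment
 * (single-sample CMASK lives in a separate buffer).
 */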
3323 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
3324 const struct ac_surf_config *config, enum radeon_surf_mode mode,
3325 struct radeon_surf *surf)
3326 {
3327 int r;
3328
3329 r = surf_config_sanity(config, surf->flags);
3330 if (r)
3331 return r;
3332
3333 /* Images are emulated on some CDNA chips. */
3334 if (!info->has_image_opcodes)
3335 mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
3336
3337 /* 0 offsets mean disabled. */
3338 surf->meta_offset = surf->fmask_offset = surf->cmask_offset = surf->display_dcc_offset = 0;
3339
3340 if (info->family_id >= FAMILY_GFX12) {
3341 if (!gfx12_compute_surface(addrlib, info, config, mode, surf))
3342 return ADDR_ERROR;
3343
3344 /* Determine the memory layout of multiple allocations in one buffer. */
3345 surf->total_size = surf->surf_size;
3346 surf->alignment_log2 = surf->surf_alignment_log2;
3347
3348 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
3349 if (surf->u.gfx9.zs.hiz.size) {
3350 surf->u.gfx9.zs.hiz.offset = align64(surf->total_size,
3351 1ull << surf->u.gfx9.zs.hiz.alignment_log2);
3352             surf->alignment_log2 = MAX2(surf->alignment_log2,
3353                                         surf->u.gfx9.zs.hiz.alignment_log2);
3354 surf->total_size = surf->u.gfx9.zs.hiz.offset + surf->u.gfx9.zs.hiz.size;
3355 }
3356
3357 if (surf->u.gfx9.zs.his.size) {
3358 surf->u.gfx9.zs.his.offset = align64(surf->total_size,
3359 1ull << surf->u.gfx9.zs.his.alignment_log2);
3360             surf->alignment_log2 = MAX2(surf->alignment_log2,
3361                                         surf->u.gfx9.zs.his.alignment_log2);
3362 surf->total_size = surf->u.gfx9.zs.his.offset + surf->u.gfx9.zs.his.size;
3363 }
3364 }
3365
3366 return 0;
3367 }
3368
3369 /* Gfx6-11. */
3370 if (info->family_id >= FAMILY_AI)
3371 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
3372 else
3373 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
3374
3375 if (r)
3376 return r;
3377
3378 /* Determine the memory layout of multiple allocations in one buffer. */
3379 surf->total_size = surf->surf_size;
3380 surf->alignment_log2 = surf->surf_alignment_log2;
3381
3382 if (surf->fmask_size) {
3383 assert(config->info.samples >= 2);
3384 surf->fmask_offset = align64(surf->total_size, 1ull << surf->fmask_alignment_log2);
3385 surf->total_size = surf->fmask_offset + surf->fmask_size;
3386 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->fmask_alignment_log2);
3387 }
3388
3389 /* Single-sample CMASK is in a separate buffer. */
3390 if (surf->cmask_size && config->info.samples >= 2) {
3391 surf->cmask_offset = align64(surf->total_size, 1ull << surf->cmask_alignment_log2);
3392 surf->total_size = surf->cmask_offset + surf->cmask_size;
3393 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->cmask_alignment_log2);
3394 }
3395
3396 if (surf->is_displayable)
3397 surf->flags |= RADEON_SURF_SCANOUT;
3398
3399 if (surf->meta_size &&
3400 /* dcc_size is computed on GFX9+ only if it's displayable. */
3401 (info->gfx_level >= GFX9 || !get_display_flag(config, surf))) {
3402 /* It's better when displayable DCC is immediately after
3403 * the image due to hw-specific reasons.
3404 */
3405 if (info->gfx_level >= GFX9 &&
3406 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3407 surf->u.gfx9.color.dcc.display_equation_valid) {
3408 /* Add space for the displayable DCC buffer. */
3409 surf->display_dcc_offset = align64(surf->total_size, 1ull << surf->u.gfx9.color.display_dcc_alignment_log2);
3410 surf->total_size = surf->display_dcc_offset + surf->u.gfx9.color.display_dcc_size;
3411 }
3412
3413 surf->meta_offset = align64(surf->total_size, 1ull << surf->meta_alignment_log2);
3414 surf->total_size = surf->meta_offset + surf->meta_size;
3415 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->meta_alignment_log2);
3416 }
3417
3418 return 0;
3419 }
3420
3421 /* This is meant to be used for disabling DCC. */
3422 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
3423 {
3424 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
3425 return;
3426
3427 surf->meta_offset = 0;
3428 surf->display_dcc_offset = 0;
3429 if (!surf->fmask_offset && !surf->cmask_offset) {
3430 surf->total_size = surf->surf_size;
3431 surf->alignment_log2 = surf->surf_alignment_log2;
3432 }
3433 }
3434
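/* Decode the 3-bit TILE_SPLIT tiling field into a tile split size in bytes;
 * eg_tile_split_rev() below is its inverse. E.g. field value 3 decodes to 512 bytes
 * and encodes back to 3, and unknown byte values fall back to the 1024-byte
 * encoding (4).
 */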
3435 static unsigned eg_tile_split(unsigned tile_split)
3436 {
3437 switch (tile_split) {
3438 case 0:
3439 tile_split = 64;
3440 break;
3441 case 1:
3442 tile_split = 128;
3443 break;
3444 case 2:
3445 tile_split = 256;
3446 break;
3447 case 3:
3448 tile_split = 512;
3449 break;
3450 default:
3451 case 4:
3452 tile_split = 1024;
3453 break;
3454 case 5:
3455 tile_split = 2048;
3456 break;
3457 case 6:
3458 tile_split = 4096;
3459 break;
3460 }
3461 return tile_split;
3462 }
3463
3464 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
3465 {
3466 switch (eg_tile_split) {
3467 case 64:
3468 return 0;
3469 case 128:
3470 return 1;
3471 case 256:
3472 return 2;
3473 case 512:
3474 return 3;
3475 default:
3476 case 1024:
3477 return 4;
3478 case 2048:
3479 return 5;
3480 case 4096:
3481 return 6;
3482 }
3483 }
3484
3485 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
3486 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
3487
3488 /* This should be called before ac_compute_surface. */
3489 void ac_surface_apply_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3490 uint64_t tiling_flags, enum radeon_surf_mode *mode)
3491 {
3492 bool scanout;
3493
3494 if (info->gfx_level >= GFX12) {
3495 surf->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, GFX12_SWIZZLE_MODE);
3496 surf->u.gfx9.color.dcc.max_compressed_block_size =
3497 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
3498 surf->u.gfx9.color.dcc_data_format =
3499 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
3500 surf->u.gfx9.color.dcc_number_type =
3501 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
3502 scanout = AMDGPU_TILING_GET(tiling_flags, GFX12_SCANOUT);
3503 } else if (info->gfx_level >= GFX9) {
3504 surf->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3505 surf->u.gfx9.color.dcc.independent_64B_blocks =
3506 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
3507 surf->u.gfx9.color.dcc.independent_128B_blocks =
3508 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
3509 surf->u.gfx9.color.dcc.max_compressed_block_size =
3510 AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
3511 surf->u.gfx9.color.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
3512 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
3513 *mode =
3514 surf->u.gfx9.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
3515 } else {
3516 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3517 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3518 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3519 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
3520 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3521 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3522 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
3523
3524 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
3525 *mode = RADEON_SURF_MODE_2D;
3526 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
3527 *mode = RADEON_SURF_MODE_1D;
3528 else
3529 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
3530 }
3531
3532 if (scanout)
3533 surf->flags |= RADEON_SURF_SCANOUT;
3534 else
3535 surf->flags &= ~RADEON_SURF_SCANOUT;
3536 }
3537
3538 void ac_surface_compute_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3539 uint64_t *tiling_flags)
3540 {
3541 *tiling_flags = 0;
3542
3543 if (info->gfx_level >= GFX12) {
3544 *tiling_flags |= AMDGPU_TILING_SET(GFX12_SWIZZLE_MODE, surf->u.gfx9.swizzle_mode);
3545 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_MAX_COMPRESSED_BLOCK,
3546 surf->u.gfx9.color.dcc.max_compressed_block_size);
3547 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_NUMBER_TYPE, surf->u.gfx9.color.dcc_number_type);
3548 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_DATA_FORMAT, surf->u.gfx9.color.dcc_data_format);
3549 *tiling_flags |= AMDGPU_TILING_SET(GFX12_SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
3550 } else if (info->gfx_level >= GFX9) {
3551 uint64_t dcc_offset = 0;
3552
3553 if (surf->meta_offset) {
3554 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset : surf->meta_offset;
3555 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
3556 }
3557
3558 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.swizzle_mode);
3559 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
3560 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.color.display_dcc_pitch_max);
3561 *tiling_flags |=
3562 AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.color.dcc.independent_64B_blocks);
3563 *tiling_flags |=
3564 AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.color.dcc.independent_128B_blocks);
3565 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE,
3566 surf->u.gfx9.color.dcc.max_compressed_block_size);
3567 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
3568 } else {
3569 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
3570 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
3571 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
3572 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
3573 else
3574 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
3575
3576 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
3577 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
3578 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
3579 if (surf->u.legacy.tile_split)
3580 *tiling_flags |=
3581 AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
3582 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
3583 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);
3584
3585 if (surf->flags & RADEON_SURF_SCANOUT)
3586 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
3587 else
3588 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
3589 }
3590 }
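/* ac_surface_compute_bo_metadata() and ac_surface_apply_bo_metadata() are meant to
 * round-trip the fields they share, e.g. (illustrative):
 *
 *    uint64_t tiling_flags;
 *    enum radeon_surf_mode mode;
 *    ac_surface_compute_bo_metadata(info, surf, &tiling_flags);
 *    ac_surface_apply_bo_metadata(info, surf, tiling_flags, &mode);
 *
 * The legacy fields are stored as log2 values: 8 banks are encoded as
 * util_logbase2(8) - 1 = 2 and decoded back as 2 << 2 = 8; bankw, bankh and mtilea
 * round-trip through their log2 encodings the same way.
 */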
3591
3592 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
3593 {
3594 return (ATI_VENDOR_ID << 16) | info->pci_id;
3595 }
3596
3597 /* This should be called after ac_compute_surface. */
3598 bool ac_surface_apply_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3599 unsigned num_storage_samples, unsigned num_mipmap_levels,
3600 unsigned size_metadata, const uint32_t metadata[64])
3601 {
3602 const uint32_t *desc = &metadata[2];
3603 uint64_t offset;
3604
3605 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
3606 return true;
3607
3608 if (info->gfx_level >= GFX9)
3609 offset = surf->u.gfx9.surf_offset;
3610 else
3611 offset = (uint64_t)surf->u.legacy.level[0].offset_256B * 256;
3612
3613 if (offset || /* Non-zero planes ignore metadata. */
3614 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
3615 metadata[0] == 0 || /* invalid version number (1 and 2 layouts are compatible) */
3616 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
3617 /* Disable DCC because it might not be enabled. */
3618 ac_surface_zero_dcc_fields(surf);
3619
3620 /* Don't report an error if the texture comes from an incompatible driver,
3621 * but this might not work.
3622 */
3623 return true;
3624 }
3625
3626 /* Validate that sample counts and the number of mipmap levels match. */
3627 unsigned desc_last_level = info->gfx_level >= GFX12 ? G_00A00C_LAST_LEVEL_GFX12(desc[3])
3628 : G_008F1C_LAST_LEVEL(desc[3]);
3629 unsigned type = G_008F1C_TYPE(desc[3]);
3630
3631 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
3632 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
3633
3634 if (desc_last_level != log_samples) {
3635 fprintf(stderr,
3636 "amdgpu: invalid MSAA texture import, "
3637 "metadata has log2(samples) = %u, the caller set %u\n",
3638 desc_last_level, log_samples);
3639 return false;
3640 }
3641 } else {
3642 if (desc_last_level != num_mipmap_levels - 1) {
3643 fprintf(stderr,
3644 "amdgpu: invalid mipmapped texture import, "
3645 "metadata has last_level = %u, the caller set %u\n",
3646 desc_last_level, num_mipmap_levels - 1);
3647 return false;
3648 }
3649 }
3650
3651 if (info->gfx_level >= GFX8 && info->gfx_level < GFX12 && G_008F28_COMPRESSION_EN(desc[6])) {
3652 /* Read DCC information. */
3653 switch (info->gfx_level) {
3654 case GFX8:
3655 surf->meta_offset = (uint64_t)desc[7] << 8;
3656 break;
3657
3658 case GFX9:
3659 surf->meta_offset =
3660 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
3661 surf->u.gfx9.color.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
3662 surf->u.gfx9.color.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
3663
3664 /* If DCC is unaligned, this can only be a displayable image. */
3665 if (!surf->u.gfx9.color.dcc.pipe_aligned && !surf->u.gfx9.color.dcc.rb_aligned)
3666 assert(surf->is_displayable);
3667 break;
3668
3669 case GFX10:
3670 case GFX10_3:
3671 case GFX11:
3672 case GFX11_5:
3673 surf->meta_offset =
3674 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
3675 surf->u.gfx9.color.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
3676 break;
3677
3678 default:
3679 assert(0);
3680 return false;
3681 }
3682 } else {
3683 /* Disable DCC. dcc_offset is always set by texture_from_handle
3684 * and must be cleared here.
3685 */
3686 ac_surface_zero_dcc_fields(surf);
3687 }
3688
3689 return true;
3690 }
3691
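/* Build the versioned UMD metadata blob described by the layout comment inside:
 * with include_tool_md and a valid modifier the blob is 13 + 2 * nplanes dwords
 * (e.g. 76 bytes for 3 planes), 11 dwords without a modifier, and
 * 10 + num_mipmap_levels dwords on gfx8 and older.
 */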
3692 void ac_surface_compute_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3693 unsigned num_mipmap_levels, uint32_t desc[8],
3694 unsigned *size_metadata, uint32_t metadata[64],
3695 bool include_tool_md)
3696 {
3697 /* Clear the base address and set the relative DCC offset. */
3698 desc[0] = 0;
3699 desc[1] &= C_008F14_BASE_ADDRESS_HI;
3700
3701 switch (info->gfx_level) {
3702 case GFX6:
3703 case GFX7:
3704 break;
3705 case GFX8:
3706 desc[7] = surf->meta_offset >> 8;
3707 break;
3708 case GFX9:
3709 desc[7] = surf->meta_offset >> 8;
3710 desc[5] &= C_008F24_META_DATA_ADDRESS;
3711 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->meta_offset >> 40);
3712 break;
3713 case GFX10:
3714 case GFX10_3:
3715 case GFX11:
3716 case GFX11_5:
3717 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
3718 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->meta_offset >> 8);
3719 desc[7] = surf->meta_offset >> 16;
3720 break;
3721 default: /* Gfx12 doesn't have any metadata address */
3722 break;
3723 }
3724
3725    /* Metadata image format version 1 and 2. Version 2 uses the same layout as
3726 * version 1 with some additional fields (used if include_tool_md=true).
3727 * [0] = metadata_format_identifier
3728 * [1] = (VENDOR_ID << 16) | PCI_ID
3729 * [2:9] = image descriptor for the whole resource
3730 * [2] is always 0, because the base address is cleared
3731 * [9] is the DCC offset bits [39:8] from the beginning of
3732 * the buffer
3733     * gfx8-: [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
3734 * ---- The data below is only set in version=2.
3735 * It shouldn't be used by the driver as it's only present to help
3736 * tools (eg: umr) that would want to access this buffer.
3737 * gfx9+ if valid modifier: [10:11] = modifier
3738 * [12:12+3*nplane] = [offset, stride]
3739 * else: [10]: stride
3740 */
3741 metadata[0] = include_tool_md ? 2 : 1; /* metadata image format version */
3742
3743 /* Tiling modes are ambiguous without a PCI ID. */
3744 metadata[1] = ac_get_umd_metadata_word1(info);
3745
3746 /* Dwords [2:9] contain the image descriptor. */
3747 memcpy(&metadata[2], desc, 8 * 4);
3748 *size_metadata = 10 * 4;
3749
3750 /* Dwords [10:..] contain the mipmap level offsets. */
3751 if (info->gfx_level <= GFX8) {
3752 for (unsigned i = 0; i < num_mipmap_levels; i++)
3753 metadata[10 + i] = surf->u.legacy.level[i].offset_256B;
3754
3755 *size_metadata += num_mipmap_levels * 4;
3756 } else if (include_tool_md) {
3757 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3758 /* Modifier */
3759 metadata[10] = surf->modifier;
3760 metadata[11] = surf->modifier >> 32;
3761 /* Num planes */
3762 int nplanes = ac_surface_get_nplanes(surf);
3763 metadata[12] = nplanes;
3764 int ndw = 13;
3765 for (int i = 0; i < nplanes; i++) {
3766 metadata[ndw++] = ac_surface_get_plane_offset(info->gfx_level,
3767 surf, i, 0);
3768 metadata[ndw++] = ac_surface_get_plane_stride(info->gfx_level,
3769 surf, i, 0);
3770 }
3771 *size_metadata = ndw * 4;
3772 } else {
3773 metadata[10] = ac_surface_get_plane_stride(info->gfx_level,
3774 surf, 0, 0);
3775 *size_metadata = 11 * 4;
3776 }
3777 }
3778 }
3779
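/* Return the pitch alignment (in elements) for the surface's swizzle/tile mode.
 * E.g. a 64KB 2D/R swizzle with bpe = 4 (block_size_log2 = 16, bpe_log2 = 2) gives
 * 1 << (8 - 1) = 128 elements on gfx12 and 1 << ((16 - 2 + 1) / 2) = 128 on gfx10.x.
 */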
3780 static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
3781 const struct radeon_surf *surf)
3782 {
3783 if (surf->is_linear) {
3784 if (info->gfx_level >= GFX12)
3785 return 128 / surf->bpe;
3786 else if (info->gfx_level >= GFX9)
3787 return 256 / surf->bpe;
3788 else
3789 return MAX2(8, 64 / surf->bpe);
3790 }
3791
3792 if (info->gfx_level >= GFX12) {
3793 if (surf->u.gfx9.resource_type == RADEON_RESOURCE_3D)
3794 return 1u << 31; /* reject 3D textures by returning an impossible alignment */
3795
3796 unsigned bpe_log2 = util_logbase2(surf->bpe);
3797 unsigned block_size_log2;
3798
3799 switch (surf->u.gfx9.swizzle_mode) {
3800 case ADDR3_256B_2D:
3801 block_size_log2 = 8;
3802 break;
3803 case ADDR3_4KB_2D:
3804 block_size_log2 = 12;
3805 break;
3806 case ADDR3_64KB_2D:
3807 block_size_log2 = 16;
3808 break;
3809 case ADDR3_256KB_2D:
3810 block_size_log2 = 18;
3811 break;
3812 default:
3813 unreachable("unhandled swizzle mode");
3814 }
3815
3816 return 1 << ((block_size_log2 >> 1) - (bpe_log2 >> 1));
3817 } else if (info->gfx_level >= GFX9) {
3818 if (surf->u.gfx9.resource_type == RADEON_RESOURCE_3D)
3819 return 1u << 31; /* reject 3D textures by returning an impossible alignment */
3820
3821 unsigned bpe_log2 = util_logbase2(surf->bpe);
3822 unsigned block_size_log2;
3823
3824 switch((surf->u.gfx9.swizzle_mode & ~3) + 3) {
3825 case ADDR_SW_256B_R:
3826 block_size_log2 = 8;
3827 break;
3828 case ADDR_SW_4KB_R:
3829 case ADDR_SW_4KB_R_X:
3830 block_size_log2 = 12;
3831 break;
3832 case ADDR_SW_64KB_R:
3833 case ADDR_SW_64KB_R_T:
3834 case ADDR_SW_64KB_R_X:
3835 block_size_log2 = 16;
3836 break;
3837 case ADDR_SW_256KB_R_X:
3838 block_size_log2 = 18;
3839 break;
3840 default:
3841 unreachable("unhandled swizzle mode");
3842 }
3843
3844 if (info->gfx_level >= GFX10) {
3845 return 1 << (((block_size_log2 - bpe_log2) + 1) / 2);
3846 } else {
3847 static unsigned block_256B_width[] = {16, 16, 8, 8, 4};
3848 return block_256B_width[bpe_log2] << ((block_size_log2 - 8) / 2);
3849 }
3850 } else {
3851 unsigned mode;
3852
3853 if ((surf->flags & RADEON_SURF_Z_OR_SBUFFER) == RADEON_SURF_SBUFFER)
3854 mode = surf->u.legacy.zs.stencil_level[0].mode;
3855 else
3856 mode = surf->u.legacy.level[0].mode;
3857
3858 /* Note that display usage requires an alignment of 32 pixels (see AdjustPitchAlignment),
3859 * which is not checked here.
3860 */
3861 switch (mode) {
3862 case RADEON_SURF_MODE_1D:
3863 return 8;
3864 case RADEON_SURF_MODE_2D:
3865 return 8 * surf->u.legacy.bankw * surf->u.legacy.mtilea *
3866 ac_pipe_config_to_num_pipes(surf->u.legacy.pipe_config);
3867 default:
3868 unreachable("unhandled surf mode");
3869 }
3870 }
3871 }
3872
3873 bool ac_surface_override_offset_stride(const struct radeon_info *info, struct radeon_surf *surf,
3874 unsigned num_layers, unsigned num_mipmap_levels,
3875 uint64_t offset, unsigned pitch)
3876 {
3877 if ((ac_surface_get_pitch_align(info, surf) - 1) & pitch)
3878 return false;
3879
3880 /* Require an equal pitch with metadata (DCC), mipmapping, non-linear layout (that could be
3881 * relaxed), or when the chip is GFX10, which is the only generation that can't override
3882 * the pitch.
3883 */
3884 bool require_equal_pitch = surf->surf_size != surf->total_size ||
3885 num_layers != 1 ||
3886 num_mipmap_levels != 1 ||
3887 (info->gfx_level >= GFX9 && !surf->is_linear) ||
3888 info->gfx_level == GFX10;
3889
3890 if (info->gfx_level >= GFX9) {
3891 if (pitch) {
3892 if (surf->u.gfx9.surf_pitch != pitch && require_equal_pitch)
3893 return false;
3894
3895 if (pitch != surf->u.gfx9.surf_pitch) {
3896 unsigned slices = surf->surf_size / surf->u.gfx9.surf_slice_size;
3897
3898 surf->u.gfx9.uses_custom_pitch = true;
3899 surf->u.gfx9.surf_pitch = pitch;
3900 surf->u.gfx9.epitch = pitch - 1;
3901 surf->u.gfx9.pitch[0] = pitch;
3902 surf->u.gfx9.surf_slice_size = (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
3903 surf->total_size = surf->surf_size = surf->u.gfx9.surf_slice_size * slices;
3904 }
3905 }
3906
3907 surf->u.gfx9.surf_offset = offset;
3908 if (surf->has_stencil)
3909 surf->u.gfx9.zs.stencil_offset += offset;
3910 } else {
3911 if (pitch) {
3912 if (surf->u.legacy.level[0].nblk_x != pitch && require_equal_pitch)
3913 return false;
3914
3915 surf->u.legacy.level[0].nblk_x = pitch;
3916 surf->u.legacy.level[0].slice_size_dw =
3917 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
3918 }
3919
3920 if (offset) {
3921 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
3922 surf->u.legacy.level[i].offset_256B += offset / 256;
3923 }
3924 }
3925
3926 if (offset & ((1 << surf->alignment_log2) - 1) ||
3927 offset >= UINT64_MAX - surf->total_size)
3928 return false;
3929
3930 if (surf->meta_offset)
3931 surf->meta_offset += offset;
3932 if (surf->fmask_offset)
3933 surf->fmask_offset += offset;
3934 if (surf->cmask_offset)
3935 surf->cmask_offset += offset;
3936 if (surf->display_dcc_offset)
3937 surf->display_dcc_offset += offset;
3938 return true;
3939 }
3940
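/* The plane helpers below expose the DCC modifier layout: plane 0 is the image
 * itself, plane 1 is the displayable DCC (or the regular DCC metadata when there is
 * no separate displayable copy), and plane 2 is the pipe-aligned DCC metadata.
 * More than one plane is only reported when a DCC modifier is set.
 */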
3941 unsigned ac_surface_get_nplanes(const struct radeon_surf *surf)
3942 {
3943 if (surf->modifier == DRM_FORMAT_MOD_INVALID)
3944 return 1;
3945 else if (surf->display_dcc_offset)
3946 return 3;
3947 else if (surf->meta_offset)
3948 return 2;
3949 else
3950 return 1;
3951 }
3952
3953 uint64_t ac_surface_get_plane_offset(enum amd_gfx_level gfx_level,
3954 const struct radeon_surf *surf,
3955 unsigned plane, unsigned layer)
3956 {
3957 switch (plane) {
3958 case 0:
3959 if (gfx_level >= GFX9) {
3960 return surf->u.gfx9.surf_offset +
3961 layer * surf->u.gfx9.surf_slice_size;
3962 } else {
3963 return (uint64_t)surf->u.legacy.level[0].offset_256B * 256 +
3964 layer * (uint64_t)surf->u.legacy.level[0].slice_size_dw * 4;
3965 }
3966 case 1:
3967 assert(!layer);
3968 return surf->display_dcc_offset ?
3969 surf->display_dcc_offset : surf->meta_offset;
3970 case 2:
3971 assert(!layer);
3972 return surf->meta_offset;
3973 default:
3974 unreachable("Invalid plane index");
3975 }
3976 }
3977
3978 uint64_t ac_surface_get_plane_stride(enum amd_gfx_level gfx_level,
3979 const struct radeon_surf *surf,
3980 unsigned plane, unsigned level)
3981 {
3982 switch (plane) {
3983 case 0:
3984 if (gfx_level >= GFX9) {
3985 return (surf->is_linear ? surf->u.gfx9.pitch[level] : surf->u.gfx9.surf_pitch) * surf->bpe;
3986 } else {
3987 return surf->u.legacy.level[level].nblk_x * surf->bpe;
3988 }
3989 case 1:
3990 return 1 + (surf->display_dcc_offset ?
3991 surf->u.gfx9.color.display_dcc_pitch_max : surf->u.gfx9.color.dcc_pitch_max);
3992 case 2:
3993 return surf->u.gfx9.color.dcc_pitch_max + 1;
3994 default:
3995 unreachable("Invalid plane index");
3996 }
3997 }
3998
3999 uint64_t ac_surface_get_plane_size(const struct radeon_surf *surf,
4000 unsigned plane)
4001 {
4002 switch (plane) {
4003 case 0:
4004 return surf->surf_size;
4005 case 1:
4006 return surf->display_dcc_offset ?
4007 surf->u.gfx9.color.display_dcc_size : surf->meta_size;
4008 case 2:
4009 return surf->meta_size;
4010 default:
4011 unreachable("Invalid plane index");
4012 }
4013 }
4014
4015 uint64_t
4016 ac_surface_addr_from_coord(struct ac_addrlib *addrlib, const struct radeon_info *info,
4017 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4018 unsigned level, unsigned x, unsigned y, unsigned layer, bool is_3d)
4019 {
4020 /* Only implemented for GFX9+ */
4021 assert(info->gfx_level >= GFX9);
4022
4023 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT input = {0};
4024 input.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT);
4025 input.slice = layer;
4026 input.mipId = level;
4027 input.unalignedWidth = DIV_ROUND_UP(surf_info->width, surf->blk_w);
4028 input.unalignedHeight = DIV_ROUND_UP(surf_info->height, surf->blk_h);
4029 input.numSlices = is_3d ? surf_info->depth : surf_info->array_size;
4030 input.numMipLevels = surf_info->levels;
4031 input.numSamples = surf_info->samples;
4032 input.numFrags = surf_info->samples;
4033 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4034 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
4035 input.pipeBankXor = surf->tile_swizzle;
4036 input.bpp = surf->bpe * 8;
4037 input.x = x;
4038 input.y = y;
4039
4040 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT output = {0};
4041 output.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT);
4042 Addr2ComputeSurfaceAddrFromCoord(addrlib->handle, &input, &output);
4043 return output.addr;
4044 }
4045
4046 static void
4047 gfx12_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4048 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4049 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4050 {
4051 ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT input = {0};
4052 input.size = sizeof(ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT);
4053 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4054 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
4055 switch (surf->bpe) {
4056 case 8:
4057 input.format = ADDR_FMT_BC1;
4058 break;
4059 case 16:
4060 input.format = ADDR_FMT_BC3;
4061 break;
4062 default:
4063 assert(0);
4064 }
4065 input.unAlignedDims.width = surf_info->width;
4066 input.unAlignedDims.height = surf_info->height;
4067 input.numMipLevels = surf_info->levels;
4068 input.pipeBankXor = surf->tile_swizzle;
4069 input.slice = layer;
4070 input.mipId = level;
4071
4072 ADDR_E_RETURNCODE res;
4073 ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT output = {0};
4074 output.size = sizeof(ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT);
4075 res = Addr3ComputeNonBlockCompressedView(addrlib->handle, &input, &output);
4076 if (res == ADDR_OK) {
4077 out->base_address_offset = output.offset;
4078 out->tile_swizzle = output.pipeBankXor;
4079 out->width = output.unAlignedDims.width;
4080 out->height = output.unAlignedDims.height;
4081 out->num_levels = output.numMipLevels;
4082 out->level = output.mipId;
4083 out->valid = true;
4084 } else {
4085 out->valid = false;
4086 }
4087 }
4088
4089 static void
4090 gfx10_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4091 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4092 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4093 {
4094 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT input = {0};
4095 input.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT);
4096 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4097 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
4098 switch (surf->bpe) {
4099 case 8:
4100 input.format = ADDR_FMT_BC1;
4101 break;
4102 case 16:
4103 input.format = ADDR_FMT_BC3;
4104 break;
4105 default:
4106 assert(0);
4107 }
4108 input.width = surf_info->width;
4109 input.height = surf_info->height;
4110 input.numSlices = surf_info->array_size;
4111 input.numMipLevels = surf_info->levels;
4112 input.pipeBankXor = surf->tile_swizzle;
4113 input.slice = layer;
4114 input.mipId = level;
4115
4116 ADDR_E_RETURNCODE res;
4117 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT output = {0};
4118 output.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT);
4119 res = Addr2ComputeNonBlockCompressedView(addrlib->handle, &input, &output);
4120 if (res == ADDR_OK) {
4121 out->base_address_offset = output.offset;
4122 out->tile_swizzle = output.pipeBankXor;
4123 out->width = output.unalignedWidth;
4124 out->height = output.unalignedHeight;
4125 out->num_levels = output.numMipLevels;
4126 out->level = output.mipId;
4127 out->valid = true;
4128 } else {
4129 out->valid = false;
4130 }
4131 }
4132
4133 void
4134 ac_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4135 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4136 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4137 {
4138 /* Only implemented for GFX10+ */
4139 assert(info->gfx_level >= GFX10);
4140
4141 if (info->gfx_level >= GFX12) {
4142 gfx12_surface_compute_nbc_view(addrlib, info, surf, surf_info, level, layer, out);
4143 } else {
4144 gfx10_surface_compute_nbc_view(addrlib, info, surf, surf_info, level, layer, out);
4145 }
4146 }
4147
4148 void ac_surface_print_info(FILE *out, const struct radeon_info *info,
4149 const struct radeon_surf *surf)
4150 {
4151 if (info->gfx_level >= GFX9) {
4152 fprintf(out,
4153 " Surf: size=%" PRIu64 ", slice_size=%" PRIu64 ", "
4154 "alignment=%u, swmode=%u, tile_swizzle=%u, epitch=%u, pitch=%u, blk_w=%u, "
4155 "blk_h=%u, bpe=%u, flags=0x%"PRIx64"\n",
4156 surf->surf_size, surf->u.gfx9.surf_slice_size,
4157 1 << surf->surf_alignment_log2, surf->u.gfx9.swizzle_mode, surf->tile_swizzle,
4158 surf->u.gfx9.epitch, surf->u.gfx9.surf_pitch,
4159 surf->blk_w, surf->blk_h, surf->bpe, surf->flags);
4160
4161 if (surf->fmask_offset)
4162 fprintf(out,
4163 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
4164 "alignment=%u, swmode=%u, epitch=%u\n",
4165 surf->fmask_offset, surf->fmask_size,
4166 1 << surf->fmask_alignment_log2, surf->u.gfx9.color.fmask_swizzle_mode,
4167 surf->u.gfx9.color.fmask_epitch);
4168
4169 if (surf->cmask_offset)
4170 fprintf(out,
4171 " CMask: offset=%" PRIu64 ", size=%u, "
4172 "alignment=%u\n",
4173 surf->cmask_offset, surf->cmask_size,
4174 1 << surf->cmask_alignment_log2);
4175
4176 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
4177 fprintf(out,
4178 " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4179 surf->meta_offset, surf->meta_size,
4180 1 << surf->meta_alignment_log2);
4181
4182 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
4183 fprintf(out,
4184 " DCC: offset=%" PRIu64 ", size=%u, "
4185 "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
4186 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2,
4187 surf->u.gfx9.color.display_dcc_pitch_max, surf->num_meta_levels);
4188
4189 if (surf->has_stencil)
4190 fprintf(out,
4191 " Stencil: offset=%" PRIu64 ", swmode=%u, epitch=%u\n",
4192 surf->u.gfx9.zs.stencil_offset,
4193 surf->u.gfx9.zs.stencil_swizzle_mode,
4194 surf->u.gfx9.zs.stencil_epitch);
4195
4196 if (info->gfx_level == GFX12) {
4197 if (surf->u.gfx9.zs.hiz.size) {
4198 fprintf(out,
4199 " HiZ: offset=%" PRIu64 ", size=%u, swmode=%u, width_in_tiles=%u, height_in_tiles=%u\n",
4200 surf->u.gfx9.zs.hiz.offset, surf->u.gfx9.zs.hiz.size, surf->u.gfx9.zs.hiz.swizzle_mode,
4201 surf->u.gfx9.zs.hiz.width_in_tiles, surf->u.gfx9.zs.hiz.height_in_tiles);
4202 }
4203
4204 if (surf->u.gfx9.zs.his.size) {
4205 fprintf(out,
4206 " HiS: offset=%" PRIu64 ", size=%u, swmode=%u, width_in_tiles=%u, height_in_tiles=%u\n",
4207 surf->u.gfx9.zs.his.offset, surf->u.gfx9.zs.his.size, surf->u.gfx9.zs.his.swizzle_mode,
4208 surf->u.gfx9.zs.his.width_in_tiles, surf->u.gfx9.zs.his.height_in_tiles);
4209 }
4210 }
4211 } else {
4212 fprintf(out,
4213 " Surf: size=%" PRIu64 ", alignment=%u, blk_w=%u, blk_h=%u, "
4214 "bpe=%u, flags=0x%"PRIx64"\n",
4215 surf->surf_size, 1 << surf->surf_alignment_log2, surf->blk_w,
4216 surf->blk_h, surf->bpe, surf->flags);
4217
4218 fprintf(out,
4219 " Layout: size=%" PRIu64 ", alignment=%u, bankw=%u, bankh=%u, "
4220 "nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
4221 surf->surf_size, 1 << surf->surf_alignment_log2,
4222 surf->u.legacy.bankw, surf->u.legacy.bankh,
4223 surf->u.legacy.num_banks, surf->u.legacy.mtilea,
4224 surf->u.legacy.tile_split, surf->u.legacy.pipe_config,
4225 (surf->flags & RADEON_SURF_SCANOUT) != 0);
4226
4227 if (surf->fmask_offset)
4228 fprintf(out,
4229 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
4230 "alignment=%u, pitch_in_pixels=%u, bankh=%u, "
4231 "slice_tile_max=%u, tile_mode_index=%u\n",
4232 surf->fmask_offset, surf->fmask_size,
4233 1 << surf->fmask_alignment_log2, surf->u.legacy.color.fmask.pitch_in_pixels,
4234 surf->u.legacy.color.fmask.bankh,
4235 surf->u.legacy.color.fmask.slice_tile_max,
4236 surf->u.legacy.color.fmask.tiling_index);
4237
4238 if (surf->cmask_offset)
4239 fprintf(out,
4240 " CMask: offset=%" PRIu64 ", size=%u, alignment=%u, "
4241 "slice_tile_max=%u\n",
4242 surf->cmask_offset, surf->cmask_size,
4243 1 << surf->cmask_alignment_log2, surf->u.legacy.color.cmask_slice_tile_max);
4244
4245 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
4246 fprintf(out, " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4247 surf->meta_offset, surf->meta_size,
4248 1 << surf->meta_alignment_log2);
4249
4250 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
4251 fprintf(out, " DCC: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4252 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2);
4253
4254 if (surf->has_stencil)
4255 fprintf(out, " StencilLayout: tilesplit=%u\n",
4256 surf->u.legacy.stencil_tile_split);
4257 }
4258 }
4259
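/* The two NIR helpers below evaluate a metadata addressing equation: each address
 * bit is the XOR of the coordinate bits selected by the equation. The result
 * addresses 4-bit metadata nibbles, so the low bit selects the nibble within a byte
 * (bit_position = (address & 1) * 4) and address >> 1 is the byte offset, which is
 * folded together with the metadata block index and XORed with the pipe bits at the
 * pipe-interleave offset.
 */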
4260 static nir_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4261 const struct gfx9_meta_equation *equation,
4262 int blkSizeBias, unsigned blkStart,
4263 nir_def *meta_pitch, nir_def *meta_slice_size,
4264 nir_def *x, nir_def *y, nir_def *z,
4265 nir_def *pipe_xor,
4266 nir_def **bit_position)
4267 {
4268 nir_def *zero = nir_imm_int(b, 0);
4269 nir_def *one = nir_imm_int(b, 1);
4270
4271 assert(info->gfx_level >= GFX10);
4272
4273 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
4274 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
4275 unsigned blkSizeLog2 = meta_block_width_log2 + meta_block_height_log2 + blkSizeBias;
4276
4277 nir_def *coord[] = {x, y, z, 0};
4278 nir_def *address = zero;
4279
4280 for (unsigned i = blkStart; i < blkSizeLog2 + 1; i++) {
4281 nir_def *v = zero;
4282
4283 for (unsigned c = 0; c < 4; c++) {
4284 unsigned index = i * 4 + c - (blkStart * 4);
4285 if (equation->u.gfx10_bits[index]) {
4286 unsigned mask = equation->u.gfx10_bits[index];
4287 nir_def *bits = coord[c];
4288
4289 while (mask)
4290 v = nir_ixor(b, v, nir_iand(b, nir_ushr_imm(b, bits, u_bit_scan(&mask)), one));
4291 }
4292 }
4293
4294 address = nir_ior(b, address, nir_ishl_imm(b, v, i));
4295 }
4296
4297 unsigned blkMask = (1 << blkSizeLog2) - 1;
4298 unsigned pipeMask = (1 << G_0098F8_NUM_PIPES(info->gb_addr_config)) - 1;
4299 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
4300 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
4301 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
4302 nir_def *pb = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
4303 nir_def *blkIndex = nir_iadd(b, nir_imul(b, yb, pb), xb);
4304 nir_def *pipeXor = nir_iand_imm(b, nir_ishl_imm(b, nir_iand_imm(b, pipe_xor, pipeMask),
4305 m_pipeInterleaveLog2), blkMask);
4306
4307 if (bit_position)
4308 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
4309
4310 return nir_iadd(b, nir_iadd(b, nir_imul(b, meta_slice_size, z),
4311 nir_imul(b, blkIndex, nir_ishl_imm(b, one, blkSizeLog2))),
4312 nir_ixor(b, nir_ushr(b, address, one), pipeXor));
4313 }
4314
4315 static nir_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4316 const struct gfx9_meta_equation *equation,
4317 nir_def *meta_pitch, nir_def *meta_height,
4318 nir_def *x, nir_def *y, nir_def *z,
4319 nir_def *sample, nir_def *pipe_xor,
4320 nir_def **bit_position)
4321 {
4322 nir_def *zero = nir_imm_int(b, 0);
4323 nir_def *one = nir_imm_int(b, 1);
4324
4325 assert(info->gfx_level >= GFX9);
4326
4327 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
4328 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
4329 unsigned meta_block_depth_log2 = util_logbase2(equation->meta_block_depth);
4330
4331 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
4332 unsigned numPipeBits = equation->u.gfx9.num_pipe_bits;
4333 nir_def *pitchInBlock = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
4334 nir_def *sliceSizeInBlock = nir_imul(b, nir_ushr_imm(b, meta_height, meta_block_height_log2),
4335 pitchInBlock);
4336
4337 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
4338 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
4339 nir_def *zb = nir_ushr_imm(b, z, meta_block_depth_log2);
4340
4341 nir_def *blockIndex = nir_iadd(b, nir_iadd(b, nir_imul(b, zb, sliceSizeInBlock),
4342 nir_imul(b, yb, pitchInBlock)), xb);
4343 nir_def *coords[] = {x, y, z, sample, blockIndex};
4344
4345 nir_def *address = zero;
4346 unsigned num_bits = equation->u.gfx9.num_bits;
4347 assert(num_bits <= 32);
4348
4349 /* Compute the address up until the last bit that doesn't use the block index. */
4350 for (unsigned i = 0; i < num_bits - 1; i++) {
4351 nir_def *xor = zero;
4352
4353 for (unsigned c = 0; c < 5; c++) {
4354 if (equation->u.gfx9.bit[i].coord[c].dim >= 5)
4355 continue;
4356
4357 assert(equation->u.gfx9.bit[i].coord[c].ord < 32);
4358 nir_def *ison =
4359 nir_iand(b, nir_ushr_imm(b, coords[equation->u.gfx9.bit[i].coord[c].dim],
4360 equation->u.gfx9.bit[i].coord[c].ord), one);
4361
4362 xor = nir_ixor(b, xor, ison);
4363 }
4364 address = nir_ior(b, address, nir_ishl_imm(b, xor, i));
4365 }
4366
4367 /* Fill the remaining bits with the block index. */
4368 unsigned last = num_bits - 1;
4369 address = nir_ior(b, address,
4370 nir_ishl_imm(b, nir_ushr_imm(b, blockIndex,
4371 equation->u.gfx9.bit[last].coord[0].ord),
4372 last));
4373
4374 if (bit_position)
4375 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
4376
4377 nir_def *pipeXor = nir_iand_imm(b, pipe_xor, (1 << numPipeBits) - 1);
4378 return nir_ixor(b, nir_ushr(b, address, one),
4379 nir_ishl_imm(b, pipeXor, m_pipeInterleaveLog2));
4380 }
4381
4382 nir_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4383 unsigned bpe, const struct gfx9_meta_equation *equation,
4384 nir_def *dcc_pitch, nir_def *dcc_height,
4385 nir_def *dcc_slice_size,
4386 nir_def *x, nir_def *y, nir_def *z,
4387 nir_def *sample, nir_def *pipe_xor)
4388 {
4389 if (info->gfx_level >= GFX10) {
4390 unsigned bpp_log2 = util_logbase2(bpe);
4391
4392 return gfx10_nir_meta_addr_from_coord(b, info, equation, bpp_log2 - 8, 1,
4393 dcc_pitch, dcc_slice_size,
4394 x, y, z, pipe_xor, NULL);
4395 } else {
4396 return gfx9_nir_meta_addr_from_coord(b, info, equation, dcc_pitch,
4397 dcc_height, x, y, z,
4398 sample, pipe_xor, NULL);
4399 }
4400 }
4401
4402 nir_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4403 const struct gfx9_meta_equation *equation,
4404 nir_def *cmask_pitch, nir_def *cmask_height,
4405 nir_def *cmask_slice_size,
4406 nir_def *x, nir_def *y, nir_def *z,
4407 nir_def *pipe_xor,
4408 nir_def **bit_position)
4409 {
4410 nir_def *zero = nir_imm_int(b, 0);
4411
4412 if (info->gfx_level >= GFX10) {
4413 return gfx10_nir_meta_addr_from_coord(b, info, equation, -7, 1,
4414 cmask_pitch, cmask_slice_size,
4415 x, y, z, pipe_xor, bit_position);
4416 } else {
4417 return gfx9_nir_meta_addr_from_coord(b, info, equation, cmask_pitch,
4418 cmask_height, x, y, z, zero,
4419 pipe_xor, bit_position);
4420 }
4421 }
4422
4423 nir_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4424 const struct gfx9_meta_equation *equation,
4425 nir_def *htile_pitch,
4426 nir_def *htile_slice_size,
4427 nir_def *x, nir_def *y, nir_def *z,
4428 nir_def *pipe_xor)
4429 {
4430 return gfx10_nir_meta_addr_from_coord(b, info, equation, -4, 2,
4431 htile_pitch, htile_slice_size,
4432 x, y, z, pipe_xor, NULL);
4433 }
4434