/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "si_pm4.h"
#include "util/format/u_format.h"
#include "util/bitset.h"

#ifdef __cplusplus
extern "C" {
#endif

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_FRAGMENT + 1)
#define SI_NUM_SHADERS (PIPE_SHADER_COMPUTE + 1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS 32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS 16
#define SI_NUM_IMAGES 16
#define SI_NUM_IMAGE_SLOTS (SI_NUM_IMAGES * 2) /* the second half are FMASK slots */
#define SI_NUM_SHADER_BUFFERS 32

struct si_screen;
struct si_shader;
struct si_shader_ctx_state;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;
struct legacy_surf_level;
struct pb_slab_entry;

struct si_state_blend {
   struct si_pm4_state pm4;
   uint32_t cb_target_mask;
   /* Each *_4bit field below holds 0xf or 0x0 (4 bits) per render target,
    * set when the field's condition is true for that target (e.g. 0xff when
    * it holds for RT0 and RT1). ANDed with spi_shader_col_format.
    */
   unsigned cb_target_enabled_4bit;
   unsigned blend_enable_4bit;
   unsigned need_src_alpha_4bit;
   unsigned commutative_4bit;
   unsigned dcc_msaa_corruption_4bit;
   bool alpha_to_coverage : 1;
   bool alpha_to_one : 1;
   bool dual_src_blend : 1;
   bool logicop_enable : 1;
   bool allows_noop_optimization : 1;
};

struct si_state_rasterizer {
   struct si_pm4_state pm4;

   /* Register values. */
   unsigned spi_interp_control_0;
   unsigned pa_su_point_size;
   unsigned pa_su_point_minmax;
   unsigned pa_su_line_cntl;
   unsigned pa_sc_mode_cntl_0;
   unsigned pa_su_sc_mode_cntl;
   unsigned pa_cl_ngg_cntl;
   unsigned pa_sc_edgerule;
   unsigned pa_su_poly_offset_db_fmt_cntl[3];
   unsigned pa_su_poly_offset_clamp;
   unsigned pa_su_poly_offset_frontback_scale;
   unsigned pa_su_poly_offset_frontback_offset[3];

   unsigned pa_sc_line_stipple;
   unsigned pa_cl_clip_cntl;
   float line_width;
   float max_point_size;
   unsigned ngg_cull_flags_tris : 16;
   unsigned ngg_cull_flags_tris_y_inverted : 16;
   unsigned ngg_cull_flags_lines : 16;
   unsigned sprite_coord_enable : 8;
   unsigned clip_plane_enable : 8;
   unsigned half_pixel_center : 1;
   unsigned flatshade : 1;
   unsigned flatshade_first : 1;
   unsigned two_side : 1;
   unsigned multisample_enable : 1;
   unsigned force_persample_interp : 1;
   unsigned line_stipple_enable : 1;
   unsigned poly_stipple_enable : 1;
   unsigned line_smooth : 1;
   unsigned poly_smooth : 1;
   unsigned point_smooth : 1;
   unsigned uses_poly_offset : 1;
   unsigned clamp_fragment_color : 1;
   unsigned clamp_vertex_color : 1;
   unsigned rasterizer_discard : 1;
   unsigned scissor_enable : 1;
   unsigned clip_halfz : 1;
   unsigned polygon_mode_is_lines : 1;
   unsigned polygon_mode_is_points : 1;
   unsigned perpendicular_end_caps : 1;
   unsigned bottom_edge_rule : 1;
   int force_front_face_input : 2;
};

struct si_dsa_stencil_ref_part {
   uint8_t valuemask[2];
   uint8_t writemask[2];
};

struct si_dsa_order_invariance {
   /** Whether the final result in Z/S buffers is guaranteed to be
    * invariant under changes to the order in which fragments arrive. */
   bool zs : 1;

   /** Whether the set of fragments that pass the combined Z/S test is
    * guaranteed to be invariant under changes to the order in which
    * fragments arrive. */
   bool pass_set : 1;
};

struct si_state_dsa {
   struct si_pm4_state pm4;
   struct si_dsa_stencil_ref_part stencil_ref;

   /* Register values. */
   unsigned db_depth_control;
   unsigned db_stencil_control;
   unsigned db_depth_bounds_min;
   unsigned db_depth_bounds_max;
   unsigned spi_shader_user_data_ps_alpha_ref;
   unsigned db_stencil_read_mask;
   unsigned db_stencil_write_mask;

   /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
   struct si_dsa_order_invariance order_invariance[2];

   uint8_t alpha_func : 3;
   bool depth_enabled : 1;
   bool depth_write_enabled : 1;
   bool stencil_enabled : 1;
   bool stencil_write_enabled : 1;
   bool db_can_write : 1;
   bool depth_bounds_enabled : 1;
};

struct si_stencil_ref {
   struct pipe_stencil_ref state;
   struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements {
   struct si_resource *instance_divisor_factor_buffer;

   /* Bitmask of elements that always need a fixup to be applied. */
   uint16_t fix_fetch_always;

   /* Bitmask of elements whose fetch should always be opencoded. */
   uint16_t fix_fetch_opencode;

   /* Bitmask of elements which need to be opencoded if the vertex buffer
    * is unaligned. */
   uint16_t fix_fetch_unaligned;

   /* For elements in fix_fetch_unaligned: whether the effective
    * element load size as seen by the hardware is a dword (as opposed
    * to a short).
    */
   uint16_t hw_load_is_dword;

   /* Bitmask of vertex buffers requiring alignment check */
   uint16_t vb_alignment_check_mask;

   uint8_t count;

   /* Vertex buffer descriptor list size aligned for optimal prefetch. */
   uint16_t vb_desc_list_alloc_size;
   uint16_t instance_divisor_is_one;     /* bitmask of inputs */
   uint16_t instance_divisor_is_fetched; /* bitmask of inputs */

   uint8_t fix_fetch[SI_MAX_ATTRIBS];
   uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

   struct {
      uint32_t rsrc_word3;
      uint16_t src_offset;
      uint16_t stride;
      uint8_t format_size;
   } elem[SI_MAX_ATTRIBS];
};

union si_state {
   struct si_state_named {
      struct si_state_blend *blend;
      struct si_state_rasterizer *rasterizer;
      struct si_state_dsa *dsa;
      struct si_shader *ls;
      struct si_shader *hs;
      struct si_shader *es;
      struct si_shader *gs;
      struct si_shader *vs;
      struct si_shader *ps;
      struct si_sqtt_fake_pipeline *sqtt_pipeline;
   } named;
   struct si_pm4_state *array[sizeof(struct si_state_named) / sizeof(struct si_pm4_state *)];
};

#define SI_STATE_IDX(name) (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1ull << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
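
/* Illustrative sketch (not part of the driver interface): because si_state
 * aliases the named pointers with "array", generic code can walk all bound
 * PM4 states by index while named access stays type-safe:
 *
 *    for (unsigned i = 0; i < SI_NUM_STATES; i++) {
 *       struct si_pm4_state *state = sctx->queued.array[i];
 *       // ...
 *    }
 *
 * SI_STATE_IDX(rasterizer) is the array index of named.rasterizer, and
 * SI_STATE_BIT(rasterizer) the matching dirty bit (see si_pm4_bind_state
 * below).
 */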

union si_state_atoms {
   struct si_atoms_s {
      /* This must be first. */
      struct si_atom pm4_states[SI_NUM_STATES];
      struct si_atom gfx_add_all_to_bo_list;
      struct si_atom streamout_enable;
      struct si_atom framebuffer;
      struct si_atom sample_locations;
      struct si_atom db_render_state;
      struct si_atom dpbb_state;
      struct si_atom msaa_config;
      struct si_atom sample_mask;
      struct si_atom cb_render_state;
      struct si_atom blend_color;
      struct si_atom clip_regs;
      struct si_atom clip_state;
      struct si_atom gfx_shader_pointers;
      struct si_atom guardband;
      struct si_atom scissors;
      struct si_atom viewports;
      struct si_atom stencil_ref;
      struct si_atom spi_map;
      struct si_atom scratch_state;
      struct si_atom window_rectangles;
      struct si_atom shader_query;
      struct si_atom ngg_cull_state;
      struct si_atom vgt_pipeline_state;
      struct si_atom tess_io_layout;
      struct si_atom barrier;
      struct si_atom streamout_begin; /* this must be done after barrier */
      struct si_atom render_cond; /* this must be after barrier */
      struct si_atom spi_ge_ring_state; /* this must be last because it waits for idle. */
   } s;
   struct si_atom array[sizeof(struct si_atoms_s) / sizeof(struct si_atom)];
};

#define SI_ATOM_BIT(name) (1ull << (offsetof(union si_state_atoms, s.name) / sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms) / sizeof(struct si_atom))

static inline uint64_t si_atoms_that_always_roll_context(void)
{
   return SI_STATE_BIT(blend) |
          SI_ATOM_BIT(streamout_begin) | SI_ATOM_BIT(streamout_enable) | SI_ATOM_BIT(framebuffer) |
          SI_ATOM_BIT(sample_locations) | SI_ATOM_BIT(sample_mask) | SI_ATOM_BIT(blend_color) |
          SI_ATOM_BIT(clip_state) | SI_ATOM_BIT(scissors) | SI_ATOM_BIT(viewports) |
          SI_ATOM_BIT(stencil_ref) | SI_ATOM_BIT(scratch_state) | SI_ATOM_BIT(window_rectangles);
}

struct si_shader_data {
   uint32_t sh_base[SI_NUM_SHADERS];
};

/* Registers whose values are tracked by si_context. */
enum si_tracked_reg
{
   /* CONTEXT registers. */
   /* 2 consecutive registers (GFX6-11), or separate registers (GFX12) */
   SI_TRACKED_DB_RENDER_CONTROL,
   SI_TRACKED_DB_COUNT_CONTROL,

   SI_TRACKED_DB_DEPTH_CONTROL,
   SI_TRACKED_DB_STENCIL_CONTROL,
   /* 2 consecutive registers */
   SI_TRACKED_DB_DEPTH_BOUNDS_MIN,
   SI_TRACKED_DB_DEPTH_BOUNDS_MAX,

   SI_TRACKED_SPI_INTERP_CONTROL_0,
   SI_TRACKED_PA_SU_POINT_SIZE,
   SI_TRACKED_PA_SU_POINT_MINMAX,
   SI_TRACKED_PA_SU_LINE_CNTL,
   SI_TRACKED_PA_SC_MODE_CNTL_0,
   SI_TRACKED_PA_SU_SC_MODE_CNTL,
   SI_TRACKED_PA_SC_EDGERULE,

   /* 6 consecutive registers */
   SI_TRACKED_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
   SI_TRACKED_PA_SU_POLY_OFFSET_CLAMP,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_OFFSET,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_SCALE,
   SI_TRACKED_PA_SU_POLY_OFFSET_BACK_OFFSET,

   /* 2 consecutive registers */
   SI_TRACKED_PA_SC_LINE_CNTL,
   SI_TRACKED_PA_SC_AA_CONFIG,

   /* 5 consecutive registers (GFX6-11) */
   SI_TRACKED_PA_SU_VTX_CNTL,
   /* 4 consecutive registers (GFX12) */
   SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

   /* Non-consecutive register */
   SI_TRACKED_SPI_SHADER_POS_FORMAT,

   /* 5 consecutive registers (GFX12), or 2 consecutive registers (GFX6-11) */
   SI_TRACKED_SPI_SHADER_Z_FORMAT,
   SI_TRACKED_SPI_SHADER_COL_FORMAT,
   /* Continuing consecutive registers (GFX12), or separate register (GFX6-11) */
   SI_TRACKED_SPI_BARYC_CNTL,
   /* Continuing consecutive registers (GFX12), or 2 consecutive registers (GFX6-11) */
   SI_TRACKED_SPI_PS_INPUT_ENA,
   SI_TRACKED_SPI_PS_INPUT_ADDR,

   SI_TRACKED_DB_EQAA,
   SI_TRACKED_DB_RENDER_OVERRIDE2,
   SI_TRACKED_DB_SHADER_CONTROL,
   SI_TRACKED_CB_SHADER_MASK,
   SI_TRACKED_CB_TARGET_MASK,
   SI_TRACKED_PA_CL_CLIP_CNTL,
   SI_TRACKED_PA_CL_VS_OUT_CNTL,
   SI_TRACKED_PA_CL_VTE_CNTL,
   SI_TRACKED_PA_SC_CLIPRECT_RULE,
   SI_TRACKED_PA_SC_LINE_STIPPLE,
   SI_TRACKED_PA_SC_MODE_CNTL_1,
   SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
   SI_TRACKED_SPI_PS_IN_CONTROL,
   SI_TRACKED_VGT_GS_INSTANCE_CNT,
   SI_TRACKED_VGT_GS_MAX_VERT_OUT,
   SI_TRACKED_VGT_SHADER_STAGES_EN,
   SI_TRACKED_VGT_LS_HS_CONFIG,
   SI_TRACKED_VGT_TF_PARAM,
   SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL, /* GFX8-9 (only with has_small_prim_filter_sample_loc_bug) */
   SI_TRACKED_PA_SC_BINNER_CNTL_0, /* GFX9+ */
   SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP, /* GFX10+ - the SMALL_PRIM_FILTER slot above can be reused */
   SI_TRACKED_GE_NGG_SUBGRP_CNTL, /* GFX10+ */
   SI_TRACKED_PA_CL_NGG_CNTL, /* GFX10+ */
   SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL, /* GFX10.3+ */

   /* 3 consecutive registers */
   SI_TRACKED_SX_PS_DOWNCONVERT, /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_EPSILON, /* GFX8+ */
   SI_TRACKED_SX_BLEND_OPT_CONTROL, /* GFX8+ */

   /* The slots below can be reused by other generations. */
   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, /* GFX6-8 (GFX9+ can reuse this slot) */
   SI_TRACKED_VGT_REUSE_OFF, /* GFX6-8,10.3 */
   SI_TRACKED_IA_MULTI_VGT_PARAM, /* GFX6-8 (GFX9+ can reuse this slot) */

   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP, /* GFX9 - the slots above can be reused */
   SI_TRACKED_VGT_GS_ONCHIP_CNTL, /* GFX9-10 - the slots above can be reused */

   SI_TRACKED_VGT_GSVS_RING_ITEMSIZE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_MODE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE, /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 3 consecutive registers */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   /* 4 consecutive registers */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2, /* GFX6-10 (GFX11+ can reuse this slot) */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3, /* GFX6-10 (GFX11+ can reuse this slot) */

   SI_TRACKED_SPI_VS_OUT_CONFIG, /* GFX6-11 */
   SI_TRACKED_VGT_PRIMITIVEID_EN, /* GFX6-11 */
   SI_TRACKED_CB_DCC_CONTROL, /* GFX8-11 */
   SI_TRACKED_DB_STENCIL_READ_MASK, /* GFX12+ */
   SI_TRACKED_DB_STENCIL_WRITE_MASK, /* GFX12+ */
   SI_TRACKED_PA_SC_HISZ_CONTROL, /* GFX12+ */
   SI_TRACKED_PA_SC_LINE_STIPPLE_RESET, /* GFX12+ */

   SI_NUM_TRACKED_CONTEXT_REGS,
   SI_FIRST_TRACKED_OTHER_REG = SI_NUM_TRACKED_CONTEXT_REGS,

   /* SH and UCONFIG registers. */
   SI_TRACKED_GE_PC_ALLOC = SI_FIRST_TRACKED_OTHER_REG, /* GFX10-11 */
   SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS, /* GFX7-11 */
   SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS, /* GFX10+ */
   SI_TRACKED_VGT_GS_OUT_PRIM_TYPE_UCONFIG, /* GFX11+ */
   SI_TRACKED_SPI_SHADER_GS_OUT_CONFIG_PS, /* GFX12+ */
   SI_TRACKED_VGT_PRIMITIVEID_EN_UCONFIG, /* GFX12+ */

   SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG, /* GFX9 only */
   SI_TRACKED_GE_CNTL = SI_TRACKED_IA_MULTI_VGT_PARAM_UCONFIG, /* GFX10+ */

   SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, /* GFX9+ (not tracked on previous chips) */

   /* 3 consecutive registers. */
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_LAYOUT,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_ADDR,
   SI_TRACKED_SPI_SHADER_USER_DATA_HS__VS_STATE_BITS, /* GFX6-8 */

   SI_TRACKED_SPI_SHADER_USER_DATA_LS__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_LS__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_ES__BASE_VERTEX,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__DRAWID,
   SI_TRACKED_SPI_SHADER_USER_DATA_ES__START_INSTANCE,

   SI_TRACKED_SPI_SHADER_USER_DATA_VS__BASE_VERTEX, /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__DRAWID, /* GFX6-10 */
   SI_TRACKED_SPI_SHADER_USER_DATA_VS__START_INSTANCE, /* GFX6-10 */

   SI_TRACKED_SPI_SHADER_USER_DATA_PS__ALPHA_REF,

   SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
   SI_TRACKED_COMPUTE_DISPATCH_INTERLEAVE, /* GFX12+ (not tracked on previous chips) */
   SI_TRACKED_COMPUTE_NUM_THREAD_X,
   SI_TRACKED_COMPUTE_NUM_THREAD_Y,
   SI_TRACKED_COMPUTE_NUM_THREAD_Z,
   SI_TRACKED_COMPUTE_TMPRING_SIZE,
   SI_TRACKED_COMPUTE_PGM_RSRC3, /* GFX11+ */

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_PGM_RSRC1,
   SI_TRACKED_COMPUTE_PGM_RSRC2,

   /* 2 consecutive registers. */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_LO, /* GFX11+ */
   SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_HI, /* GFX11+ */

   SI_NUM_ALL_TRACKED_REGS,
};

/* For 3 draw constants: BaseVertex, DrawID, StartInstance */
#define BASEVERTEX_MASK 0x1
#define DRAWID_MASK 0x2
#define STARTINSTANCE_MASK 0x4
#define BASEVERTEX_DRAWID_MASK (BASEVERTEX_MASK | DRAWID_MASK)
#define BASEVERTEX_DRAWID_STARTINSTANCE_MASK (BASEVERTEX_MASK | DRAWID_MASK | STARTINSTANCE_MASK)

struct si_tracked_regs {
   BITSET_DECLARE(reg_saved_mask, SI_NUM_ALL_TRACKED_REGS);
   uint32_t reg_value[SI_NUM_ALL_TRACKED_REGS];
   uint32_t spi_ps_input_cntl[32];
};
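
/* Illustrative sketch, assuming the usual "skip redundant register writes"
 * pattern (the actual emit helpers live elsewhere in the driver): a tracked
 * register is emitted only when its value is unknown or has changed.
 * "sctx->tracked_regs" is assumed here for illustration.
 *
 *    struct si_tracked_regs *t = &sctx->tracked_regs;
 *    if (!BITSET_TEST(t->reg_saved_mask, SI_TRACKED_DB_EQAA) ||
 *        t->reg_value[SI_TRACKED_DB_EQAA] != value) {
 *       BITSET_SET(t->reg_saved_mask, SI_TRACKED_DB_EQAA);
 *       t->reg_value[SI_TRACKED_DB_EQAA] = value;
 *       // emit the SET_CONTEXT_REG packet for DB_EQAA
 *    }
 */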

/* Private read-write buffer slots. */
enum
{
   SI_VS_STREAMOUT_BUF0,
   SI_VS_STREAMOUT_BUF1,
   SI_VS_STREAMOUT_BUF2,
   SI_VS_STREAMOUT_BUF3,

   /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
   SI_PS_IMAGE_COLORBUF0,
   SI_PS_IMAGE_COLORBUF0_HI,
   SI_PS_IMAGE_COLORBUF0_FMASK, /* gfx6-10 */
   SI_PS_IMAGE_COLORBUF0_FMASK_HI, /* gfx6-10 */

   /* Internal constant buffers. */
   SI_HS_CONST_DEFAULT_TESS_LEVELS,
   SI_VS_CONST_INSTANCE_DIVISORS,
   SI_VS_CONST_CLIP_PLANES,
   SI_PS_CONST_POLY_STIPPLE,
   SI_PS_CONST_SAMPLE_POSITIONS,

   SI_RING_ESGS, /* gfx6-8 */
   SI_RING_GSVS, /* gfx6-10 */
   SI_GS_QUERY_EMULATED_COUNTERS_BUF, /* gfx10+ */
   SI_RING_SHADER_LOG,

   SI_NUM_INTERNAL_BINDINGS,

   /* Aliases to reuse slots that are unused on other generations. */
   SI_GS_QUERY_BUF = SI_RING_ESGS, /* gfx10+ */
   SI_STREAMOUT_STATE_BUF = SI_RING_GSVS, /* gfx12+ */
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *  0 - rw buffers
 *  1 - vertex const and shader buffers
 *  2 - vertex samplers and images
 *  3 - fragment const and shader buffer
 *  ...
 *  11 - compute const and shader buffers
 *  12 - compute samplers and images
 */
enum
{
   SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
   SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
   SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_INTERNAL 0
#define SI_DESCS_FIRST_SHADER 1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name) \
   u_bit_consecutive(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
                     SI_NUM_SHADER_DESCS)

static inline unsigned si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}

static inline unsigned si_sampler_and_image_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
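
/* Example (illustrative): the helpers above map a shader stage to its two
 * descriptor-array indices, and SI_DESCS_SHADER_MASK covers both at once.
 * "sctx->descriptors" is assumed here for illustration.
 *
 *    unsigned buf_idx = si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX);
 *    unsigned tex_idx = si_sampler_and_image_descriptors_idx(PIPE_SHADER_VERTEX);
 *    struct si_descriptors *buf_descs = &sctx->descriptors[buf_idx];
 *    unsigned vs_descs_mask = SI_DESCS_SHADER_MASK(VERTEX);
 */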

/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
   /* The list of descriptors in malloc'd memory. */
   uint32_t *list;
   /* The list in mapped GPU memory. */
   uint32_t *gpu_list;

   /* The buffer where the descriptors have been uploaded. */
   struct si_resource *buffer;
   uint64_t gpu_address;

   /* The maximum number of descriptors. */
   uint32_t num_elements;

   /* Slots that are used by currently-bound shaders.
    * It determines which slots are uploaded.
    */
   uint32_t first_active_slot;
   uint32_t num_active_slots;

   /* The SH register offset relative to USER_DATA*_0 where the pointer
    * to the descriptor array will be stored. */
   short shader_userdata_offset;
   /* The size of one descriptor. */
   uint8_t element_dw_size;
   /* If there is only one slot enabled, bind it directly instead of
    * uploading descriptors. -1 if disabled. */
   signed char slot_index_to_bind_directly;
};
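
/* Illustrative example of the active-slot window (a sketch, not the actual
 * upload code): if the currently-bound shader only uses constant-buffer
 * slots 32..35 of its CONST_AND_SHADER_BUFFERS array, then
 * first_active_slot == 32 and num_active_slots == 4, so only
 * 4 * element_dw_size dwords of "list" need to be uploaded and pointed to.
 */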

struct si_buffer_resources {
   struct pipe_resource **buffers; /* this has num_buffers elements */
   unsigned *offsets; /* this has num_buffers elements */

   unsigned priority;
   unsigned priority_constbuf;

   /* The i-th bit is set if that element is enabled (non-NULL resource). */
   uint64_t enabled_mask;
   uint64_t writable_mask;
};

#define si_pm4_state_changed(sctx, member) \
   ((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member) \
   ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value) \
   do { \
      (sctx)->queued.named.member = (value); \
      if (value && value != (sctx)->emitted.named.member) \
         (sctx)->dirty_atoms |= SI_STATE_BIT(member); \
      else \
         (sctx)->dirty_atoms &= ~SI_STATE_BIT(member); \
   } while (0)
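
/* Minimal usage sketch, assuming a pipe_context bind callback (the real
 * callbacks in si_state.c also update derived state):
 *
 *    static void si_bind_blend_state(struct pipe_context *ctx, void *state)
 *    {
 *       struct si_context *sctx = (struct si_context *)ctx;
 *       si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
 *       // ... update blend-dependent derived state ...
 *    }
 */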

/* si_descriptors.c */
void si_get_inline_uniform_state(union si_shader_key *key, enum pipe_shader_type shader,
                                 bool *inline_uniforms, uint32_t **inlined_values);
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
                                    const struct legacy_surf_level *base_level_info,
                                    unsigned base_level, unsigned first_level, unsigned block_width,
                                    /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
                                    bool is_stencil, uint16_t access, uint32_t * restrict state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_force_disable_ps_colorbuf0_slot(struct si_context *sctx);
void si_invalidate_inlinable_uniforms(struct si_context *sctx, enum pipe_shader_type shader);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader, uint slot,
                                 struct pipe_constant_buffer *cbuf);
void si_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type shader,
                           unsigned start_slot, unsigned count,
                           const struct pipe_shader_buffer *sbuffers,
                           unsigned writable_bitmask, bool internal_blit);
void si_get_shader_buffers(struct si_context *sctx, enum pipe_shader_type shader, uint start_slot,
                           uint count, struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records, bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
bool si_gfx_resources_check_encrypted(struct si_context *sctx);
bool si_compute_resources_check_encrypted(struct si_context *sctx);
void si_shader_pointers_mark_dirty(struct si_context *sctx);
void si_add_all_descriptors_to_bo_list(struct si_context *sctx);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx, unsigned index);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
                                  const struct pipe_constant_buffer *input);
void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
                                   const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
                               uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx, struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                                  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);

/* si_state.c */
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void si_init_gfx_preamble_state(struct si_context *sctx);
void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
                               enum pipe_format format, unsigned offset, unsigned num_elements,
                               uint32_t *state);
void si_mark_display_dcc_dirty(struct si_context *sctx, struct si_texture *tex);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st);

struct si_fast_udiv_info32 {
   unsigned multiplier; /* the "magic number" multiplier */
   unsigned pre_shift;  /* shift for the dividend before multiplying */
   unsigned post_shift; /* shift for the dividend after multiplying */
   int increment;       /* 0 or 1; if set then increment the numerator, using one of
                           the two strategies */
};
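
/* Illustrative sketch of how the packed info is meant to be evaluated
 * (assuming the usual fast-udiv scheme; the exact helper lives in util):
 *
 *    n = n >> pre_shift;
 *    n = n + increment;   // 0 or 1
 *    q = (uint32_t)(((uint64_t)n * multiplier) >> 32) >> post_shift;
 *
 * i.e. an integer division by a constant D becomes a 32x32->64 multiply plus
 * shifts, which is what the instance-divisor path can evaluate in shaders.
 */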

struct si_fast_udiv_info32 si_compute_fast_udiv_info32(uint32_t D, unsigned num_bits);

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx, unsigned index);

/* si_state_shaders.cpp */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned wave_size, unsigned char ir_sha1_cache_key[20]);
bool si_shader_cache_load_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader);
void si_shader_cache_insert_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader, bool insert_into_disk_cache);
bool si_shader_mem_ordered(struct si_shader *shader);
void si_init_screen_live_shader_cache(struct si_screen *sscreen);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, gl_shader_stage stage,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state, void *job,
                                 util_queue_execute_func execute);
void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_info *info,
                              uint64_t *const_and_shader_buffers, uint64_t *samplers_and_images);
int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state);
void si_vs_key_update_inputs(struct si_context *sctx);
void si_update_ps_inputs_read_or_disabled(struct si_context *sctx);
void si_update_vrs_flat_shading(struct si_context *sctx);
unsigned si_get_input_prim(const struct si_shader_selector *gs, const union si_shader_key *key);
bool si_update_ngg(struct si_context *sctx);
void si_vs_ps_key_update_rast_prim_smooth_stipple(struct si_context *sctx);
void si_ps_key_update_framebuffer(struct si_context *sctx);
void si_ps_key_update_framebuffer_blend_rasterizer(struct si_context *sctx);
void si_ps_key_update_rasterizer(struct si_context *sctx);
void si_ps_key_update_dsa(struct si_context *sctx);
void si_ps_key_update_sample_shading(struct si_context *sctx);
void si_ps_key_update_framebuffer_rasterizer_sample_shading(struct si_context *sctx);
void si_init_tess_factor_ring(struct si_context *sctx);
bool si_update_gs_ring_buffers(struct si_context *sctx);
bool si_update_spi_tmpring_size(struct si_context *sctx, unsigned bytes);
bool si_set_tcs_to_fixed_func_shader(struct si_context *sctx);
void si_update_tess_io_layout_state(struct si_context *sctx);

/* si_state_draw.cpp */
void si_cp_dma_prefetch(struct si_context *sctx, struct pipe_resource *buf,
                        unsigned offset, unsigned size);
void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex_elements *velems,
                                     const struct pipe_vertex_buffer *vb, unsigned element_index,
                                     uint32_t *out);
void si_emit_buffered_compute_sh_regs(struct si_context *sctx);
void si_init_draw_functions_GFX6(struct si_context *sctx);
void si_init_draw_functions_GFX7(struct si_context *sctx);
void si_init_draw_functions_GFX8(struct si_context *sctx);
void si_init_draw_functions_GFX9(struct si_context *sctx);
void si_init_draw_functions_GFX10(struct si_context *sctx);
void si_init_draw_functions_GFX10_3(struct si_context *sctx);
void si_init_draw_functions_GFX11(struct si_context *sctx);
void si_init_draw_functions_GFX11_5(struct si_context *sctx);
void si_init_draw_functions_GFX12(struct si_context *sctx);

/* si_state_msaa.c */
extern unsigned si_msaa_max_distance[5];
void si_init_msaa_functions(struct si_context *sctx);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
   /* Constant buffers are in slots [32..47], ascending */
   return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
   /* shader buffers are in slots [31..0], descending */
   return SI_NUM_SHADER_BUFFERS - 1 - slot;
}

static inline unsigned si_get_sampler_slot(unsigned slot)
{
   /* 32 samplers are in sampler slots [16..47], 16 dw per slot, ascending */
   /* those are equivalent to image slots [32..95], 8 dw per slot, ascending */
   return SI_NUM_IMAGE_SLOTS / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
   /* image slots are in [31..0] (sampler slots [15..0]), descending */
   /* images are in slots [31..16], while FMASKs are in slots [15..0] */
   return SI_NUM_IMAGE_SLOTS - 1 - slot;
}
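
/* Worked example with the constants above (SI_NUM_SHADER_BUFFERS == 32,
 * SI_NUM_IMAGE_SLOTS == 32):
 *
 *    si_get_shaderbuf_slot(0) == 31    si_get_constbuf_slot(0) == 32
 *    si_get_image_slot(0)     == 31    si_get_sampler_slot(0)  == 16
 *
 * Shader/constant buffers and images/samplers live in separate descriptor
 * arrays (SI_SHADER_DESCS_*), so the two 0..31 ranges do not collide.
 */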

static inline unsigned si_clamp_texture_texel_count(unsigned max_texel_buffer_elements,
                                                    enum pipe_format format,
                                                    uint32_t size)
{
   /* The spec says:
    *    The number of texels in the texel array is then clamped to the value of
    *    the implementation-dependent limit GL_MAX_TEXTURE_BUFFER_SIZE.
    *
    * So compute the number of texels, compare to GL_MAX_TEXTURE_BUFFER_SIZE and update it.
    */
   unsigned stride = util_format_get_blocksize(format);
   return MIN2(max_texel_buffer_elements, size / stride);
}
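
/* Worked example (illustrative): a 1 MiB buffer viewed as
 * PIPE_FORMAT_R32G32B32A32_FLOAT has 16-byte texels, i.e. 65536 texels;
 * with max_texel_buffer_elements == 65536 the result stays 65536, while a
 * smaller limit clamps it down.
 */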

#ifdef __cplusplus
}
#endif

#endif