/*
 * Copyright © 2013 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef IR3_COMPILER_H_
#define IR3_COMPILER_H_

#include "compiler/nir/nir.h"
#include "util/disk_cache.h"
#include "util/log.h"
#include "util/perf/cpu_trace.h"

#include "freedreno_dev_info.h"

#include "ir3.h"

BEGINC;

struct ir3_ra_reg_set;
struct ir3_shader;

struct ir3_compiler_options {
   /* If true, UBO/SSBO accesses are assumed to be bounds-checked as defined by
    * VK_EXT_robustness2 and optimizations may have to be more conservative.
    */
   bool robust_buffer_access2;

   /* If true, promote UBOs (except for constant data) to constants using ldc.k
    * in the preamble. The driver should ignore everything in ubo_state except
    * for the constant data UBO, which is excluded because the command pushing
    * constants for it can be pre-baked when compiling the shader.
    */
   bool push_ubo_with_preamble;

   /* If true, disable the shader cache. The driver is then responsible for
    * caching.
    */
   bool disable_cache;

   /* If >= 0, this specifies the bindless descriptor set + descriptor to use
    * for txf_ms_fb.
    */
   int bindless_fb_read_descriptor;
   int bindless_fb_read_slot;

   /* True if 16-bit descriptors are available. */
   bool storage_16bit;
   /* True if 8-bit descriptors are available. */
   bool storage_8bit;

   /* If base_vertex should be lowered in nir */
   bool lower_base_vertex;

   bool shared_push_consts;

   /* "dual_color_blend_by_location" workaround is enabled: */
   bool dual_color_blend_by_location;
};

struct ir3_compiler {
   struct fd_device *dev;
   const struct fd_dev_id *dev_id;
   uint8_t gen;
   uint32_t shader_count;

   struct disk_cache *disk_cache;

   struct nir_shader_compiler_options nir_options;

   /*
    * Configuration options for things handled differently by turnip vs
    * gallium:
    */
   struct ir3_compiler_options options;

   /*
    * Configuration options for things that are handled differently on
    * different generations:
    */

   bool is_64bit;

   /* a4xx (and later) drops SP_FS_FLAT_SHAD_MODE_REG_* for flat-interpolate
    * so we need to use ldlv.u32 to load the varying directly:
    */
   bool flat_bypass;

   /* on a3xx, we need to add one to # of array levels:
    */
   bool levels_add_one;

   /* on a3xx, we need to scale up integer coords for isaml based
    * on LoD:
    */
   bool unminify_coords;

   /* on a3xx do txf_ms w/ isaml and scaled coords: */
   bool txf_ms_with_isaml;

   /* on a4xx, for array textures we need to add 0.5 to the array
    * index coordinate:
    */
   bool array_index_add_half;

   /* on a6xx, rewrite samgp to sequence of samgq0-3 in vertex shaders:
    */
   bool samgq_workaround;

   /* on a650, vertex shader <-> tess control io uses LDL/STL */
   bool tess_use_shared;

   /* The maximum number of constants, in vec4's, across the entire graphics
    * pipeline.
    */
   uint16_t max_const_pipeline;

   /* The maximum combined number of constants, in vec4's, for VS+HS+DS+GS. */
   uint16_t max_const_geom;

   /* The maximum number of constants, in vec4's, for FS. */
   uint16_t max_const_frag;

   /* A "safe" max constlen that can be applied to each shader in the
    * pipeline, which we guarantee will never exceed any combined limit
    * (see the illustrative helpers after this struct).
    */
   uint16_t max_const_safe;

   /* The maximum number of constants, in vec4's, for compute shaders. */
   uint16_t max_const_compute;

   /* Alignment, in instructions, required for a shader's base address and
    * length (instrlen is the instruction count divided by this).
    */
   uint32_t instr_align;

   /* on a3xx, the unit of indirect const load is larger than on later gens (in
    * vec4 units):
    */
   uint32_t const_upload_unit;

   /* The base number of threads per wave. Some stages may be able to double
    * this.
    */
   uint32_t threadsize_base;

   /* On at least a6xx, waves are always launched in pairs. In calculations
    * about occupancy, we pretend that each wave pair is actually one wave,
    * which simplifies many of the calculations, but means we have to
    * multiply threadsize_base by this number.
    */
   uint32_t wave_granularity;

   /* The maximum number of simultaneous waves per core. */
   uint32_t max_waves;

   /* This is the theoretical maximum number of vec4 registers that one wave of
    * the base threadsize could use. To get the actual size of the register
    * file in bytes one would need to compute:
    *
    * reg_size_vec4 * threadsize_base * wave_granularity * 16 (bytes per vec4)
    *
    * However this number is more often what we actually need. For example, a
    * max_reg of more than half of this makes a doubled threadsize impossible
    * (because double-sized waves take up twice as many registers). Also, the
    * formula for the occupancy given a particular register footprint is
    * simpler.
    *
    * It is in vec4 units because the register file is allocated with vec4
    * granularity, so it's in the same units as max_reg. (See the illustrative
    * helpers after this struct.)
    */
   uint32_t reg_size_vec4;

   /* The size of local memory in bytes */
   uint32_t local_mem_size;

   /* The number of total branch stack entries, divided by wave_granularity. */
   uint32_t branchstack_size;

   /* The byte increment of MEMSIZEPERITEM, the private memory per-fiber allocation. */
   uint32_t pvtmem_per_fiber_align;

   /* Whether clip+cull distances are supported */
   bool has_clip_cull;

   /* Whether private memory is supported */
   bool has_pvtmem;

   /* Whether SSBOs have descriptors for sampling with ISAM */
   bool has_isam_ssbo;

   /* Whether isam.v is supported to sample multiple components from SSBOs */
   bool has_isam_v;

   /* Whether isam/stib/ldib have immediate offsets. */
   bool has_ssbo_imm_offsets;

   /* True if the getfiberid, getlast.w8, brcst.active, and quad_shuffle
    * instructions are supported, which are necessary for subgroup quad and
    * arithmetic operations.
    */
   bool has_getfiberid;

   /* Number of available predicate registers (p0.c) */
   uint32_t num_predicates;

   /* True if bitops (and.b, or.b, xor.b, not.b) can write to p0.c */
   bool bitops_can_write_predicates;

   /* True if braa/brao are available. */
   bool has_branch_and_or;

   /* True if predt/predf/prede are supported. */
   bool has_predication;

   /* MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB */
   uint32_t max_variable_workgroup_size;

   bool has_dp2acc;
   bool has_dp4acc;
   bool has_compliant_dp4acc;

   /* Type to use for 1b nir bools: */
   type_t bool_type;

   /* Whether compute invocation params are passed in via shared regfile or
    * constbuf. a5xx+ has the shared regfile.
    */
   bool has_shared_regfile;

   /* True if preamble instructions (shps, shpe, etc.) are supported */
   bool has_preamble;

   /* Where the shared consts start in constants file, in vec4's. */
   uint16_t shared_consts_base_offset;

   /* The size of shared consts for CS and FS (in vec4's).
    * Also the size that is actually used on geometry stages (on a6xx).
    */
   uint64_t shared_consts_size;

   /* The size found on a6xx for geometry stages, which differs from the
    * shared consts size actually used.
    *
    * TODO: Keep an eye on this for next gens.
    */
   uint64_t geom_shared_consts_size_quirk;

   bool has_fs_tex_prefetch;

   bool stsc_duplication_quirk;

   bool load_shader_consts_via_preamble;
   bool load_inline_uniforms_via_preamble_ldgk;

   /* True if there is a scalar ALU capable of executing a subset of
    * cat2-cat4 instructions with a shared register destination. This also
    * implies expanded MOV/COV capability when writing to shared registers
    * (MOV/COV is executed on the scalar ALU except when reading from a
    * normal register), as well as the ability for ldc to write to a shared
    * register.
    */
   bool has_scalar_alu;

   bool fs_must_have_non_zero_constlen_quirk;

   /* On all generations that support scalar ALU, there is also a copy of the
    * scalar ALU and some other HW units in HLSQ that can execute preambles
    * before work is dispatched to the SPs, called "early preamble". We detect
    * whether the shader can use early preamble in ir3.
    */
   bool has_early_preamble;

   /* True if (rptN) is supported for bary.f. */
   bool has_rpt_bary_f;
};
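
/* The helpers below are illustrative sketches, not part of the upstream ir3
 * API (the ir3_example_* names are hypothetical): they only restate, in code,
 * relationships already described in the field comments above.
 */

/* Total register file size in bytes, per the reg_size_vec4 comment:
 * reg_size_vec4 * threadsize_base * wave_granularity * 16 (bytes per vec4).
 */
static inline uint32_t
ir3_example_reg_file_size_bytes(const struct ir3_compiler *compiler)
{
   return compiler->reg_size_vec4 * compiler->threadsize_base *
          compiler->wave_granularity * 16;
}

/* Whether a shader with the given max_reg (in vec4's, the same units as
 * reg_size_vec4) could still run at doubled threadsize: double-sized waves
 * take up twice as many registers, so max_reg must not exceed half of
 * reg_size_vec4.
 */
static inline bool
ir3_example_double_threadsize_possible(const struct ir3_compiler *compiler,
                                       uint32_t max_reg_vec4)
{
   return max_reg_vec4 <= compiler->reg_size_vec4 / 2;
}

/* Whether per-stage constlens (in vec4's) fit the combined max_const_*
 * limits; a stage that is not present would simply pass 0.
 */
static inline bool
ir3_example_pipeline_consts_fit(const struct ir3_compiler *compiler,
                                uint32_t vs, uint32_t hs, uint32_t ds,
                                uint32_t gs, uint32_t fs)
{
   return vs + hs + ds + gs <= compiler->max_const_geom &&
          fs <= compiler->max_const_frag &&
          vs + hs + ds + gs + fs <= compiler->max_const_pipeline;
}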

void ir3_compiler_destroy(struct ir3_compiler *compiler);
struct ir3_compiler *ir3_compiler_create(struct fd_device *dev,
                                         const struct fd_dev_id *dev_id,
                                         const struct fd_dev_info *dev_info,
                                         const struct ir3_compiler_options *options);
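
/* Illustrative usage sketch (hypothetical function, not upstream API): a
 * driver creates one compiler per device, configuring the turnip-vs-gallium
 * differences through ir3_compiler_options. Unset fields default to zero.
 */
static inline struct ir3_compiler *
ir3_example_compiler_create(struct fd_device *dev,
                            const struct fd_dev_id *dev_id,
                            const struct fd_dev_info *dev_info)
{
   const struct ir3_compiler_options options = {
      /* Assume the driver does its own shader caching: */
      .disable_cache = true,
      /* Negative values mean no bindless descriptor for txf_ms_fb: */
      .bindless_fb_read_descriptor = -1,
      .bindless_fb_read_slot = -1,
   };
   return ir3_compiler_create(dev, dev_id, dev_info, &options);
}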

void ir3_disk_cache_init(struct ir3_compiler *compiler);
void ir3_disk_cache_init_shader_key(struct ir3_compiler *compiler,
                                    struct ir3_shader *shader);
struct ir3_shader_variant *ir3_retrieve_variant(struct blob_reader *blob,
                                                struct ir3_compiler *compiler,
                                                void *mem_ctx);
void ir3_store_variant(struct blob *blob, const struct ir3_shader_variant *v);
bool ir3_disk_cache_retrieve(struct ir3_shader *shader,
                             struct ir3_shader_variant *v);
void ir3_disk_cache_store(struct ir3_shader *shader,
                          struct ir3_shader_variant *v);
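
/* Illustrative sketch (hypothetical function, not upstream API) of the
 * serialization pair above, assuming Mesa's util/blob.h is available here:
 * store a variant into a blob and read it back, as a driver-managed pipeline
 * cache might.
 */
static inline struct ir3_shader_variant *
ir3_example_variant_roundtrip(struct ir3_compiler *compiler,
                              const struct ir3_shader_variant *v,
                              void *mem_ctx)
{
   struct blob blob;
   struct blob_reader reader;

   blob_init(&blob);
   ir3_store_variant(&blob, v);

   blob_reader_init(&reader, blob.data, blob.size);
   struct ir3_shader_variant *copy =
      ir3_retrieve_variant(&reader, compiler, mem_ctx);

   blob_finish(&blob);
   return copy;
}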

const nir_shader_compiler_options *
ir3_get_compiler_options(struct ir3_compiler *compiler);
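
/* Illustrative sketch (hypothetical function, not upstream API): these are
 * the options a driver hands to NIR when creating shaders destined for ir3,
 * e.g. via nir_shader_create():
 */
static inline nir_shader *
ir3_example_new_nir_shader(struct ir3_compiler *compiler, void *mem_ctx,
                           gl_shader_stage stage)
{
   return nir_shader_create(mem_ctx, stage,
                            ir3_get_compiler_options(compiler), NULL);
}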

int ir3_compile_shader_nir(struct ir3_compiler *compiler,
                           struct ir3_shader *shader,
                           struct ir3_shader_variant *so);

/* GPU pointer size in units of 32-bit registers/slots */
static inline unsigned
ir3_pointer_size(struct ir3_compiler *compiler)
{
   return compiler->is_64bit ? 2 : 1;
}

enum ir3_shader_debug {
   IR3_DBG_SHADER_VS = BITFIELD_BIT(0),
   IR3_DBG_SHADER_TCS = BITFIELD_BIT(1),
   IR3_DBG_SHADER_TES = BITFIELD_BIT(2),
   IR3_DBG_SHADER_GS = BITFIELD_BIT(3),
   IR3_DBG_SHADER_FS = BITFIELD_BIT(4),
   IR3_DBG_SHADER_CS = BITFIELD_BIT(5),
   IR3_DBG_DISASM = BITFIELD_BIT(6),
   IR3_DBG_OPTMSGS = BITFIELD_BIT(7),
   IR3_DBG_FORCES2EN = BITFIELD_BIT(8),
   IR3_DBG_NOUBOOPT = BITFIELD_BIT(9),
   IR3_DBG_NOFP16 = BITFIELD_BIT(10),
   IR3_DBG_NOCACHE = BITFIELD_BIT(11),
   IR3_DBG_SPILLALL = BITFIELD_BIT(12),
   IR3_DBG_NOPREAMBLE = BITFIELD_BIT(13),
   IR3_DBG_SHADER_INTERNAL = BITFIELD_BIT(14),
   IR3_DBG_FULLSYNC = BITFIELD_BIT(15),
   IR3_DBG_FULLNOP = BITFIELD_BIT(16),
   IR3_DBG_NOEARLYPREAMBLE = BITFIELD_BIT(17),
   IR3_DBG_NODESCPREFETCH = BITFIELD_BIT(18),
   IR3_DBG_EXPANDRPT = BITFIELD_BIT(19),

   /* MESA_DEBUG-only options: */
   IR3_DBG_SCHEDMSGS = BITFIELD_BIT(20),
   IR3_DBG_RAMSGS = BITFIELD_BIT(21),

   /* Only used for the disk-caching logic: */
   IR3_DBG_ROBUST_UBO_ACCESS = BITFIELD_BIT(30),
};

extern enum ir3_shader_debug ir3_shader_debug;
extern const char *ir3_shader_override_path;
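
/* Illustrative note: ir3_shader_debug is a bitmask of the flags above,
 * populated by the driver at startup (typically parsed from a debug
 * environment variable). For example, to enable debug output for fragment
 * shaders plus IR dumps between optimization passes:
 *
 *    ir3_shader_debug = IR3_DBG_SHADER_FS | IR3_DBG_OPTMSGS;
 */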

static inline bool
shader_debug_enabled(gl_shader_stage type, bool internal)
{
   if (internal)
      return !!(ir3_shader_debug & IR3_DBG_SHADER_INTERNAL);

   if (ir3_shader_debug & IR3_DBG_DISASM)
      return true;

   switch (type) {
   case MESA_SHADER_VERTEX:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_VS);
   case MESA_SHADER_TESS_CTRL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_TCS);
   case MESA_SHADER_TESS_EVAL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_TES);
   case MESA_SHADER_GEOMETRY:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_GS);
   case MESA_SHADER_FRAGMENT:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_FS);
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_CS);
   default:
      assert(0);
      return false;
   }
}
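
/* Illustrative usage (dump_disassembly is hypothetical), as a driver might
 * gate its per-stage debug dumps:
 *
 *    if (shader_debug_enabled(nir->info.stage, nir->info.internal))
 *       dump_disassembly(...);
 */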

static inline void
ir3_debug_print(struct ir3 *ir, const char *when)
{
   if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
      mesa_logi("%s:", when);
      ir3_print(ir);
   }
}
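
/* Illustrative usage, as a compiler pass might call it:
 *
 *    ir3_debug_print(ir, "AFTER: register allocation");
 */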

ENDC;

#endif /* IR3_COMPILER_H_ */