xref: /aosp_15_r20/external/mesa3d/src/freedreno/ir3/ir3_nir.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2015 Rob Clark <[email protected]>
3  * SPDX-License-Identifier: MIT
4  *
5  * Authors:
6  *    Rob Clark <[email protected]>
7  */
8 
9 #include "util/u_debug.h"
10 #include "util/u_math.h"
11 
12 #include "ir3_compiler.h"
13 #include "ir3_nir.h"
14 #include "ir3_shader.h"
15 
16 nir_def *
17 ir3_get_shared_driver_ubo(nir_builder *b, const struct ir3_driver_ubo *ubo)
18 {
19    assert(ubo->idx > 0);
20 
21    /* Binning shaders share ir3_driver_ubo definitions but not shader info */
22    b->shader->info.num_ubos = MAX2(b->shader->info.num_ubos, ubo->idx + 1);
23    return nir_imm_int(b, ubo->idx);
24 }
25 
26 nir_def *
27 ir3_get_driver_ubo(nir_builder *b, struct ir3_driver_ubo *ubo)
28 {
29    /* Pick a UBO index to use as our constant data.  Skip UBO 0 since that's
30     * reserved for gallium's cb0.
31     */
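   /* E.g. with num_ubos == 0 on entry: the counter is bumped past the
    * reserved cb0 slot, this UBO becomes idx 1, and num_ubos ends up as 2.
    */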
32    if (ubo->idx == -1) {
33       if (b->shader->info.num_ubos == 0)
34          b->shader->info.num_ubos++;
35       ubo->idx = b->shader->info.num_ubos++;
36       return nir_imm_int(b, ubo->idx);
37    }
38 
39    return ir3_get_shared_driver_ubo(b, ubo);
40 }
41 
42 nir_def *
43 ir3_get_driver_consts_ubo(nir_builder *b, struct ir3_shader_variant *v)
44 {
45    if (v->binning_pass)
46       return ir3_get_shared_driver_ubo(b, &ir3_const_state(v)->consts_ubo);
47    return ir3_get_driver_ubo(b, &ir3_const_state_mut(v)->consts_ubo);
48 }
49 
50 nir_def *
51 ir3_load_driver_ubo(nir_builder *b, unsigned components,
52                     struct ir3_driver_ubo *ubo,
53                     unsigned offset)
54 {
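   /* Both ubo->size and 'offset' are in dwords; nir_load_ubo takes a byte
    * offset, hence the sizeof(uint32_t) scaling below.
    */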
55    ubo->size = MAX2(ubo->size, offset + components);
56 
57    return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
58                        nir_imm_int(b, offset * sizeof(uint32_t)),
59                        .align_mul = 16,
60                        .align_offset = (offset % 4) * sizeof(uint32_t),
61                        .range_base = offset * sizeof(uint32_t),
62                        .range = components * sizeof(uint32_t));
63 }
64 
65 nir_def *
66 ir3_load_driver_ubo_indirect(nir_builder *b, unsigned components,
67                              struct ir3_driver_ubo *ubo,
68                              unsigned base, nir_def *offset,
69                              unsigned range)
70 {
71    assert(range > 0);
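   /* The indirect offset is scaled by 16 bytes below, so 'range' effectively
    * counts vec4 slots; the highest dword that can be touched is
    * base + (range - 1) * 4 + components - 1.
    */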
72    ubo->size = MAX2(ubo->size, base + components + (range - 1) * 4);
73 
74    return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
75                        nir_iadd(b, nir_imul24(b, offset, nir_imm_int(b, 16)),
76                                 nir_imm_int(b, base * sizeof(uint32_t))),
77                        .align_mul = 16,
78                        .align_offset = (base % 4) * sizeof(uint32_t),
79                        .range_base = base * sizeof(uint32_t),
80                        .range = components * sizeof(uint32_t) +
81                         (range - 1) * 16);
82 }
83 
84 static bool
85 ir3_nir_should_scalarize_mem(const nir_instr *instr, const void *data)
86 {
87    const struct ir3_compiler *compiler = data;
88    const nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
89 
90    /* Scalarize load_ssbo's that we could otherwise lower to isam,
91     * as the tex cache benefit outweighs the benefit of vectorizing.
92     * Don't do this if (vectorized) isam.v is supported.
93     */
94    if ((intrin->intrinsic == nir_intrinsic_load_ssbo) &&
95        (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
96        compiler->has_isam_ssbo && !compiler->has_isam_v) {
97       return true;
98    }
99 
100    if ((intrin->intrinsic == nir_intrinsic_load_ssbo &&
101         intrin->def.bit_size == 8) ||
102        (intrin->intrinsic == nir_intrinsic_store_ssbo &&
103         intrin->src[0].ssa->bit_size == 8)) {
104       return true;
105    }
106 
107    return false;
108 }
109 
110 static bool
111 ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
112                              unsigned bit_size, unsigned num_components,
113                              nir_intrinsic_instr *low,
114                              nir_intrinsic_instr *high, void *data)
115 {
116    struct ir3_compiler *compiler = data;
117    unsigned byte_size = bit_size / 8;
118 
119    if (low->intrinsic == nir_intrinsic_load_const_ir3)
120       return bit_size <= 32 && num_components <= 4;
121 
122    if (low->intrinsic == nir_intrinsic_store_const_ir3)
123       return bit_size == 32 && num_components <= 4;
124 
125    /* Don't vectorize load_ssbo's that we could otherwise lower to isam,
126     * as the tex cache benefit outweighs the benefit of vectorizing. If we
127     * support isam.v, we can vectorize this though.
128     */
129    if ((low->intrinsic == nir_intrinsic_load_ssbo) &&
130        (nir_intrinsic_access(low) & ACCESS_CAN_REORDER) &&
131        compiler->has_isam_ssbo && !compiler->has_isam_v) {
132       return false;
133    }
134 
135    if (low->intrinsic != nir_intrinsic_load_ubo) {
136       return bit_size <= 32 && align_mul >= byte_size &&
137          align_offset % byte_size == 0 &&
138          num_components <= 4;
139    }
140 
141    assert(bit_size >= 8);
142    if (bit_size != 32)
143       return false;
144 
145    int size = num_components * byte_size;
146 
147    /* Don't care about alignment past vec4. */
148    assert(util_is_power_of_two_nonzero(align_mul));
149    align_mul = MIN2(align_mul, 16);
150    align_offset &= 15;
151 
152    /* Our offset alignment should always be at least 4 bytes */
153    if (align_mul < 4)
154       return false;
155 
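   /* Reject anything that could straddle a vec4: e.g. with align_mul == 4 and
    * align_offset == 0 the access may start at byte 12 of a vec4, so only a
    * single 32-bit component is guaranteed to fit.
    */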
156    unsigned worst_start_offset = 16 - align_mul + align_offset;
157    if (worst_start_offset + size > 16)
158       return false;
159 
160    return true;
161 }
162 
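/* Callback for nir_lower_bit_size(): a non-zero return value asks the pass to
 * lower the instruction to that bit size.  The 8-bit variants of the ops below
 * are instead performed in 16-bit (half) registers.
 */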
163 static unsigned
164 ir3_lower_bit_size(const nir_instr *instr, UNUSED void *data)
165 {
166    if (instr->type == nir_instr_type_intrinsic) {
167       nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
168       switch (intrinsic->intrinsic) {
169       case nir_intrinsic_exclusive_scan:
170       case nir_intrinsic_inclusive_scan:
171       case nir_intrinsic_quad_broadcast:
172       case nir_intrinsic_quad_swap_diagonal:
173       case nir_intrinsic_quad_swap_horizontal:
174       case nir_intrinsic_quad_swap_vertical:
175       case nir_intrinsic_reduce:
176          return intrinsic->def.bit_size == 8 ? 16 : 0;
177       default:
178          break;
179       }
180    }
181 
182    if (instr->type == nir_instr_type_alu) {
183       nir_alu_instr *alu = nir_instr_as_alu(instr);
184       switch (alu->op) {
185       case nir_op_iabs:
186       case nir_op_iadd_sat:
187       case nir_op_imax:
188       case nir_op_imin:
189       case nir_op_ineg:
190       case nir_op_ishl:
191       case nir_op_ishr:
192       case nir_op_isub_sat:
193       case nir_op_uadd_sat:
194       case nir_op_umax:
195       case nir_op_umin:
196       case nir_op_ushr:
197          return alu->def.bit_size == 8 ? 16 : 0;
198       case nir_op_ieq:
199       case nir_op_ige:
200       case nir_op_ilt:
201       case nir_op_ine:
202       case nir_op_uge:
203       case nir_op_ult:
204          return nir_src_bit_size(alu->src[0].src) == 8 ? 16 : 0;
205       default:
206          break;
207       }
208    }
209 
210    return 0;
211 }
212 
213 static void
214 ir3_get_variable_size_align_bytes(const glsl_type *type, unsigned *size, unsigned *align)
215 {
216    switch (type->base_type) {
217    case GLSL_TYPE_ARRAY:
218    case GLSL_TYPE_INTERFACE:
219    case GLSL_TYPE_STRUCT:
220       glsl_size_align_handle_array_and_structs(type, ir3_get_variable_size_align_bytes,
221                                                size, align);
222       break;
223    case GLSL_TYPE_UINT8:
224    case GLSL_TYPE_INT8:
225       /* 8-bit values are handled through 16-bit half-registers, so the resulting size
226        * and alignment values have to be doubled to reflect the actual variable size
227        * requirement.
228        */
229       *size = 2 * glsl_get_components(type);
230       *align = 2;
231       break;
232    default:
233       glsl_get_natural_size_align_bytes(type, size, align);
234       break;
235    }
236 }
237 
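/* OPT() runs a pass and evaluates to whether it made progress, so results can
 * be accumulated across an optimization loop; OPT_V() runs a pass purely for
 * its side effects.
 */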
238 #define OPT(nir, pass, ...)                                                    \
239    ({                                                                          \
240       bool this_progress = false;                                              \
241       NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);                       \
242       this_progress;                                                           \
243    })
244 
245 #define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
246 
247 bool
248 ir3_optimize_loop(struct ir3_compiler *compiler, nir_shader *s)
249 {
250    MESA_TRACE_FUNC();
251 
252    bool progress;
253    bool did_progress = false;
254    unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
255                          (s->options->lower_flrp32 ? 32 : 0) |
256                          (s->options->lower_flrp64 ? 64 : 0);
257 
258    do {
259       progress = false;
260 
261       OPT_V(s, nir_lower_vars_to_ssa);
262       progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
263       progress |= OPT(s, nir_lower_phis_to_scalar, false);
264 
265       progress |= OPT(s, nir_copy_prop);
266       progress |= OPT(s, nir_opt_deref);
267       progress |= OPT(s, nir_opt_dce);
268       progress |= OPT(s, nir_opt_cse);
269 
270       progress |= OPT(s, nir_opt_find_array_copies);
271       progress |= OPT(s, nir_opt_copy_prop_vars);
272       progress |= OPT(s, nir_opt_dead_write_vars);
273 
274       static int gcm = -1;
275       if (gcm == -1)
276          gcm = debug_get_num_option("GCM", 0);
277       if (gcm == 1)
278          progress |= OPT(s, nir_opt_gcm, true);
279       else if (gcm == 2)
280          progress |= OPT(s, nir_opt_gcm, false);
281       progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
282       progress |= OPT(s, nir_opt_intrinsics);
283       /* NOTE: GS lowering inserts an output var with a varying slot that
284        * is larger than VARYING_SLOT_MAX (i.e. GS_VERTEX_FLAGS_IR3),
285        * which triggers asserts in nir_shader_gather_info().  To work
286        * around that skip lowering phi precision for GS.
287        *
288        * Calling nir_shader_gather_info() late also seems to cause
289        * problems for tess lowering, so for now, since we only enable
290        * fp16/int16 for frag and compute, skip phi precision lowering
291        * for other stages.
292        */
293       if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
294           (s->info.stage == MESA_SHADER_COMPUTE) ||
295           (s->info.stage == MESA_SHADER_KERNEL)) {
296          progress |= OPT(s, nir_opt_phi_precision);
297       }
298       progress |= OPT(s, nir_opt_algebraic);
299       progress |= OPT(s, nir_lower_alu);
300       progress |= OPT(s, nir_lower_pack);
301       progress |= OPT(s, nir_lower_bit_size, ir3_lower_bit_size, NULL);
302       progress |= OPT(s, nir_opt_constant_folding);
303 
304       const nir_opt_offsets_options offset_options = {
305          /* How large an offset we can encode in the instr's immediate field.
306           */
307          .uniform_max = (1 << 9) - 1,
308 
309          /* STL/LDL have 13b for offset with MSB being a sign bit, but this opt
310           * doesn't deal with negative offsets.
311           */
312          .shared_max = (1 << 12) - 1,
313 
314          .buffer_max = 0,
315          .max_offset_cb = ir3_nir_max_imm_offset,
316          .max_offset_data = compiler,
317          .allow_offset_wrap = true,
318       };
319       progress |= OPT(s, nir_opt_offsets, &offset_options);
320 
321       nir_load_store_vectorize_options vectorize_opts = {
322          .modes = nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform,
323          .callback = ir3_nir_should_vectorize_mem,
324          .robust_modes = compiler->options.robust_buffer_access2 ?
325                nir_var_mem_ubo | nir_var_mem_ssbo : 0,
326          .cb_data = compiler,
327       };
328       progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);
329 
330       if (lower_flrp != 0) {
331          if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
332             OPT(s, nir_opt_constant_folding);
333             progress = true;
334          }
335 
336          /* Nothing should rematerialize any flrps, so we only
337           * need to do this lowering once.
338           */
339          lower_flrp = 0;
340       }
341 
342       progress |= OPT(s, nir_opt_dead_cf);
343       if (OPT(s, nir_opt_loop)) {
344          progress |= true;
345          /* If nir_opt_loop makes progress, then we need to clean
346           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
347           * to make progress.
348           */
349          OPT(s, nir_copy_prop);
350          OPT(s, nir_opt_dce);
351       }
352       progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
353       progress |= OPT(s, nir_opt_loop_unroll);
354       progress |= OPT(s, nir_lower_64bit_phis);
355       progress |= OPT(s, nir_opt_remove_phis);
356       progress |= OPT(s, nir_opt_undef);
357       did_progress |= progress;
358    } while (progress);
359 
360    OPT(s, nir_lower_var_copies);
361    return did_progress;
362 }
363 
364 static bool
365 should_split_wrmask(const nir_instr *instr, const void *data)
366 {
367    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
368 
369    switch (intr->intrinsic) {
370    case nir_intrinsic_store_ssbo:
371    case nir_intrinsic_store_shared:
372    case nir_intrinsic_store_global:
373    case nir_intrinsic_store_scratch:
374       return true;
375    default:
376       return false;
377    }
378 }
379 
380 static bool
381 ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
382 {
383    return instr->type == nir_instr_type_intrinsic &&
384           nir_instr_as_intrinsic(instr)->intrinsic ==
385              nir_intrinsic_get_ssbo_size;
386 }
387 
388 static nir_def *
389 ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
390 {
391    uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
392    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
393    return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift);
394 }
395 
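/* Scales get_ssbo_size results by 1 << ssbo_size_to_bytes_shift: the backend
 * implements the intrinsic in the units the hw resinfo opcode returns (dwords,
 * or 16-bit words with storage_16bit), and this shift converts the result back
 * into the bytes that NIR consumers expect.
 */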
396 static bool
397 ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
398 {
399    return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
400                                         ir3_nir_lower_ssbo_size_instr,
401                                         &ssbo_size_to_bytes_shift);
402 }
403 
404 void
405 ir3_nir_lower_io_to_temporaries(nir_shader *s)
406 {
407    /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
408     * by the hardware pre-loading registers at the beginning and then reading
409     * them at the end, so we can't access them indirectly except through
410     * normal register-indirect accesses, and therefore ir3 doesn't support
411     * indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
412     * and indirects work just fine for those. GS outputs may be consumed by
413     * VPC, but have their own lowering in ir3_nir_lower_gs() which does
414     * something similar to nir_lower_io_to_temporaries so we shouldn't need
415     * to lower them.
416     *
417     * Note: this might be a little inefficient for VS or TES outputs when
418     * the next stage isn't an FS, but it probably doesn't make sense to
419     * depend on the next stage before variant creation.
420     *
421     * TODO: for gallium, mesa/st also does some redundant lowering, including
422     * running this pass for GS inputs/outputs which we don't want but not
423     * including TES outputs or FS inputs which we do need. We should probably
424     * stop doing that once we're sure all drivers are doing their own
425     * indirect i/o lowering.
426     */
427    bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
428                       s->info.stage == MESA_SHADER_FRAGMENT;
429    bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
430                        s->info.stage != MESA_SHADER_GEOMETRY;
431    if (lower_input || lower_output) {
432       NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
433                  lower_output, lower_input);
434 
435       /* nir_lower_io_to_temporaries() creates global variables and copy
436        * instructions which need to be cleaned up.
437        */
438       NIR_PASS_V(s, nir_split_var_copies);
439       NIR_PASS_V(s, nir_lower_var_copies);
440       NIR_PASS_V(s, nir_lower_global_vars_to_local);
441    }
442 
443    /* Regardless of the above, we need to lower indirect references to
444     * compact variables such as clip/cull distances because due to how
445     * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
446     * vec4 components. nir_lower_indirect_derefs has a special case for
447     * compact variables, so it will actually lower them even though we pass
448     * in 0 modes.
449     *
450     * Using temporaries would be slightly better but
451     * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
452     */
453    NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
454 }
455 
456 /**
457  * Inserts an add of 0.5 to floating point array index values in texture coordinates.
458  */
459 static bool
460 ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_data)
461 {
462    if (instr->type != nir_instr_type_tex)
463       return false;
464 
465    nir_tex_instr *tex = nir_instr_as_tex(instr);
466    if (!tex->is_array || tex->op == nir_texop_lod)
467       return false;
468 
469    int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
470    if (coord_idx == -1 ||
471        nir_tex_instr_src_type(tex, coord_idx) != nir_type_float)
472       return false;
473 
474    b->cursor = nir_before_instr(&tex->instr);
475 
476    unsigned ncomp = tex->coord_components;
477    nir_def *src = tex->src[coord_idx].src.ssa;
478 
479    assume(ncomp >= 1);
480    nir_def *ai = nir_channel(b, src, ncomp - 1);
481    ai = nir_fadd_imm(b, ai, 0.5);
482    nir_src_rewrite(&tex->src[coord_idx].src,
483                    nir_vector_insert_imm(b, src, ai, ncomp - 1));
484    return true;
485 }
486 
487 static bool
488 ir3_nir_lower_array_sampler(nir_shader *shader)
489 {
490    return nir_shader_instructions_pass(
491       shader, ir3_nir_lower_array_sampler_cb,
492       nir_metadata_control_flow, NULL);
493 }
494 
495 void
496 ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
497 {
498    MESA_TRACE_FUNC();
499 
500    struct nir_lower_tex_options tex_options = {
501       .lower_rect = 0,
502       .lower_tg4_offsets = true,
503       .lower_invalid_implicit_lod = true,
504       .lower_index_to_offset = true,
505    };
506 
507    if (compiler->gen >= 4) {
508       /* a4xx seems to have *no* sam.p */
509       tex_options.lower_txp = ~0; /* lower all txp */
510    } else {
511       /* a3xx just needs to avoid sam.p for 3d tex */
512       tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
513    }
514 
515    if (ir3_shader_debug & IR3_DBG_DISASM) {
516       mesa_logi("----------------------");
517       nir_log_shaderi(s);
518       mesa_logi("----------------------");
519    }
520 
521    if (s->info.stage == MESA_SHADER_GEOMETRY)
522       NIR_PASS_V(s, ir3_nir_lower_gs);
523 
524    NIR_PASS_V(s, nir_lower_frexp);
525    NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
526 
527    OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
528 
529    OPT_V(s, nir_lower_tex, &tex_options);
530    OPT_V(s, nir_lower_load_const_to_scalar);
531 
532    if (compiler->array_index_add_half)
533       OPT_V(s, ir3_nir_lower_array_sampler);
534 
535    OPT_V(s, nir_lower_is_helper_invocation);
536 
537    ir3_optimize_loop(compiler, s);
538 
539    /* do idiv lowering after first opt loop to get a chance to propagate
540     * constants for divide by immed power-of-two:
541     */
542    nir_lower_idiv_options idiv_options = {
543       .allow_fp16 = true,
544    };
545    bool idiv_progress = OPT(s, nir_opt_idiv_const, 8);
546    idiv_progress |= OPT(s, nir_lower_idiv, &idiv_options);
547 
548    if (idiv_progress)
549       ir3_optimize_loop(compiler, s);
550 
551    OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
552 
553    if (ir3_shader_debug & IR3_DBG_DISASM) {
554       mesa_logi("----------------------");
555       nir_log_shaderi(s);
556       mesa_logi("----------------------");
557    }
558 
559    /* st_program.c's parameter list optimization requires that future nir
560     * variants don't reallocate the uniform storage, so we have to remove
561     * uniforms that occupy storage.  But we don't want to remove samplers,
562     * because they're needed for YUV variant lowering.
563     */
564    nir_foreach_uniform_variable_safe (var, s) {
565       if (var->data.mode == nir_var_uniform &&
566           (glsl_type_get_image_count(var->type) ||
567            glsl_type_get_sampler_count(var->type)))
568          continue;
569 
570       exec_node_remove(&var->node);
571    }
572    nir_validate_shader(s, "after uniform var removal");
573 
574    nir_sweep(s);
575 }
576 
577 static bool
578 lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
579 {
580    (void)unused;
581 
582    if (instr->type != nir_instr_type_intrinsic)
583       return false;
584 
585    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
586    return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
587           intr->intrinsic == nir_intrinsic_load_subgroup_id ||
588           intr->intrinsic == nir_intrinsic_load_num_subgroups;
589 }
590 
591 static nir_def *
592 lower_subgroup_id(nir_builder *b, nir_instr *instr, void *_shader)
593 {
594    struct ir3_shader *shader = _shader;
595 
596    /* Vulkan allows implementations to tile workgroup invocations even when
597     * subgroup operations are involved, which is implied by this Note:
598     *
599     *    "There is no direct relationship between SubgroupLocalInvocationId and
600     *    LocalInvocationId or LocalInvocationIndex."
601     *
602     * However there is no way to get SubgroupId directly, so we have to use
603     * LocalInvocationIndex here. This means that whenever we do this lowering we
604     * have to force linear dispatch to make sure that the relation between
605     * SubgroupId/SubgroupLocalInvocationId and LocalInvocationIndex is what we
606     * expect.
607     */
608    shader->cs.force_linear_dispatch = true;
609 
610    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
611    if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
612       return nir_iand(
613          b, nir_load_local_invocation_index(b),
614          nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
615    } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
616       return nir_ishr(b, nir_load_local_invocation_index(b),
617                       nir_load_subgroup_id_shift_ir3(b));
618    } else {
619       assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
620       /* If the workgroup size is constant,
621        * nir_lower_compute_system_values() will replace local_size with a
622        * constant so this can mostly be constant folded away.
623        */
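      /* i.e. num_subgroups = DIV_ROUND_UP(workgroup_size, subgroup_size),
       * computed as 1 + ((size - 1) >> shift) since the subgroup size is
       * 1 << subgroup_id_shift.
       */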
624       nir_def *local_size = nir_load_workgroup_size(b);
625       nir_def *size =
626          nir_imul24(b, nir_channel(b, local_size, 0),
627                     nir_imul24(b, nir_channel(b, local_size, 1),
628                                nir_channel(b, local_size, 2)));
629       nir_def *one = nir_imm_int(b, 1);
630       return nir_iadd(b, one,
631                       nir_ishr(b, nir_isub(b, size, one),
632                                nir_load_subgroup_id_shift_ir3(b)));
633    }
634 }
635 
636 static bool
637 ir3_nir_lower_subgroup_id_cs(nir_shader *nir, struct ir3_shader *shader)
638 {
639    return nir_shader_lower_instructions(nir, lower_subgroup_id_filter,
640                                         lower_subgroup_id, shader);
641 }
642 
643 /**
644  * Late passes that need to be done after pscreen->finalize_nir()
645  */
646 void
647 ir3_nir_post_finalize(struct ir3_shader *shader)
648 {
649    struct nir_shader *s = shader->nir;
650    struct ir3_compiler *compiler = shader->compiler;
651 
652    MESA_TRACE_FUNC();
653 
654    NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
655               ir3_glsl_type_size, nir_lower_io_lower_64bit_to_32);
656 
657    if (s->info.stage == MESA_SHADER_FRAGMENT) {
658       /* NOTE: lower load_barycentric_at_sample first, since it
659        * produces load_barycentric_at_offset:
660        */
661       NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
662       NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
663       NIR_PASS_V(s, ir3_nir_move_varying_inputs);
664       NIR_PASS_V(s, nir_lower_fb_read);
665       NIR_PASS_V(s, ir3_nir_lower_layer_id);
666    }
667 
668    if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
669        !(ir3_shader_debug & IR3_DBG_NOFP16)) {
670       /* Lower FS mediump inputs to 16-bit. If you declared it mediump, you
671        * probably want 16-bit instructions (and have set
672        * mediump/RelaxedPrecision on most of the rest of the shader's
673        * instructions).  If we don't lower it in NIR, then comparisons of the
674        * results of mediump ALU ops with the mediump input will happen in highp,
675        * causing extra conversions (and, incidentally, causing
676        * dEQP-GLES2.functional.shaders.algorithm.rgb_to_hsl_fragment on ANGLE to
677        * fail)
678        *
679        * However, we can't do flat inputs because flat.b doesn't have the
680        * destination type for how to downconvert the
681        * 32-bit-in-the-varyings-interpolator value. (also, even if it did, watch
682        * out for how gl_nir_lower_packed_varyings packs all flat-interpolated
683        * things together as ivec4s, so when we lower a formerly-float input
684        * you'd end up with an incorrect f2f16(i2i32(load_input())) instead of
685        * load_input).
686        */
687       uint64_t mediump_varyings = 0;
688       nir_foreach_shader_in_variable(var, s) {
689          if ((var->data.precision == GLSL_PRECISION_MEDIUM ||
690               var->data.precision == GLSL_PRECISION_LOW) &&
691              var->data.interpolation != INTERP_MODE_FLAT) {
692             mediump_varyings |= BITFIELD64_BIT(var->data.location);
693          }
694       }
695 
696       if (mediump_varyings) {
697          NIR_PASS_V(s, nir_lower_mediump_io,
698                   nir_var_shader_in,
699                   mediump_varyings,
700                   false);
701       }
702 
703       /* This should come after input lowering, to opportunistically lower non-mediump outputs. */
704       NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
705    }
706 
707    {
708       /* If the API-facing subgroup size is forced to a particular value, lower
709        * it here. Beyond this point nir_intrinsic_load_subgroup_size will return
710        * the "real" subgroup size.
711        */
712       unsigned subgroup_size = 0, max_subgroup_size = 0;
713       switch (shader->options.api_wavesize) {
714       case IR3_SINGLE_ONLY:
715          subgroup_size = max_subgroup_size = compiler->threadsize_base;
716          break;
717       case IR3_DOUBLE_ONLY:
718          subgroup_size = max_subgroup_size = compiler->threadsize_base * 2;
719          break;
720       case IR3_SINGLE_OR_DOUBLE:
721          /* For vertex stages, we know the wavesize will never be doubled.
722           * Lower subgroup_size here, to avoid having to deal with it when
723           * translating from NIR. Otherwise use the "real" wavesize obtained as
724           * a driver param.
725           */
726          if (s->info.stage != MESA_SHADER_COMPUTE &&
727              s->info.stage != MESA_SHADER_FRAGMENT) {
728             subgroup_size = max_subgroup_size = compiler->threadsize_base;
729          } else {
730             subgroup_size = 0;
731             max_subgroup_size = compiler->threadsize_base * 2;
732          }
733          break;
734       }
735 
736       nir_lower_subgroups_options options = {
737             .subgroup_size = subgroup_size,
738             .ballot_bit_size = 32,
739             .ballot_components = max_subgroup_size / 32,
740             .lower_to_scalar = true,
741             .lower_vote_eq = true,
742             .lower_vote_bool_eq = true,
743             .lower_subgroup_masks = true,
744             .lower_read_invocation_to_cond = true,
745             .lower_shuffle = true,
746             .lower_relative_shuffle = true,
747             .lower_inverse_ballot = true,
748       };
749 
750       if (!((s->info.stage == MESA_SHADER_COMPUTE) ||
751             (s->info.stage == MESA_SHADER_KERNEL) ||
752             compiler->has_getfiberid)) {
753          options.subgroup_size = 1;
754          options.lower_vote_trivial = true;
755       }
756 
757       OPT(s, nir_lower_subgroups, &options);
758    }
759 
760    if ((s->info.stage == MESA_SHADER_COMPUTE) ||
761        (s->info.stage == MESA_SHADER_KERNEL)) {
762       bool progress = false;
763       NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs, shader);
764 
765       /* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
766        * we need to lower again.
767        */
768       if (progress)
769          NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
770    }
771 
772    /* we cannot ensure that ir3_finalize_nir() is only called once, so
773     * we also need to do any run-once workarounds here:
774     */
775    OPT_V(s, ir3_nir_apply_trig_workarounds);
776 
777    const nir_lower_image_options lower_image_opts = {
778       .lower_cube_size = true,
779       .lower_image_samples_to_one = true
780    };
781    NIR_PASS_V(s, nir_lower_image, &lower_image_opts);
782 
783    const nir_lower_idiv_options lower_idiv_options = {
784       .allow_fp16 = true,
785    };
786    NIR_PASS_V(s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */
787 
788 
789    /* The resinfo opcode returns the size in dwords on a4xx */
790    if (compiler->gen == 4)
791       OPT_V(s, ir3_nir_lower_ssbo_size, 2);
792 
793    /* The resinfo opcode we have for getting the SSBO size on a6xx returns a
794     * byte length divided by IBO_0_FMT, while the NIR intrinsic coming in is a
795     * number of bytes. Switch things so the NIR intrinsic in our backend means
796     * dwords.
797     */
798    if (compiler->gen >= 6)
799       OPT_V(s, ir3_nir_lower_ssbo_size, compiler->options.storage_16bit ? 1 : 2);
800 
801    ir3_optimize_loop(compiler, s);
802 }
803 
804 static bool
805 lower_ucp_vs(struct ir3_shader_variant *so)
806 {
807    if (!so->key.ucp_enables)
808       return false;
809 
810    gl_shader_stage last_geom_stage;
811 
812    if (so->key.has_gs) {
813       last_geom_stage = MESA_SHADER_GEOMETRY;
814    } else if (so->key.tessellation) {
815       last_geom_stage = MESA_SHADER_TESS_EVAL;
816    } else {
817       last_geom_stage = MESA_SHADER_VERTEX;
818    }
819 
820    return so->type == last_geom_stage;
821 }
822 
823 static bool
824 output_slot_used_for_binning(gl_varying_slot slot)
825 {
826    return slot == VARYING_SLOT_POS || slot == VARYING_SLOT_PSIZ ||
827           slot == VARYING_SLOT_CLIP_DIST0 || slot == VARYING_SLOT_CLIP_DIST1 ||
828           slot == VARYING_SLOT_VIEWPORT;
829 }
830 
831 static bool
832 remove_nonbinning_output(nir_builder *b, nir_intrinsic_instr *intr, void *data)
833 {
834    if (intr->intrinsic != nir_intrinsic_store_output)
835       return false;
836 
837    nir_io_semantics io = nir_intrinsic_io_semantics(intr);
838 
839    if (output_slot_used_for_binning(io.location))
840       return false;
841 
842    nir_instr_remove(&intr->instr);
843    return true;
844 }
845 
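/* The binning-pass variant only needs the position-related outputs listed in
 * output_slot_used_for_binning(); drop stores to everything else, and let the
 * caller re-gather outputs_written afterwards.
 */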
846 static bool
847 lower_binning(nir_shader *s)
848 {
849    return nir_shader_intrinsics_pass(s, remove_nonbinning_output,
850                                      nir_metadata_control_flow, NULL);
851 }
852 
853 static nir_mem_access_size_align
854 ir3_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
855                  uint8_t bit_size, uint32_t align,
856                  uint32_t align_offset, bool offset_is_const,
857                  const void *cb_data)
858 {
859    align = nir_combined_align(align, align_offset);
860    assert(util_is_power_of_two_nonzero(align));
861 
862    /* If we're only aligned to 1 byte, use 8-bit loads. If we're only
863     * aligned to 2 bytes, use 16-bit loads, unless we needed 8-bit loads due to
864     * the size.
865     */
866    if ((bytes & 1) || (align == 1))
867       bit_size = 8;
868    else if ((bytes & 2) || (align == 2))
869       bit_size = 16;
870    else if (bit_size >= 32)
871       bit_size = 32;
872 
873    if (intrin == nir_intrinsic_load_ubo)
874       bit_size = 32;
875 
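   /* Clamp to at most a vec4 of whole components; nir_lower_mem_access_bit_sizes
    * splits anything larger into multiple accesses.
    */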
876    return (nir_mem_access_size_align){
877       .num_components = MAX2(1, MIN2(bytes / (bit_size / 8), 4)),
878       .bit_size = bit_size,
879       .align = bit_size / 8,
880    };
881 }
882 
883 void
884 ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
885 {
886    MESA_TRACE_FUNC();
887 
888    if (ir3_shader_debug & IR3_DBG_DISASM) {
889       mesa_logi("----------------------");
890       nir_log_shaderi(s);
891       mesa_logi("----------------------");
892    }
893 
894    bool progress = false;
895 
896    progress |= OPT(s, nir_lower_io_to_scalar, nir_var_mem_ssbo,
897                    ir3_nir_should_scalarize_mem, so->compiler);
898 
899    if (so->key.has_gs || so->key.tessellation) {
900       switch (so->type) {
901       case MESA_SHADER_VERTEX:
902          NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
903                     so->key.tessellation);
904          progress = true;
905          break;
906       case MESA_SHADER_TESS_CTRL:
907          NIR_PASS_V(s, nir_lower_io_to_scalar,
908                      nir_var_shader_in | nir_var_shader_out, NULL, NULL);
909          NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
910          NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
911          progress = true;
912          break;
913       case MESA_SHADER_TESS_EVAL:
914          NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
915          if (so->key.has_gs)
916             NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
917                        so->key.tessellation);
918          progress = true;
919          break;
920       case MESA_SHADER_GEOMETRY:
921          NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
922          progress = true;
923          break;
924       default:
925          break;
926       }
927    }
928 
929    if (so->binning_pass) {
930       if (OPT(s, lower_binning)) {
931          progress = true;
932 
933          /* outputs_written has changed. */
934          nir_shader_gather_info(s, nir_shader_get_entrypoint(s));
935       }
936    }
937 
938    /* Note that it is intentional to use the VS lowering pass for GS, since we
939     * lower GS into something that looks more like a VS in ir3_nir_lower_gs():
940     */
941    if (lower_ucp_vs(so)) {
942       progress |= OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, true, NULL);
943    } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
944       if (so->key.ucp_enables && !so->compiler->has_clip_cull)
945          progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, true);
946    }
947 
948    /* Move large constant variables to the constants attached to the NIR
949     * shader, which we will upload in the immediates range.  This generates
950     * amuls, so we need to clean those up after.
951     *
952     * Passing no size_align, we would get packed values, which if we end up
953     * having to load with LDC would result in extra reads to unpack from
954     * straddling loads.  Align everything to vec4 to avoid that, though we
955     * could theoretically do better.
956     */
957    OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
958          32 /* bytes */);
959    progress |= OPT(s, ir3_nir_lower_load_constant, so);
960 
961    /* Lower large temporaries to scratch, which in Qualcomm terms is private
962     * memory, to avoid excess register pressure. This should happen after
963     * nir_opt_large_constants, because loading from a UBO is much, much less
964     * expensive.
965     */
966    if (so->compiler->has_pvtmem) {
967       progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
968                       16 * 16 /* bytes */,
969                       ir3_get_variable_size_align_bytes, glsl_get_natural_size_align_bytes);
970    }
971 
972    /* Lower scratch writemasks */
973    progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);
974 
975    if (OPT(s, nir_lower_locals_to_regs, 1)) {
976       progress = true;
977 
978       /* Split 64b registers into two 32b ones. */
979       OPT_V(s, ir3_nir_lower_64b_regs);
980    }
981 
982    nir_lower_mem_access_bit_sizes_options mem_bit_size_options = {
983       .modes = nir_var_mem_constant | nir_var_mem_ubo |
984                nir_var_mem_global | nir_var_mem_shared |
985                nir_var_function_temp,
986       .callback = ir3_mem_access_size_align,
987    };
988 
989    progress |= OPT(s, nir_lower_mem_access_bit_sizes, &mem_bit_size_options);
990    progress |= OPT(s, ir3_nir_lower_64b_global);
991    progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
992    progress |= OPT(s, ir3_nir_lower_64b_undef);
993    progress |= OPT(s, nir_lower_int64);
994 
995    /* Cleanup code leftover from lowering passes before opt_preamble */
996    if (progress) {
997       progress |= OPT(s, nir_opt_constant_folding);
998    }
999 
1000    progress |= OPT(s, ir3_nir_opt_subgroups, so);
1001 
1002    if (so->compiler->load_shader_consts_via_preamble)
1003       progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1004 
1005    /* Do the preamble before analysing UBO ranges, because it's usually
1006     * higher-value and because it can result in eliminating some indirect UBO
1007     * accesses where otherwise we'd have to push the whole range. However we
1008     * have to lower the preamble after UBO lowering so that UBO lowering can
1009     * insert instructions in the preamble to push UBOs.
1010     */
1011    if (so->compiler->has_preamble &&
1012        !(ir3_shader_debug & IR3_DBG_NOPREAMBLE))
1013       progress |= OPT(s, ir3_nir_opt_preamble, so);
1014 
1015    if (so->compiler->load_shader_consts_via_preamble)
1016       progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1017 
1018    /* TODO: ldg.k might also work on a6xx */
1019    if (so->compiler->gen >= 7)
1020       progress |= OPT(s, ir3_nir_lower_const_global_loads, so);
1021 
1022    if (!so->binning_pass)
1023       OPT_V(s, ir3_nir_analyze_ubo_ranges, so);
1024 
1025    progress |= OPT(s, ir3_nir_lower_ubo_loads, so);
1026 
1027    if (so->compiler->gen >= 7 &&
1028        !(ir3_shader_debug & (IR3_DBG_NOPREAMBLE | IR3_DBG_NODESCPREFETCH)))
1029       progress |= OPT(s, ir3_nir_opt_prefetch_descriptors, so);
1030 
1031    if (so->shader_options.push_consts_type == IR3_PUSH_CONSTS_SHARED_PREAMBLE)
1032       progress |= OPT(s, ir3_nir_lower_push_consts_to_preamble, so);
1033 
1034    progress |= OPT(s, ir3_nir_lower_preamble, so);
1035 
1036    progress |= OPT(s, nir_lower_amul, ir3_glsl_type_size);
1037 
1038    /* UBO offset lowering has to come after we've decided what will
1039     * be left as load_ubo
1040     */
1041    if (so->compiler->gen >= 6)
1042       progress |= OPT(s, nir_lower_ubo_vec4);
1043 
1044    progress |= OPT(s, ir3_nir_lower_io_offsets);
1045 
1046    if (progress)
1047       ir3_optimize_loop(so->compiler, s);
1048 
1049    /* verify that progress is always set */
1050    assert(!ir3_optimize_loop(so->compiler, s));
1051 
1052    /* Fixup indirect load_const_ir3's which end up with a const base offset
1053     * which is too large to encode.  Do this late(ish) so we actually
1054     * can differentiate indirect vs non-indirect.
1055     */
1056    if (OPT(s, ir3_nir_fixup_load_const_ir3))
1057       ir3_optimize_loop(so->compiler, s);
1058 
1059    /* Do late algebraic optimization to turn add(a, neg(b)) back into
1060     * subs, then the mandatory cleanup after algebraic.  Note that it may
1061     * produce fnegs, and if so then we need to keep running to squash
1062     * fneg(fneg(a)).
1063     */
1064    bool more_late_algebraic = true;
1065    while (more_late_algebraic) {
1066       more_late_algebraic = OPT(s, nir_opt_algebraic_late);
1067       if (!more_late_algebraic && so->compiler->gen >= 5) {
1068          /* Lowers texture operations that have only f2f16 or u2u16 called on
1069           * them to have a 16-bit destination.  Also, lower 16-bit texture
1070           * coordinates that had been upconverted to 32-bits just for the
1071           * sampler to just be 16-bit texture sources.
1072           */
1073          struct nir_opt_tex_srcs_options opt_srcs_options = {
1074             .sampler_dims = ~0,
1075             .src_types = (1 << nir_tex_src_coord) |
1076                          (1 << nir_tex_src_lod) |
1077                          (1 << nir_tex_src_bias) |
1078                          (1 << nir_tex_src_offset) |
1079                          (1 << nir_tex_src_comparator) |
1080                          (1 << nir_tex_src_min_lod) |
1081                          (1 << nir_tex_src_ms_index) |
1082                          (1 << nir_tex_src_ddx) |
1083                          (1 << nir_tex_src_ddy),
1084          };
1085          struct nir_opt_16bit_tex_image_options opt_16bit_options = {
1086             .rounding_mode = nir_rounding_mode_rtz,
1087             .opt_tex_dest_types = nir_type_float,
1088             /* blob dumps have no half regs on pixel 2's ldib or stib, so only enable for a6xx+. */
1089             .opt_image_dest_types = so->compiler->gen >= 6 ?
1090                                         nir_type_float | nir_type_uint | nir_type_int : 0,
1091             .opt_image_store_data = so->compiler->gen >= 6,
1092             .opt_srcs_options_count = 1,
1093             .opt_srcs_options = &opt_srcs_options,
1094          };
1095          OPT(s, nir_opt_16bit_tex_image, &opt_16bit_options);
1096       }
1097       OPT_V(s, nir_opt_constant_folding);
1098       OPT_V(s, nir_copy_prop);
1099       OPT_V(s, nir_opt_dce);
1100       OPT_V(s, nir_opt_cse);
1101    }
1102 
1103    OPT_V(s, nir_opt_sink, nir_move_const_undef);
1104 
1105    if (ir3_shader_debug & IR3_DBG_DISASM) {
1106       mesa_logi("----------------------");
1107       nir_log_shaderi(s);
1108       mesa_logi("----------------------");
1109    }
1110 
1111    nir_sweep(s);
1112 
1113    /* Binning pass variants re-use the const_state of the corresponding
1114     * draw pass shader, so that same const emit can be re-used for both
1115     * passes:
1116     */
1117    if (!so->binning_pass)
1118       ir3_setup_const_state(s, so, ir3_const_state_mut(so));
1119 }
1120 
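/* Maps a sysval-style intrinsic onto its dword offset within the driver-params
 * const block; returns false if the intrinsic is not a driver param.
 */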
1121 bool
1122 ir3_get_driver_param_info(const nir_shader *shader, nir_intrinsic_instr *intr,
1123                           struct driver_param_info *param_info)
1124 {
1125    switch (intr->intrinsic) {
1126    case nir_intrinsic_load_base_workgroup_id:
1127       param_info->offset = IR3_DP_BASE_GROUP_X;
1128       break;
1129    case nir_intrinsic_load_num_workgroups:
1130       param_info->offset = IR3_DP_NUM_WORK_GROUPS_X;
1131       break;
1132    case nir_intrinsic_load_workgroup_size:
1133       param_info->offset = IR3_DP_LOCAL_GROUP_SIZE_X;
1134       break;
1135    case nir_intrinsic_load_subgroup_size:
1136       assert(shader->info.stage == MESA_SHADER_COMPUTE ||
1137              shader->info.stage == MESA_SHADER_FRAGMENT);
1138       if (shader->info.stage == MESA_SHADER_COMPUTE) {
1139          param_info->offset = IR3_DP_CS_SUBGROUP_SIZE;
1140       } else {
1141          param_info->offset = IR3_DP_FS_SUBGROUP_SIZE;
1142       }
1143       break;
1144    case nir_intrinsic_load_subgroup_id_shift_ir3:
1145       param_info->offset = IR3_DP_SUBGROUP_ID_SHIFT;
1146       break;
1147    case nir_intrinsic_load_work_dim:
1148       param_info->offset = IR3_DP_WORK_DIM;
1149       break;
1150    case nir_intrinsic_load_base_vertex:
1151    case nir_intrinsic_load_first_vertex:
1152       param_info->offset = IR3_DP_VTXID_BASE;
1153       break;
1154    case nir_intrinsic_load_is_indexed_draw:
1155       param_info->offset = IR3_DP_IS_INDEXED_DRAW;
1156       break;
1157    case nir_intrinsic_load_draw_id:
1158       param_info->offset = IR3_DP_DRAWID;
1159       break;
1160    case nir_intrinsic_load_base_instance:
1161       param_info->offset = IR3_DP_INSTID_BASE;
1162       break;
1163    case nir_intrinsic_load_user_clip_plane: {
1164       uint32_t idx = nir_intrinsic_ucp_id(intr);
1165       param_info->offset = IR3_DP_UCP0_X + 4 * idx;
1166       break;
1167    }
1168    case nir_intrinsic_load_tess_level_outer_default:
1169       param_info->offset = IR3_DP_HS_DEFAULT_OUTER_LEVEL_X;
1170       break;
1171    case nir_intrinsic_load_tess_level_inner_default:
1172       param_info->offset = IR3_DP_HS_DEFAULT_INNER_LEVEL_X;
1173       break;
1174    case nir_intrinsic_load_frag_size_ir3:
1175       param_info->offset = IR3_DP_FS_FRAG_SIZE;
1176       break;
1177    case nir_intrinsic_load_frag_offset_ir3:
1178       param_info->offset = IR3_DP_FS_FRAG_OFFSET;
1179       break;
1180    case nir_intrinsic_load_frag_invocation_count:
1181       param_info->offset = IR3_DP_FS_FRAG_INVOCATION_COUNT;
1182       break;
1183    default:
1184       return false;
1185    }
1186 
1187    return true;
1188 }
1189 
1190 static void
1191 ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader, struct ir3_const_state *layout)
1192 {
1193    nir_foreach_function (function, shader) {
1194       if (!function->impl)
1195          continue;
1196 
1197       nir_foreach_block (block, function->impl) {
1198          nir_foreach_instr (instr, block) {
1199             if (instr->type != nir_instr_type_intrinsic)
1200                continue;
1201 
1202             nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1203             unsigned idx;
1204 
1205             switch (intr->intrinsic) {
1206             case nir_intrinsic_image_atomic:
1207             case nir_intrinsic_image_atomic_swap:
1208             case nir_intrinsic_image_load:
1209             case nir_intrinsic_image_store:
1210             case nir_intrinsic_image_size:
1211                /* a4xx gets these supplied by the hw directly (maybe CP?) */
1212                if (compiler->gen == 5 &&
1213                    !(intr->intrinsic == nir_intrinsic_image_load &&
1214                      !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
1215                   idx = nir_src_as_uint(intr->src[0]);
1216                   if (layout->image_dims.mask & (1 << idx))
1217                      break;
1218                   layout->image_dims.mask |= (1 << idx);
1219                   layout->image_dims.off[idx] = layout->image_dims.count;
1220                   layout->image_dims.count += 3; /* three const per */
1221                }
1222                break;
1223             default:
1224                break;
1225             }
1226 
1227             struct driver_param_info param_info;
1228             if (ir3_get_driver_param_info(shader, intr, &param_info)) {
1229                layout->num_driver_params =
1230                   MAX2(layout->num_driver_params,
1231                        param_info.offset + nir_intrinsic_dest_components(intr));
1232             }
1233          }
1234       }
1235    }
1236 
1237    /* TODO: Provide a spot somewhere to safely upload unwanted values, and a way
1238     * to determine if they're wanted or not. For now we always make the whole
1239     * driver param range available, since the driver will always instruct the
1240     * hardware to upload these.
1241     */
1242    if (!compiler->has_shared_regfile &&
1243          shader->info.stage == MESA_SHADER_COMPUTE) {
1244       layout->num_driver_params =
1245          MAX2(layout->num_driver_params, IR3_DP_WORKGROUP_ID_Z + 1);
1246    }
1247 }
1248 
1249 static unsigned
1250 ir3_align_constoff(struct ir3_const_state *const_state, unsigned constoff,
1251                    unsigned aligment)
1252 {
1253    constoff = align(constoff, aligment);
1254    const_state->required_consts_aligment_vec4 =
1255       MAX2(const_state->required_consts_aligment_vec4, aligment);
1256    return constoff;
1257 }
1258 
1259 /* Sets up the variant-dependent constant state for the ir3_shader.  Note
1260  * that it is also used from ir3_nir_analyze_ubo_ranges() to figure out the
1261  * maximum number of driver params that would eventually be used, to leave
1262  * space for this function to allocate the driver params.
1263  */
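/* The resulting vec4 const layout is, in order: reserved user consts, pushed
 * UBO ranges, preamble and global consts, then (where used) UBO pointers,
 * image dims, kernel params, driver params, streamout (tfbo) pointers,
 * primitive param/map, and finally lowered immediates.
 */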
1264 void
1265 ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
1266                       struct ir3_const_state *const_state)
1267 {
1268    struct ir3_compiler *compiler = v->compiler;
1269 
1270    memset(&const_state->offsets, ~0, sizeof(const_state->offsets));
1271    const_state->required_consts_aligment_vec4 = 1;
1272 
1273    ir3_nir_scan_driver_consts(compiler, nir, const_state);
1274 
1275    if ((compiler->gen < 5) && (v->stream_output.num_outputs > 0)) {
1276       const_state->num_driver_params =
1277          MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
1278    }
1279 
1280    const_state->num_ubos = nir->info.num_ubos;
1281 
1282    assert((const_state->ubo_state.size % 16) == 0);
1283    unsigned constoff = v->shader_options.num_reserved_user_consts +
1284       const_state->ubo_state.size / 16 +
1285       const_state->preamble_size +
1286       const_state->global_size;
1287    unsigned ptrsz = ir3_pointer_size(compiler);
1288 
1289    if (const_state->num_ubos > 0 && compiler->gen < 6) {
1290       const_state->offsets.ubo = constoff;
1291       constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
1292    }
1293 
1294    if (const_state->image_dims.count > 0) {
1295       unsigned cnt = const_state->image_dims.count;
1296       const_state->offsets.image_dims = constoff;
1297       constoff += align(cnt, 4) / 4;
1298    }
1299 
1300    if (v->type == MESA_SHADER_KERNEL) {
1301       const_state->offsets.kernel_params = constoff;
1302       constoff += align(v->cs.req_input_mem, 4) / 4;
1303    }
1304 
1305    if (const_state->num_driver_params > 0) {
1306       /* num_driver_params in dwords.  we only need to align to vec4s for the
1307        * common case of immediate constant uploads, but for indirect dispatch
1308        * the constants may also be indirect and so we have to align the area in
1309        * const space to that requirement.
1310        */
1311       const_state->num_driver_params = align(const_state->num_driver_params, 4);
1312       unsigned upload_unit = 1;
1313       if (v->type == MESA_SHADER_COMPUTE ||
1314           (const_state->num_driver_params >= IR3_DP_VTXID_BASE)) {
1315          upload_unit = compiler->const_upload_unit;
1316       }
1317 
1318       /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
1319       if (v->type == MESA_SHADER_VERTEX && compiler->gen >= 6)
1320          constoff = MAX2(constoff, 1);
1321       constoff = ir3_align_constoff(const_state, constoff, upload_unit);
1322 
1323       const_state->offsets.driver_param = constoff;
1324 
1325       constoff += align(const_state->num_driver_params / 4, upload_unit);
1326    }
1327 
1328    if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
1329        v->stream_output.num_outputs > 0) {
1330       const_state->offsets.tfbo = constoff;
1331       constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
1332    }
1333 
1334    if (!compiler->load_shader_consts_via_preamble) {
1335       switch (v->type) {
1336       case MESA_SHADER_TESS_CTRL:
1337       case MESA_SHADER_TESS_EVAL:
1338          const_state->offsets.primitive_param = constoff;
1339          constoff += 2;
1340 
1341          const_state->offsets.primitive_map = constoff;
1342          break;
1343       case MESA_SHADER_GEOMETRY:
1344          const_state->offsets.primitive_param = constoff;
1345          constoff += 1;
1346 
1347          const_state->offsets.primitive_map = constoff;
1348          break;
1349       default:
1350          break;
1351       }
1352    }
1353 
1354    switch (v->type) {
1355    case MESA_SHADER_VERTEX:
1356       const_state->offsets.primitive_param = constoff;
1357       constoff += 1;
1358       break;
1359    case MESA_SHADER_TESS_CTRL:
1360    case MESA_SHADER_TESS_EVAL:
1361       constoff += DIV_ROUND_UP(v->input_size, 4);
1362       break;
1363    case MESA_SHADER_GEOMETRY:
1364       constoff += DIV_ROUND_UP(v->input_size, 4);
1365       break;
1366    default:
1367       break;
1368    }
1369 
1370    const_state->offsets.immediate = constoff;
1371 
1372    assert(constoff <= ir3_max_const(v));
1373 }
1374 
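/* Free vec4 space between the immediates offset and the hardware const limit,
 * rounded down to a multiple of the largest alignment any consumer requires.
 */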
1375 uint32_t
1376 ir3_const_state_get_free_space(const struct ir3_shader_variant *v,
1377                                const struct ir3_const_state *const_state)
1378 {
1379    uint32_t free_space_vec4 = ir3_max_const(v) - const_state->offsets.immediate;
1380    free_space_vec4 =
1381       (free_space_vec4 / const_state->required_consts_aligment_vec4) *
1382       const_state->required_consts_aligment_vec4;
1383    return free_space_vec4;
1384 }
1385