/*
 * Copyright © 2021 Valve Corporation
 * SPDX-License-Identifier: MIT
 */

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "nir_instr_set.h"

/* Preamble optimization happens in two parts: first we generate the preamble
 * using the generic NIR pass, then we set up the preamble sequence and inline
 * the preamble into the main shader if there was a preamble. The first part
 * should happen before UBO lowering, because we want to prefer more complex
 * expressions over UBO loads, but the second part has to happen after UBO
 * lowering because it may add copy instructions to the preamble.
 */

static void
def_size(nir_def *def, unsigned *size, unsigned *align)
{
   unsigned bit_size = def->bit_size == 1 ? 32 : def->bit_size;
   /* Due to the implicit const file promotion we want to expand 16-bit values
    * to 32-bit so that the truncation in the main shader can hopefully be
    * folded into the use.
    */
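   /* e.g. a 16-bit vec4 counts as 4 dwords, the same as a 32-bit vec4:
    * DIV_ROUND_UP(16, 32) == 1 slot per component.
    */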
   *size = DIV_ROUND_UP(bit_size, 32) * def->num_components;
   *align = 1;
}

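/* Returns true if every use is an ALU source expecting a float, so that a
 * neg/abs on the def is guaranteed to fold into the use as a source modifier.
 * allow_src2 is false for abs: presumably the third source of cat3
 * (mad-style) instructions takes a neg modifier but not abs.
 */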
static bool
all_uses_float(nir_def *def, bool allow_src2)
{
   nir_foreach_use_including_if (use, def) {
      if (nir_src_is_if(use))
         return false;

      nir_instr *use_instr = nir_src_parent_instr(use);
      if (use_instr->type != nir_instr_type_alu)
         return false;
      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      unsigned src_index = ~0;
      for (unsigned i = 0; i < nir_op_infos[use_alu->op].num_inputs; i++) {
         if (&use_alu->src[i].src == use) {
            src_index = i;
            break;
         }
      }

      assert(src_index != ~0);
      nir_alu_type src_type =
         nir_alu_type_get_base_type(nir_op_infos[use_alu->op].input_types[src_index]);

      if (src_type != nir_type_float || (src_index == 2 && !allow_src2))
         return false;
   }

   return true;
}

static bool
all_uses_bit(nir_def *def)
{
   nir_foreach_use_including_if (use, def) {
      if (nir_src_is_if(use))
         return false;

      nir_instr *use_instr = nir_src_parent_instr(use);
      if (use_instr->type != nir_instr_type_alu)
         return false;
      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);

      /* See ir3_cat2_absneg() */
      switch (use_alu->op) {
      case nir_op_iand:
      case nir_op_ior:
      case nir_op_inot:
      case nir_op_ixor:
      case nir_op_bitfield_reverse:
      case nir_op_ufind_msb:
      case nir_op_ifind_msb:
      case nir_op_find_lsb:
      case nir_op_ishl:
      case nir_op_ushr:
      case nir_op_ishr:
      case nir_op_bit_count:
         continue;
      default:
         return false;
      }
   }

   return true;
}

static float
instr_cost(nir_instr *instr, const void *data)
{
   /* We'll assume wave64 here for simplicity and assume normal cat1-cat3 ops
    * take 1 (normalized) cycle.
    *
    * See https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/A6xx-SP
    *
    * TODO: assume wave128 on fragment/compute shaders?
    */

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      unsigned components = alu->def.num_components;
      switch (alu->op) {
      /* cat4 */
      case nir_op_frcp:
      case nir_op_fsqrt:
      case nir_op_frsq:
      case nir_op_flog2:
      case nir_op_fexp2:
      case nir_op_fsin:
      case nir_op_fcos:
         return 4 * components;

      /* Instructions that become src modifiers. Note that for conversions
       * this is really an approximation.
       *
       * This prevents silly things like lifting a negate that would become a
       * modifier.
       */
      case nir_op_f2f32:
      case nir_op_f2f16:
      case nir_op_f2fmp:
      case nir_op_fneg:
         return all_uses_float(&alu->def, true) ? 0 : 1 * components;

      case nir_op_fabs:
         return all_uses_float(&alu->def, false) ? 0 : 1 * components;

      case nir_op_inot:
         return all_uses_bit(&alu->def) ? 0 : 1 * components;

      /* Instructions that become vector split/collect */
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
      case nir_op_mov:
         return 0;

      /* cat1-cat3 */
      default:
         return 1 * components;
      }
      break;
   }

   case nir_instr_type_tex:
      /* cat5 */
      return 8;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo: {
         /* If the UBO and offset are constant, then UBO lowering should do a
          * better job trying to lower this, and opt_preamble shouldn't try to
          * duplicate it. However, if it has a non-constant offset then we can
          * avoid setting up a0.x etc. in the main shader and potentially push
          * less.
          */
         bool const_ubo = nir_src_is_const(intrin->src[0]);
         if (!const_ubo) {
            nir_intrinsic_instr *rsrc = ir3_bindless_resource(intrin->src[0]);
            if (rsrc)
               const_ubo = nir_src_is_const(rsrc->src[0]);
         }

         if (const_ubo && nir_src_is_const(intrin->src[1]))
            return 0;

         /* TODO: get actual numbers for ldc */
         return 8;
      }

      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ssbo_ir3:
      case nir_intrinsic_get_ssbo_size:
      case nir_intrinsic_image_load:
      case nir_intrinsic_bindless_image_load:
         /* cat5/isam */
         return 8;

      /* By default assume it's a sysval or something */
      default:
         return 0;
      }
   }

   case nir_instr_type_phi:
      /* Although we can often coalesce phis, the cost of a phi is a proxy for
       * the cost of the if-else statement... If all phis are moved, then the
       * branches move too. So this needs to have a nonzero cost, even if we're
       * optimistic about coalescing.
       *
       * Value chosen empirically. On Rob's shader-db, a cost of 2 performs
       * better across the board than a cost of 1. Values greater than 2 do
       * not seem to make any difference, so stick with 2.
       */
      return 2;

   default:
      return 0;
   }
}

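/* Estimate the cost, in the main shader, of replacing a hoisted def with a
 * load from the const file. Const-file sources can be folded directly into
 * most ALU instructions, so the load is free unless an actual mov (or vector
 * collect) would be needed.
 */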
static float
rewrite_cost(nir_def *def, const void *data)
{
   /* We always have to expand booleans */
   if (def->bit_size == 1)
      return def->num_components;

   bool mov_needed = false;
   nir_foreach_use (use, def) {
      nir_instr *parent_instr = nir_src_parent_instr(use);
      if (parent_instr->type != nir_instr_type_alu) {
         mov_needed = true;
         break;
      } else {
         nir_alu_instr *alu = nir_instr_as_alu(parent_instr);
         if (alu->op == nir_op_vec2 ||
             alu->op == nir_op_vec3 ||
             alu->op == nir_op_vec4 ||
             alu->op == nir_op_mov) {
            mov_needed = true;
            break;
         } else {
            /* Assume for non-moves that the const is folded into the src */
         }
      }
   }

   return mov_needed ? def->num_components : 0;
}

static bool
avoid_instr(const nir_instr *instr, const void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   return intrin->intrinsic == nir_intrinsic_bindless_resource_ir3;
}

static bool
set_speculate(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *_)
{
   switch (intr->intrinsic) {
   /* These instructions go through bounds-checked hardware descriptors so
    * should be safe to speculate.
    *
    * TODO: This isn't necessarily true in Vulkan, where descriptors don't
    * need to be filled out and bindless descriptor offsets aren't bounds
    * checked. We may need to plumb this information through from turnip for
    * correctness to avoid regressing freedreno codegen.
    */
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ubo_vec4:
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_samples_identical:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_ssbo_ir3:
      nir_intrinsic_set_access(intr, nir_intrinsic_access(intr) |
                                     ACCESS_CAN_SPECULATE);
      return true;

   default:
      return false;
   }
}

bool
ir3_nir_opt_preamble(nir_shader *nir, struct ir3_shader_variant *v)
{
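   /* Sizes here are in units of 32-bit words (matching def_size above), while
    * preamble_size in ir3_const_state is in vec4 units, hence the conversions
    * by a factor of 4.
    */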
   unsigned max_size;
   if (v->binning_pass) {
      const struct ir3_const_state *const_state = ir3_const_state(v);
      max_size = const_state->preamble_size * 4;
   } else {
      struct ir3_const_state worst_case_const_state = {};
      ir3_setup_const_state(nir, v, &worst_case_const_state);
      max_size = ir3_const_state_get_free_space(v, &worst_case_const_state) * 4;
   }

   if (max_size == 0)
      return false;

   bool progress = nir_shader_intrinsics_pass(nir, set_speculate,
                                              nir_metadata_control_flow, NULL);

   nir_opt_preamble_options options = {
      .drawid_uniform = true,
      .subgroup_size_uniform = true,
      .load_workgroup_size_allowed = true,
      .def_size = def_size,
      .preamble_storage_size = max_size,
      .instr_cost_cb = instr_cost,
      .avoid_instr_cb = avoid_instr,
      .rewrite_cost_cb = rewrite_cost,
   };

   unsigned size = 0;
   progress |= nir_opt_preamble(nir, &options, &size);

   if (!v->binning_pass) {
      struct ir3_const_state *const_state = ir3_const_state_mut(v);
      const_state->preamble_size = DIV_ROUND_UP(size, 4);
   }

   return progress;
}

/* This isn't nearly as comprehensive as what's done in nir_opt_preamble, but
 * in various use-cases we need to hoist definitions into preambles outside of
 * opt_preamble. Currently we only handle a few uncomplicated intrinsics.
 */
bool
ir3_def_is_rematerializable_for_preamble(nir_def *def,
                                         nir_def **preamble_defs)
{
   switch (def->parent_instr->type) {
   case nir_instr_type_load_const:
      return true;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      switch (intrin->intrinsic) {
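      /* A load_ubo may be rematerialized if its sources can be, and if it is
       * either executed unconditionally (its block sits directly at the
       * function's top level) or is marked safe to speculate.
       */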
      case nir_intrinsic_load_ubo:
         return ir3_def_is_rematerializable_for_preamble(intrin->src[0].ssa,
                                                         preamble_defs) &&
            ir3_def_is_rematerializable_for_preamble(intrin->src[1].ssa,
                                                     preamble_defs) &&
            (def->parent_instr->block->cf_node.parent->type ==
             nir_cf_node_function ||
             (nir_intrinsic_access(intrin) & ACCESS_CAN_SPECULATE));
      case nir_intrinsic_bindless_resource_ir3:
         return ir3_def_is_rematerializable_for_preamble(intrin->src[0].ssa,
                                                         preamble_defs);
      case nir_intrinsic_load_preamble:
         return !!preamble_defs;
      default:
         return false;
      }
   }
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!ir3_def_is_rematerializable_for_preamble(alu->src[i].src.ssa,
                                                       preamble_defs))
            return false;
      }
      return true;
   }
   default:
      return false;
   }
}

static nir_def *
_rematerialize_def(nir_builder *b, struct hash_table *remap_ht,
                   struct set *instr_set, nir_def **preamble_defs,
                   nir_def *def)
{
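   /* Already rematerialized into the preamble; don't clone it again. */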
   if (_mesa_hash_table_search(remap_ht, def->parent_instr))
      return NULL;

   switch (def->parent_instr->type) {
   case nir_instr_type_load_const:
      break;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_load_preamble) {
         _mesa_hash_table_insert(remap_ht, def,
                                 preamble_defs[nir_intrinsic_base(intrin)]);
         return preamble_defs[nir_intrinsic_base(intrin)];
      } else {
         for (unsigned i = 0; i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
              i++)
            _rematerialize_def(b, remap_ht, instr_set, preamble_defs,
                               intrin->src[i].ssa);
      }
      break;
   }
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
         _rematerialize_def(b, remap_ht, instr_set, preamble_defs,
                            alu->src[i].src.ssa);
      break;
   }
   default:
      unreachable("should not get here");
   }

   nir_instr *instr = nir_instr_clone_deep(b->shader, def->parent_instr,
                                           remap_ht);
   if (instr_set) {
      nir_instr *other_instr =
         nir_instr_set_add_or_rewrite(instr_set, instr, NULL);
      if (other_instr) {
         instr = other_instr;
         _mesa_hash_table_insert(remap_ht, def, nir_instr_def(other_instr));
      } else {
         nir_builder_instr_insert(b, instr);
      }
   } else {
      nir_builder_instr_insert(b, instr);
   }

   return nir_instr_def(instr);
}

/* Hoist a given definition into the preamble. If "instr_set" is non-NULL,
 * de-duplicate the hoisted definitions. If "preamble_defs" is non-NULL, it is
 * used to remap any load_preamble instructions the hoisted definition uses
 * back to the original definitions in the preamble.
 */

nir_def *
ir3_rematerialize_def_for_preamble(nir_builder *b, nir_def *def,
                                   struct set *instr_set,
                                   nir_def **preamble_defs)
{
   struct hash_table *remap_ht = _mesa_pointer_hash_table_create(NULL);

   nir_def *new_def =
      _rematerialize_def(b, remap_ht, instr_set, preamble_defs, def);

   _mesa_hash_table_destroy(remap_ht, NULL);

   return new_def;
}


static void
get_descriptors(nir_instr *instr, nir_def **descs)
{
   if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      /* TODO: handle non-bindless tex instructions. These are more
       * complicated, because of the implicit addition in the instruction.
       */
      int texture_index =
         nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
      int sampler_index =
         nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
      if (texture_index >= 0)
         descs[0] = tex->src[texture_index].src.ssa;
      if (sampler_index >= 0)
         descs[1] = tex->src[sampler_index].src.ssa;
   } else if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_ssbo_atomic:
      case nir_intrinsic_ssbo_atomic_swap:
      case nir_intrinsic_get_ssbo_size:
      case nir_intrinsic_image_load:
      case nir_intrinsic_bindless_image_load:
      case nir_intrinsic_image_store:
      case nir_intrinsic_bindless_image_store:
      case nir_intrinsic_image_atomic:
      case nir_intrinsic_bindless_image_atomic:
      case nir_intrinsic_image_size:
      case nir_intrinsic_bindless_image_size:
         descs[0] = intrin->src[0].ssa;
         break;
      case nir_intrinsic_store_ssbo:
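         /* For stores the descriptor is src[1]; src[0] is the value being
          * stored.
          */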
         descs[0] = intrin->src[1].ssa;
         break;
      default:
         break;
      }
   }
}

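/* Maximum number of descriptor prefetches we'll emit per shader; presumably a
 * practical cap to bound the preamble size rather than an exact HW limit.
 */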
#define MAX_PREFETCHES 32

struct prefetches {
   nir_def *prefetches[MAX_PREFETCHES];
   unsigned num_prefetches;
};

static bool
is_already_prefetched(struct prefetches *prefetches, nir_def *def)
{
   for (unsigned i = 0; i < prefetches->num_prefetches; i++) {
      if (prefetches->prefetches[i] == def)
         return true;
   }

   return false;
}

static void
add_prefetch(struct prefetches *prefetches, nir_def *def)
{
   assert(prefetches->num_prefetches < MAX_PREFETCHES);
   prefetches->prefetches[prefetches->num_prefetches++] = def;
}

struct prefetch_state {
   struct prefetches tex, sampler;
};

static bool
emit_descriptor_prefetch(nir_builder *b, nir_instr *instr, nir_def **descs,
                         struct prefetch_state *state)
{
   if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      int sampler_index =
         nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
      int texture_index =
         nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);

      /* For texture instructions, prefetch if at least one source hasn't been
       * prefetched already. For example, the same sampler may be used with
       * different textures, and we still want to prefetch the texture
       * descriptor if we've already prefetched the sampler descriptor.
       */

      bool tex_already_prefetched = is_already_prefetched(&state->tex, descs[0]);

      if (!tex_already_prefetched &&
          state->tex.num_prefetches == MAX_PREFETCHES)
         return false;

      assert(texture_index >= 0);
      if (sampler_index >= 0) {
         bool sampler_already_prefetched =
            is_already_prefetched(&state->sampler, descs[1]);

         if (!sampler_already_prefetched &&
             state->sampler.num_prefetches == MAX_PREFETCHES)
            return false;

         if (tex_already_prefetched && sampler_already_prefetched)
            return false;

         if (!tex_already_prefetched)
            add_prefetch(&state->tex, descs[0]);
         if (!sampler_already_prefetched)
            add_prefetch(&state->sampler, descs[1]);

         nir_prefetch_sam_ir3(b, descs[0], descs[1]);
      } else {
         if (tex_already_prefetched)
            return false;

         add_prefetch(&state->tex, descs[0]);
         nir_prefetch_tex_ir3(b, descs[0]);
      }
   } else {
      assert(instr->type == nir_instr_type_intrinsic);

      if (state->tex.num_prefetches == MAX_PREFETCHES)
         return false;

      if (is_already_prefetched(&state->tex, descs[0]))
         return false;

      add_prefetch(&state->tex, descs[0]);

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (intrin->intrinsic == nir_intrinsic_load_ubo)
         nir_prefetch_ubo_ir3(b, descs[0]);
      else
         nir_prefetch_tex_ir3(b, descs[0]);
   }

   return true;
}

static unsigned
get_preamble_offset(nir_def *def)
{
   return nir_intrinsic_base(nir_instr_as_intrinsic(def->parent_instr));
}

/* Prefetch descriptors in the preamble. This is an optimization introduced on
 * a7xx, mainly useful when the preamble is an early preamble, and replaces the
 * use of CP_LOAD_STATE on a6xx to prefetch descriptors in HLSQ.
 */

bool
ir3_nir_opt_prefetch_descriptors(nir_shader *nir, struct ir3_shader_variant *v)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);

   nir_function_impl *main = nir_shader_get_entrypoint(nir);
   struct set *instr_set = nir_instr_set_create(NULL);
   nir_function_impl *preamble = main->preamble ? main->preamble->impl : NULL;
   nir_builder b;
   bool progress = false;
   struct prefetch_state state = {};

   nir_def **preamble_defs = calloc(const_state->preamble_size * 4,
                                    sizeof(nir_def *));

   /* Collect preamble defs. This is useful if the computation of the offset
    * has already been hoisted to the preamble.
    */
   if (preamble) {
      nir_foreach_block (block, preamble) {
         nir_foreach_instr (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic != nir_intrinsic_store_preamble)
               continue;

            assert(nir_intrinsic_base(intrin) < const_state->preamble_size * 4);
            preamble_defs[nir_intrinsic_base(intrin)] = intrin->src[0].ssa;
         }
      }
   }

   nir_foreach_block (block, main) {
      nir_foreach_instr (instr, block) {
         nir_def *descs[2] = { NULL, NULL };
         nir_def *preamble_descs[2] = { NULL, NULL };
         get_descriptors(instr, descs);

         /* We must have found at least one descriptor to proceed */
         if (!descs[0] && !descs[1])
            continue;

         /* The instruction itself must be hoistable.
          * TODO: If the descriptor is statically referenced and in-bounds,
          * then we should be able to hoist the descriptor load even if the
          * descriptor contents aren't guaranteed. This would require more
          * plumbing.
          * TODO: Textures. This is broken in nir_opt_preamble at the moment
          * and handling them would also require more plumbing.
          */
         if (instr->type == nir_instr_type_intrinsic &&
             nir_intrinsic_has_access(nir_instr_as_intrinsic(instr)) &&
             !(nir_intrinsic_access(nir_instr_as_intrinsic(instr)) &
               ACCESS_CAN_SPECULATE) &&
             block->cf_node.parent->type != nir_cf_node_function)
            continue;

         /* Each descriptor must be rematerializable */
         if (descs[0] &&
             !ir3_def_is_rematerializable_for_preamble(descs[0], preamble_defs))
            continue;
         if (descs[1] &&
             !ir3_def_is_rematerializable_for_preamble(descs[1], preamble_defs))
            continue;

         /* If the preamble hasn't been created yet, this descriptor can't be
          * a duplicate and we will definitely insert an instruction, so
          * create the preamble now.
          */
         if (!preamble) {
            preamble = nir_shader_get_preamble(nir);
         }

         b = nir_builder_at(nir_after_impl(preamble));

         /* Materialize descriptors for the prefetch. Note that we deduplicate
          * descriptors so that we don't blow our budget when repeatedly
          * loading from the same descriptor, even if the calculation of the
          * descriptor offset hasn't been CSE'd because the accesses are in
          * different blocks. This is common because we emit the
          * bindless_resource_ir3 intrinsic right before the access.
          */
         for (unsigned i = 0; i < 2; i++) {
            if (!descs[i])
               continue;

            preamble_descs[i] =
               ir3_rematerialize_def_for_preamble(&b, descs[i], instr_set,
                                                  preamble_defs);
         }

         progress |= emit_descriptor_prefetch(&b, instr, preamble_descs, &state);

         if (state.sampler.num_prefetches == MAX_PREFETCHES &&
             state.tex.num_prefetches == MAX_PREFETCHES)
            goto finished;
      }
   }

finished:
   nir_metadata_preserve(main, nir_metadata_all);
   if (preamble) {
      nir_metadata_preserve(preamble,
                            nir_metadata_block_index |
                            nir_metadata_dominance);
   }
   nir_instr_set_destroy(instr_set);
   free(preamble_defs);
   return progress;
}

bool
ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
{
   nir_function_impl *main = nir_shader_get_entrypoint(nir);

   if (!main->preamble)
      return false;

   nir_function_impl *preamble = main->preamble->impl;

   /* First, lower load/store_preamble. */
   const struct ir3_const_state *const_state = ir3_const_state(v);
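   /* The preamble's consts are allocated after the reserved user consts, the
    * consts holding lowered-UBO contents, and the global consts; all of these
    * offsets are counted in 32-bit words here.
    */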
   unsigned preamble_base = v->shader_options.num_reserved_user_consts * 4 +
      const_state->ubo_state.size / 4 + const_state->global_size * 4;
   unsigned preamble_size = const_state->preamble_size * 4;

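   /* Tracks which preamble slots hold a value stored as 32-bit float: for
    * 16-bit values whose uses are all float, the store widens with f2f32 and
    * the load narrows with f2f16, which round-trips exactly.
    */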
   BITSET_DECLARE(promoted_to_float, preamble_size);
   memset(promoted_to_float, 0, sizeof(promoted_to_float));

   nir_builder builder_main = nir_builder_create(main);
   nir_builder *b = &builder_main;

   nir_foreach_block (block, main) {
      nir_foreach_instr_safe (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_preamble)
            continue;

         nir_def *dest = &intrin->def;

         unsigned offset = preamble_base + nir_intrinsic_base(intrin);
         b->cursor = nir_before_instr(instr);

         nir_def *new_dest = nir_load_const_ir3(
            b, dest->num_components, 32, nir_imm_int(b, 0), .base = offset);

         if (dest->bit_size == 1) {
            new_dest = nir_i2b(b, new_dest);
         } else if (dest->bit_size != 32) {
            if (all_uses_float(dest, true)) {
               assert(dest->bit_size == 16);
               new_dest = nir_f2f16(b, new_dest);
               BITSET_SET(promoted_to_float, nir_intrinsic_base(intrin));
            } else {
               new_dest = nir_u2uN(b, new_dest, dest->bit_size);
            }
         }

         nir_def_rewrite_uses(dest, new_dest);
         nir_instr_remove(instr);
         nir_instr_free(instr);
      }
   }

   nir_builder builder_preamble = nir_builder_create(preamble);
   b = &builder_preamble;

   nir_foreach_block (block, preamble) {
      nir_foreach_instr_safe (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_store_preamble)
            continue;

         nir_def *src = intrin->src[0].ssa;
         unsigned offset = preamble_base + nir_intrinsic_base(intrin);

         b->cursor = nir_before_instr(instr);

         if (src->bit_size == 1)
            src = nir_b2i32(b, src);
         if (src->bit_size != 32) {
            if (BITSET_TEST(promoted_to_float, nir_intrinsic_base(intrin))) {
               assert(src->bit_size == 16);
               src = nir_f2f32(b, src);
            } else {
               src = nir_u2u32(b, src);
            }
         }

         nir_store_const_ir3(b, src, .base = offset);
         nir_instr_remove(instr);
         nir_instr_free(instr);
      }
   }

   /* Now, create the preamble sequence and move the preamble into the main
    * shader:
    *
    * if (preamble_start_ir3()) {
    *    if (subgroupElect()) {
    *       preamble();
    *       preamble_end_ir3();
    *    }
    * }
    * ...
    */
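   /* The sequence above is built with the NIR builder below; the call into
    * the preamble function is then flattened into the main shader by
    * nir_inline_functions(), after which the preamble function is removed.
    */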

   /* @decl_regs need to stay in the first block. */
   b->cursor = nir_after_reg_decls(main);

   nir_if *outer_if = nir_push_if(b, nir_preamble_start_ir3(b, 1));
   {
      nir_if *inner_if = nir_push_if(b, nir_elect_any_ir3(b, 1));
      {
         nir_call_instr *call = nir_call_instr_create(nir, main->preamble);
         nir_builder_instr_insert(b, &call->instr);
         nir_preamble_end_ir3(b);
      }
      nir_pop_if(b, inner_if);
   }
   nir_pop_if(b, outer_if);

   nir_inline_functions(nir);
   exec_node_remove(&main->preamble->node);
   main->preamble = NULL;

   nir_metadata_preserve(main, nir_metadata_none);
   return true;
}