/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"

/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't think it will hurt anything.
 */
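/*
 * Roughly speaking (illustrative pseudo-NIR, not exact syntax), a vector phi
 * such as
 *
 *    vec2 ssa_5 = phi block_1: ssa_3, block_2: ssa_4
 *
 * becomes one scalar phi per component, fed by per-component movs placed at
 * the end of each predecessor, plus a vecN that reassembles the result:
 *
 *    block_1:  ssa_6 = mov ssa_3.x    ssa_7 = mov ssa_3.y
 *    block_2:  ssa_8 = mov ssa_4.x    ssa_9 = mov ssa_4.y
 *
 *    vec1 ssa_10 = phi block_1: ssa_6, block_2: ssa_8
 *    vec1 ssa_11 = phi block_1: ssa_7, block_2: ssa_9
 *    vec2 ssa_12 = vec2 ssa_10, ssa_11
 *
 * Copy propagation is expected to clean up most of the movs and the vecN.
 */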

struct lower_phis_to_scalar_state {
   nir_shader *shader;
   void *mem_ctx;
   struct exec_list dead_instrs;

   bool lower_all;

   /* Hash table marking which phi nodes are scalarizable.  The keys are
    * pointers to phi instructions and the data is NULL if the phi is not
    * scalarizable and non-NULL if it is.
    */
   struct hash_table *phi_table;
};

static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);

static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
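      /* For example (an assumption based on the opcode definitions rather
       * than anything in this file): nir_op_fadd is a per-component op with
       * output_size == 0 and scalarizes cleanly, whereas opcodes whose
       * destination size is fixed by the opcode, such as
       * nir_op_unpack_64_2x32, are not considered scalarizable here.
       */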
      return nir_op_infos[src_alu->op].output_size == 0 ||
             nir_op_is_vec_or_mov(src_alu->op);
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_undef:
      /* The caller of this function is going to OR the results and we don't
       * want undefs to count so we return false.
       */
      return false;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, nir_var_function_temp |
                                                 nir_var_shader_temp);
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_per_primitive_input:
         return true;
      default:
         break;
      }
   }
      FALLTHROUGH;

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where all of the sources are
 * scalarizable, unless lower_all is set.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component;  this move can almost certainly be coalesced
 * away.
 */
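/* Schematically (an illustrative sketch with made-up names, not taken from
 * this file): lowering forces per-component copies like
 *
 *    x = mov v.y
 *
 * where the vector 'v' stays live, so the scalar 'x' cannot simply be
 * coalesced into it.  Going the other way, building a vector phi out of
 * scalars only needs
 *
 *    v.y = mov x
 *
 * at the end of the predecessor, which register coalescing in the backend
 * can usually eliminate.
 */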
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->def.num_components == 1)
      return false;

   if (state->lower_all)
      return true;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now. That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = false;

   nir_foreach_phi_src(src, phi) {
      /* This loop ignores srcs that are not scalarizable because it's likely
       * still worth copying to temps if another phi source is scalarizable.
       * This reduces register spilling by a huge amount in the i965 driver for
       * Deus Ex: MD.
       */
      scalarizable = is_phi_src_scalarizable(src, state);
      if (scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have changed while recursing the
    * dependence graph, so we need to reset it */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}

static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;
   nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_phi_safe(phi, block) {
      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->def.bit_size;

      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op = nir_op_vec(phi->def.num_components);

      nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
      nir_def_init(&vec->instr, &vec->def,
                   phi->def.num_components, bit_size);

      for (unsigned i = 0; i < phi->def.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
         nir_def_init(&new_phi->instr, &new_phi->def, 1,
                      phi->def.bit_size);

         vec->src[i].src = nir_src_for_ssa(&new_phi->def);

         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->shader,
                                                      nir_op_mov);
            nir_def_init(&mov->instr, &mov->def, 1, bit_size);
            mov->src[0].src = nir_src_for_ssa(src->src.ssa);
            mov->src[0].swizzle[0] = i;

            /* Insert at the end of the predecessor but before the jump */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_instr_add_src(new_phi, src->pred, &mov->def);
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }

      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_def_replace(&phi->def, &vec->def);
      exec_list_push_tail(&state->dead_instrs, &phi->instr.node);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized version so that's
       * ok.  However, we are also inserting vec operations after the last
       * phi node, so once we get here we can't trust even the safe iterator
       * to stop properly.  We have to break manually.
       */
      if (phi == last_phi)
         break;
   }

   return progress;
}

static bool
lower_phis_to_scalar_impl(nir_function_impl *impl, bool lower_all)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.shader = impl->function->shader;
   state.mem_ctx = ralloc_parent(impl);
   exec_list_make_empty(&state.dead_instrs);
   state.phi_table = _mesa_pointer_hash_table_create(NULL);
   state.lower_all = lower_all;

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_control_flow);

   nir_instr_free_list(&state.dead_instrs);

   ralloc_free(state.phi_table);

   return progress;
}

/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it can
 * lower to scalar phi nodes.  Not all phi nodes are lowered.  For instance,
 * if one of the sources is a non-scalarizable vector, then we don't bother
 * lowering because that would generate hard-to-coalesce movs.
 */
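/* Illustrative usage sketch (an assumption about typical callers, not part
 * of this file): drivers generally run this pass inside their optimization
 * loop together with copy propagation and dead-code elimination, which clean
 * up the vecN and mov instructions the lowering introduces:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_lower_phis_to_scalar, false);
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *    } while (progress);
 */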
bool
nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress = lower_phis_to_scalar_impl(impl, lower_all) || progress;
   }

   return progress;
}
299