1 /*
2 * Copyright © 2014 Connor Abbott
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott ([email protected])
25 *
26 */
27
28 #include "nir.h"
29 #include "nir_control_flow.h"
30
31 /*
32 * This file implements an optimization that deletes statically
33 * unreachable/dead code. In NIR, one way this can happen is when an if
34 * statement has a constant condition:
35 *
36 * if (true) {
37 * ...
38 * }
39 *
40 * We delete the if statement and paste the contents of the always-executed
41 * branch into the surrounding control flow, possibly removing more code if
42 * the branch had a jump at the end.
43 *
44 * Another way is that control flow can end in a jump so that code after it
45 * never gets executed. In particular, this can happen after optimizing
46 * something like:
47 *
48 * if (true) {
49 * ...
50 * break;
51 * }
52 * ...
53 *
54 * We also consider the case where both branches of an if end in a jump, e.g.:
55 *
56 * if (...) {
57 * break;
58 * } else {
59 * continue;
60 * }
61 * ...
62 *
63 * Finally, we also handle removing useless loops and ifs, i.e. loops and ifs
64 * with no side effects and without any definitions that are used
65 * elsewhere. This case is a little different from the first two in that the
66 * code is actually run (it just never does anything), but there are similar
67 * issues with needing to be careful with restarting after deleting the
68 * cf_node (see dead_cf_list()) so this is a convenient place to remove them.
69 */
70
/* Replace an if whose condition is known (or chosen, for undef) at compile
 * time by pasting the taken branch's contents into the surrounding control
 * flow and deleting the if.
 */
static void
opt_constant_if(nir_if *if_stmt, bool condition)
{
   /* Last block of the branch that will actually execute; its fall-through
    * edge (if any) is the only one that can reach the code after the if.
    */
   nir_block *last_block = condition ? nir_if_last_then_block(if_stmt)
                                     : nir_if_last_else_block(if_stmt);

   /* The control flow list we're about to paste in may include a jump at the
    * end, and in that case we have to delete the rest of the control flow
    * list after the if since it's unreachable and the validator will balk if
    * we don't.
    */

   if (nir_block_ends_in_jump(last_block)) {
      nir_remove_after_cf_node(&if_stmt->cf_node);
   } else {
      /* Remove any phi nodes after the if by rewriting uses to point to the
       * correct source.
       */
      nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
      nir_foreach_phi_safe(phi, after) {
         nir_def *def = NULL;
         nir_foreach_phi_src(phi_src, phi) {
            /* Only the source coming from the taken branch matters; sources
             * from the dead branch are unreachable.
             */
            if (phi_src->pred != last_block)
               continue;

            def = phi_src->src.ssa;
         }

         /* last_block falls through to "after", so the phi must have had a
          * source for it.
          */
         assert(def);
         nir_def_replace(&phi->def, def);
      }
   }

   /* Finally, actually paste in the then or else branch and delete the if. */
   struct exec_list *cf_list = condition ? &if_stmt->then_list
                                         : &if_stmt->else_list;

   nir_cf_list list;
   nir_cf_list_extract(&list, cf_list);
   nir_cf_reinsert(&list, nir_after_cf_node(&if_stmt->cf_node));
   nir_cf_node_remove(&if_stmt->cf_node);
}
113
114 static bool
block_in_cf_node(nir_block * block,nir_cf_node * node)115 block_in_cf_node(nir_block *block, nir_cf_node *node)
116 {
117 assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);
118 for (nir_cf_node *cur = block->cf_node.parent; cur && cur != node->parent;
119 cur = cur->parent) {
120 if (cur == node)
121 return true;
122 }
123 return false;
124 }
125
126 static bool
def_only_used_in_cf_node(nir_def * def,void * _node)127 def_only_used_in_cf_node(nir_def *def, void *_node)
128 {
129 nir_cf_node *node = _node;
130
131 nir_foreach_use_including_if(use, def) {
132 nir_block *block;
133
134 if (nir_src_is_if(use))
135 block = nir_cf_node_as_block(nir_cf_node_prev(&nir_src_parent_if(use)->cf_node));
136 else
137 block = nir_src_parent_instr(use)->block;
138
139 /* Note: Normally, the uses of a phi instruction are considered to be
140 * used in the block that is the predecessor of the phi corresponding to
141 * that use. If we were computing liveness or something similar, that
142 * would mean a special case here for phis. However, we're trying here
143 * to determine if the SSA def ever escapes the loop. If it's used by a
144 * phi that lives outside the loop then it doesn't matter if the
145 * corresponding predecessor is inside the loop or not because the value
146 * can go through the phi into the outside world and escape the loop.
147 */
148 if (block != def->parent_instr->block && !block_in_cf_node(block, node))
149 return false;
150 }
151
152 return true;
153 }
154
/*
 * Test if a loop or if node is dead. Such nodes are dead if:
 *
 * 1) It has no side effects (i.e. intrinsics which could possibly affect the
 * state of the program aside from producing an SSA value, indicated by a lack
 * of NIR_INTRINSIC_CAN_ELIMINATE).
 *
 * 2) It has no phi instructions after it, since those indicate values inside
 * the node being used after the node.
 *
 * 3) None of the values defined inside the node is used outside the node,
 * i.e. none of the definitions that dominate the node exit are used outside.
 *
 * If those conditions hold, then the node is dead and can be deleted.
 */

static bool
node_is_dead(nir_cf_node *node)
{
   assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);

   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));

   /* Quick check if there are any phis that follow this CF node. If there
    * are, then we automatically know it isn't dead.
    */
   if (!exec_list_is_empty(&after->instr_list) &&
       nir_block_first_instr(after)->type == nir_instr_type_phi)
      return false;

   nir_foreach_block_in_cf_node(block, node) {
      /* Determine whether this block sits inside a loop: either the node
       * itself is a loop, or some CF ancestor between the block and the node
       * is one.  Jumps inside a loop only move within/out of that loop, while
       * jumps outside a loop can skip arbitrary following code.
       */
      bool inside_loop = node->type == nir_cf_node_loop;
      for (nir_cf_node *n = &block->cf_node;
           !inside_loop && n != node; n = n->parent) {
         if (n->type == nir_cf_node_loop)
            inside_loop = true;
      }

      nir_foreach_instr(instr, block) {
         /* Calls may have arbitrary side effects; be conservative. */
         if (instr->type == nir_instr_type_call)
            return false;

         /* Return and halt instructions can cause us to skip over other
          * side-effecting instructions after the loop, so consider them to
          * have side effects here.
          *
          * When the block is not inside a loop, break and continue might also
          * cause a skip.
          */
         if (instr->type == nir_instr_type_jump &&
             (!inside_loop ||
              nir_instr_as_jump(instr)->type == nir_jump_return ||
              nir_instr_as_jump(instr)->type == nir_jump_halt))
            return false;

         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            /* Anything not marked CAN_ELIMINATE has side effects. */
            if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
                  NIR_INTRINSIC_CAN_ELIMINATE))
               return false;

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_load_global:
               /* If there's a memory barrier after the loop, a load might be
                * required to happen before some other instruction after the
                * barrier, so it is not valid to eliminate it -- unless we
                * know we can reorder it.
                *
                * Consider only loads that the result can be affected by other
                * invocations.
                */
               if (intrin->intrinsic == nir_intrinsic_load_deref) {
                  nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
                  /* Loads from private storage can't be observed by other
                   * invocations, so they are safe to drop.
                   */
                  if (!nir_deref_mode_may_be(deref, nir_var_mem_ssbo |
                                                       nir_var_mem_shared |
                                                       nir_var_mem_global |
                                                       nir_var_shader_out))
                     break;
               }
               if (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER)
                  break;
               return false;

            case nir_intrinsic_load_shared:
            case nir_intrinsic_load_shared2_amd:
            case nir_intrinsic_load_output:
            case nir_intrinsic_load_per_vertex_output:
               /* Same as above loads. */
               return false;

            default:
               /* Do nothing. */
               break;
            }
         }

         /* Condition 3: no def inside the node may escape it. */
         if (!nir_foreach_def(instr, def_only_used_in_cf_node, node))
            return false;
      }
   }

   return true;
}
260
261 static bool
dead_cf_block(nir_block * block)262 dead_cf_block(nir_block *block)
263 {
264 /* opt_constant_if() doesn't handle this case. */
265 if (nir_block_ends_in_jump(block) &&
266 !exec_node_is_tail_sentinel(block->cf_node.node.next)) {
267 nir_remove_after_cf_node(&block->cf_node);
268 return true;
269 }
270
271 nir_if *following_if = nir_block_get_following_if(block);
272 if (following_if) {
273 if (nir_src_is_const(following_if->condition)) {
274 opt_constant_if(following_if, nir_src_as_bool(following_if->condition));
275 return true;
276 } else if (nir_src_is_undef(following_if->condition)) {
277 opt_constant_if(following_if, false);
278 return true;
279 }
280
281 if (node_is_dead(&following_if->cf_node)) {
282 nir_cf_node_remove(&following_if->cf_node);
283 return true;
284 }
285 }
286
287 nir_loop *following_loop = nir_block_get_following_loop(block);
288 if (!following_loop)
289 return false;
290
291 if (!node_is_dead(&following_loop->cf_node))
292 return false;
293
294 nir_cf_node_remove(&following_loop->cf_node);
295 return true;
296 }
297
/* Walk a CF list, removing dead control flow as we go.
 *
 * Returns true if any progress was made.  *list_ends_in_jump is set when the
 * list is statically known to end in a jump (directly, or via an if whose
 * branches both jump), which callers use to prune code after nested ifs.
 */
static bool
dead_cf_list(struct exec_list *list, bool *list_ends_in_jump)
{
   bool progress = false;
   *list_ends_in_jump = false;

   nir_cf_node *prev = NULL;

   foreach_list_typed(nir_cf_node, cur, node, list) {
      switch (cur->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(cur);
         /* Keep retrying: each removal can expose another dead node right
          * after the (possibly merged) current block.
          */
         while (dead_cf_block(block)) {
            /* We just deleted the if or loop after this block.
             * nir_cf_node_remove may have deleted the block before
             * or after it -- which one is an implementation detail.
             * Therefore, to recover the place we were at, we have
             * to use the previous cf_node.
             */

            if (prev) {
               cur = nir_cf_node_next(prev);
            } else {
               cur = exec_node_data(nir_cf_node, exec_list_get_head(list),
                                    node);
            }

            block = nir_cf_node_as_block(cur);

            progress = true;
         }

         if (nir_block_ends_in_jump(block)) {
            /* dead_cf_block() already removed anything after a jump, so the
             * jump must now be the last thing in the list.
             */
            assert(exec_node_is_tail_sentinel(cur->node.next));
            *list_ends_in_jump = true;
         }

         break;
      }

      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(cur);
         bool then_ends_in_jump, else_ends_in_jump;
         progress |= dead_cf_list(&if_stmt->then_list, &then_ends_in_jump);
         progress |= dead_cf_list(&if_stmt->else_list, &else_ends_in_jump);

         if (then_ends_in_jump && else_ends_in_jump) {
            /* Both branches jump away, so nothing after the if can execute.
             * Remove it unless it's already just an empty tail block.
             */
            *list_ends_in_jump = true;
            nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
            if (!exec_list_is_empty(&next->instr_list) ||
                !exec_node_is_tail_sentinel(next->cf_node.node.next)) {
               /* Removal invalidates our iterator; bail and let the caller
                * re-run the pass for further progress.
                */
               nir_remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cur);
         assert(!nir_loop_has_continue_construct(loop));
         bool dummy;
         progress |= dead_cf_list(&loop->body, &dummy);

         /* An infinite loop (no break => successor has no predecessors)
          * makes everything after it unreachable.
          */
         nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
         if (next->predecessors->entries == 0 &&
             (!exec_list_is_empty(&next->instr_list) ||
              !exec_node_is_tail_sentinel(next->cf_node.node.next))) {
            /* Same as the if case: removal invalidates the iterator. */
            nir_remove_after_cf_node(cur);
            return true;
         }
         break;
      }

      default:
         unreachable("unknown cf node type");
      }

      prev = cur;
   }

   return progress;
}
382
383 static bool
opt_dead_cf_impl(nir_function_impl * impl)384 opt_dead_cf_impl(nir_function_impl *impl)
385 {
386 bool dummy;
387 bool progress = dead_cf_list(&impl->body, &dummy);
388
389 if (progress) {
390 nir_metadata_preserve(impl, nir_metadata_none);
391 nir_rematerialize_derefs_in_use_blocks_impl(impl);
392
393 /* The CF manipulation code called by this pass is smart enough to keep
394 * from breaking any SSA use/def chains by replacing any uses of removed
395 * instructions with SSA undefs. However, it's not quite smart enough
396 * to always preserve the dominance properties. In particular, if you
397 * remove the one break from a loop, stuff in the loop may still be used
398 * outside the loop even though there's no path between the two. We can
399 * easily fix these issues by calling nir_repair_ssa which will ensure
400 * that the dominance properties hold.
401 */
402 nir_repair_ssa_impl(impl);
403 } else {
404 nir_metadata_preserve(impl, nir_metadata_all);
405 }
406
407 return progress;
408 }
409
410 bool
nir_opt_dead_cf(nir_shader * shader)411 nir_opt_dead_cf(nir_shader *shader)
412 {
413 bool progress = false;
414
415 nir_foreach_function_impl(impl, shader)
416 progress |= opt_dead_cf_impl(impl);
417
418 return progress;
419 }
420