1 /*
2 * Copyright 2023 Valve Corporation
3 * SPDX-License-Identifier: MIT
4 */
5
6 #include "nir/nir_builder.h"
7 #include "nir.h"
8 #include "nir_control_flow.h"
9 #include "nir_loop_analyze.h"
10
11 static bool
is_block_empty(nir_block * block)12 is_block_empty(nir_block *block)
13 {
14 return nir_cf_node_is_last(&block->cf_node) &&
15 exec_list_is_empty(&block->instr_list);
16 }
17
18 static bool
is_block_singular(nir_block * block)19 is_block_singular(nir_block *block)
20 {
21 return nir_cf_node_is_last(&block->cf_node) &&
22 (exec_list_is_empty(&block->instr_list) ||
23 (exec_list_is_singular(&block->instr_list) && nir_block_ends_in_jump(block)));
24 }
25
26 static bool
nir_block_ends_in_continue(nir_block * block)27 nir_block_ends_in_continue(nir_block *block)
28 {
29 if (exec_list_is_empty(&block->instr_list))
30 return false;
31
32 nir_instr *instr = nir_block_last_instr(block);
33 return instr->type == nir_instr_type_jump &&
34 nir_instr_as_jump(instr)->type == nir_jump_continue;
35 }
36
37 /**
38 * This optimization tries to merge two equal jump instructions (break or
39 * continue) into a single one.
40 *
41 * This optimization turns
42 *
43 * loop {
44 * ...
45 * if (cond) {
46 * do_work_1();
47 * break;
48 * } else {
49 * do_work_2();
50 * break;
51 * }
52 * }
53 *
54 * into:
55 *
56 * loop {
57 * ...
58 * if (cond) {
59 * do_work_1();
60 * } else {
61 * do_work_2();
62 * }
63 * break;
64 * }
65 *
66 * It does the same with continue statements, respectively.
67 *
68 */
static bool
opt_loop_merge_break_continue(nir_if *nif)
{
   nir_block *after_if = nir_cf_node_cf_tree_next(&nif->cf_node);

   /* The block after the IF must have no predecessors and be empty. */
   if (after_if->predecessors->entries > 0 || !is_block_empty(after_if))
      return false;

   nir_block *last_then = nir_if_last_then_block(nif);
   nir_block *last_else = nir_if_last_else_block(nif);
   const bool then_break = nir_block_ends_in_break(last_then);
   const bool else_break = nir_block_ends_in_break(last_else);
   const bool then_cont = nir_block_ends_in_continue(last_then);
   const bool else_cont = nir_block_ends_in_continue(last_else);

   /* If both branch legs end with the same jump instruction,
    * merge the statement after the branch
    */
   if ((then_break && else_break) || (then_cont && else_cont)) {
      /* The common jump target is about to lose a predecessor, so its
       * phis must be lowered to registers first.
       */
      nir_lower_phis_to_regs_block(last_then->successors[0]);
      /* Drop the then-side jump and move the else-side jump into the
       * empty block after the IF; both legs now fall through to it.
       */
      nir_instr_remove_v(nir_block_last_instr(last_then));
      nir_instr *jump = nir_block_last_instr(last_else);
      nir_instr_remove_v(jump);
      nir_instr_insert(nir_after_block(after_if), jump);
      return true;
   }

   return false;
}
99
100 /**
101 * This optimization simplifies potential loop terminators which then allows
102 * other passes such as opt_if_simplification() and loop unrolling to progress
103 * further:
104 *
105 * if (cond) {
106 * ... then block instructions ...
107 * } else {
108 * ...
109 * break;
110 * }
111 *
112 * into:
113 *
114 * if (cond) {
115 * } else {
116 * ...
117 * break;
118 * }
119 * ... then block instructions ...
120 */
static bool
opt_loop_terminator(nir_if *nif)
{
   nir_block *break_blk = NULL;
   nir_block *continue_from_blk = NULL;
   nir_block *first_continue_from_blk = NULL;

   nir_block *last_then = nir_if_last_then_block(nif);
   nir_block *last_else = nir_if_last_else_block(nif);

   /* Identify which leg breaks; the other leg's contents can be moved
    * after the if-statement.
    */
   if (nir_block_ends_in_break(last_then)) {
      break_blk = last_then;
      continue_from_blk = last_else;
      first_continue_from_blk = nir_if_first_else_block(nif);
   } else if (nir_block_ends_in_break(last_else)) {
      break_blk = last_else;
      continue_from_blk = last_then;
      first_continue_from_blk = nir_if_first_then_block(nif);
   }

   /* Continue if the if-statement contained no jumps at all */
   if (!break_blk)
      return false;

   /* If the continue from block is empty then return as there is nothing to
    * move.
    */
   if (is_block_empty(first_continue_from_blk))
      return false;

   if (nir_block_ends_in_jump(continue_from_blk)) {
      /* Let nir_opt_dead_cf() clean up any dead code. */
      if (!is_block_empty(nir_cf_node_cf_tree_next(&nif->cf_node)))
         return false;

      /* We are about to move the predecessor. */
      nir_lower_phis_to_regs_block(continue_from_blk->successors[0]);
   }

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue from branch after the if-statement. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}
175
176 /**
177 * This optimization tries to merge the jump instruction (break or continue)
178 * of a block with an equal one from a previous IF.
179 *
180 * This optimization turns:
181 *
182 * loop {
183 * ...
184 * if (cond) {
185 * do_work_1();
186 * break;
187 * } else {
188 * }
189 * do_work_2();
190 * break;
191 * }
192 *
193 * into:
194 *
195 * loop {
196 * ...
197 * if (cond) {
198 * do_work_1();
199 * } else {
200 * do_work_2();
201 * }
202 * break;
203 * }
204 *
205 * It does the same with continue statements, respectively.
206 *
207 */
/* block: the last block of a loop body or of an if-branch inside the loop.
 * is_trivial_continue / is_trivial_break: whether a continue/break at the
 * tail of this block would be redundant because fallthrough already reaches
 * the same target.
 */
static bool
opt_loop_last_block(nir_block *block, bool is_trivial_continue, bool is_trivial_break)
{
   /* If this block has no predecessors, let nir_opt_dead_cf() do the cleanup */
   if (block->predecessors->entries == 0)
      return false;

   bool progress = false;
   bool has_break = nir_block_ends_in_break(block);
   bool has_continue = nir_block_ends_in_continue(block);

   /* Remove any "trivial" break and continue, i.e. those that are at the tail
    * of a CF-list where we can just delete the instruction and
    * control-flow will naturally take us to the same target block.
    */
   if ((has_break && is_trivial_break) || (has_continue && is_trivial_continue)) {
      /* The jump target keeps the same predecessors but phi bookkeeping
       * must be redone, so lower its phis to registers.
       */
      nir_lower_phis_to_regs_block(block->successors[0]);
      nir_instr_remove_v(nir_block_last_instr(block));
      return true;
   }

   if (!nir_block_ends_in_jump(block)) {
      /* No explicit jump: treat the implicit fallthrough as the jump kind
       * that would be trivial here, so we can still merge with an IF above.
       */
      has_break = is_trivial_break;
      has_continue = is_trivial_continue;
   } else if (is_trivial_continue || is_trivial_break) {
      /* This block ends in a jump that cannot be removed because the implicit
       * fallthrough leads to a different target block.
       *
       * We already optimized this block's jump with the predecessors' when visiting
       * this block with opt_loop_last_block(block, is_trivial_* = false, false).
       */
      return false;
   }

   /* Nothing to do. */
   if (!has_continue && !has_break)
      return false;

   /* Walk backwards and check for previous IF statements whether one of the
    * branch legs ends with an equal jump instruction as this block.
    */
   for (nir_cf_node *prev = nir_cf_node_prev(&block->cf_node); prev != NULL; prev = nir_cf_node_prev(prev)) {
      /* Skip blocks and nested loops */
      if (prev->type != nir_cf_node_if)
         continue;

      nir_if *nif = nir_cf_node_as_if(prev);
      nir_block *then_block = nir_if_last_then_block(nif);
      nir_block *else_block = nir_if_last_else_block(nif);
      if (!nir_block_ends_in_jump(then_block) && !nir_block_ends_in_jump(else_block))
         continue;

      /* Merge into the leg OPPOSITE the one ending in the matching jump:
       * the code between the IF and this block only runs when that other
       * leg was taken.
       */
      bool merge_into_then = (has_continue && nir_block_ends_in_continue(else_block)) ||
                             (has_break && nir_block_ends_in_break(else_block));
      bool merge_into_else = (has_continue && nir_block_ends_in_continue(then_block)) ||
                             (has_break && nir_block_ends_in_break(then_block));

      if (!merge_into_then && !merge_into_else)
         continue;

      /* If there are single-source phis after the IF, get rid of them first */
      nir_opt_remove_phis_block(nir_cf_node_cf_tree_next(prev));

      /* We are about to remove one predecessor. */
      nir_lower_phis_to_regs_block(block->successors[0]);

      nir_cf_list tmp;
      nir_cf_extract(&tmp, nir_after_cf_node(prev), nir_after_block_before_jump(block));

      if (merge_into_then) {
         nir_cf_reinsert(&tmp, nir_after_block(then_block));
      } else {
         nir_cf_reinsert(&tmp, nir_after_block(else_block));
      }

      /* Because we split the current block, the pointer is not valid anymore. */
      block = nir_cf_node_cf_tree_next(prev);
      progress = true;
   }

   /* Revisit the predecessor blocks in order to remove implicit jump instructions. */
   if (is_block_singular(block)) {
      nir_cf_node *prev = nir_cf_node_prev(&block->cf_node);
      if (prev && prev->type == nir_cf_node_if) {
         nir_if *nif = nir_cf_node_as_if(prev);
         progress |= opt_loop_last_block(nir_if_last_then_block(nif), has_continue, has_break);
         progress |= opt_loop_last_block(nir_if_last_else_block(nif), has_continue, has_break);
      }
   }

   return progress;
}
300
301 static bool
can_constant_fold(nir_scalar scalar,nir_block * loop_header)302 can_constant_fold(nir_scalar scalar, nir_block *loop_header)
303 {
304 if (nir_scalar_is_const(scalar))
305 return true;
306
307 if (nir_scalar_is_alu(scalar)) {
308 for (unsigned i = 0; i < nir_op_infos[nir_scalar_alu_op(scalar)].num_inputs; i++) {
309 if (nir_op_infos[nir_scalar_alu_op(scalar)].input_sizes[i] > 1 ||
310 !can_constant_fold(nir_scalar_chase_alu_src(scalar, i), loop_header))
311 return false;
312 }
313 return true;
314 }
315
316 if (scalar.def->parent_instr->type == nir_instr_type_phi) {
317 /* If this is a phi from anything but the loop header, we cannot constant-fold. */
318 if (scalar.def->parent_instr->block != loop_header)
319 return false;
320
321 nir_block *preheader = nir_block_cf_tree_prev(loop_header);
322 nir_phi_instr *phi = nir_instr_as_phi(scalar.def->parent_instr);
323 nir_phi_src *src = nir_phi_get_src_from_block(phi, preheader);
324 return can_constant_fold(nir_get_scalar(src->src.ssa, 0), loop_header);
325 }
326
327 return false;
328 }
329
330 /**
331 * This optimization tries to peel the first loop break.
332 *
333 * This optimization turns:
334 *
335 * loop {
336 * do_work_1();
337 * if (cond) {
338 * break;
339 * } else {
340 * }
341 * do_work_2();
342 * }
343 *
344 * into:
345 *
346 * do_work_1();
347 * if (cond) {
348 * } else {
349 * loop {
350 * do_work_2();
351 * do_work_1();
352 * if (cond) {
353 * break;
354 * } else {
355 * }
356 * }
357 * }
358 *
359 * nir_opt_dead_cf() can later remove the outer IF statement, again.
360 *
361 */
static bool
opt_loop_peel_initial_break(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *prev_block = nir_cf_node_cf_tree_prev(&loop->cf_node);
   nir_block *exit_block = nir_cf_node_cf_tree_next(&loop->cf_node);

   /* The loop must have exactly one continue block. */
   if (header_block->predecessors->entries != 2)
      return false;

   /* The header must be followed directly by the candidate terminator IF. */
   nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
   if (!if_node || if_node->type != nir_cf_node_if)
      return false;

   /* The IF must be a trivial terminator: then-leg ends in break,
    * else-leg is empty.
    */
   nir_if *nif = nir_cf_node_as_if(if_node);
   nir_block *last_then = nir_if_last_then_block(nif);
   if (!nir_block_ends_in_break(last_then) ||
       !is_block_empty(nir_if_first_else_block(nif)) ||
       !nir_is_trivial_loop_if(nif, last_then))
      return false;

   /* If do_work_2() ends in a break or other kind of jump then we can't move
    * it to the top of the loop ahead of do_work_1().
    */
   if (nir_block_ends_in_jump(nir_loop_last_block(loop)))
      return false;

   /* Check that there is actual work to be done after the initial break. */
   if (!nir_block_contains_work(nir_cf_node_cf_tree_next(if_node)))
      return false;

   /* For now, we restrict this optimization to cases where the outer IF
    * can be constant-folded.
    *
    * Note: If this restriction is lifted, it might recurse infinitely.
    * Prevent by e.g. restricting to single-exit loops.
    */
   if (!can_constant_fold(nir_get_scalar(nif->condition.ssa, 0), header_block))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_cf_tree_next(if_node));

   /* We need LCSSA because we are going to wrap the loop into an IF. */
   nir_convert_loop_to_lcssa(loop);

   /* Lower loop header and LCSSA-phis to regs. */
   nir_lower_phis_to_regs_block(header_block);
   nir_lower_ssa_defs_to_regs_block(header_block);
   nir_lower_phis_to_regs_block(exit_block);

   /* Extract the loop header including the first break. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(header_block),
                  nir_after_cf_node(if_node));
   /* The extraction split the CF-list; re-fetch the new first block. */
   header_block = nir_loop_first_block(loop);

   /* Clone and re-insert at the continue block. */
   nir_block *cont_block = nir_loop_last_block(loop);
   struct hash_table *remap_table = _mesa_pointer_hash_table_create(NULL);
   nir_cf_list_clone_and_reinsert(&tmp, &loop->cf_node, nir_after_block(cont_block), remap_table);
   _mesa_hash_table_destroy(remap_table, NULL);

   /* Remove the break and insert before the loop. */
   nir_cf_reinsert(&tmp, nir_after_block(prev_block));
   nir_instr_remove_v(nir_block_last_instr(last_then));

   /* Finally, extract the entire loop and insert into the else-branch. */
   nir_cf_extract(&tmp, nir_before_cf_node(&loop->cf_node),
                  nir_after_cf_node(&loop->cf_node));
   nir_cf_reinsert(&tmp, nir_after_block(nir_if_first_else_block(nif)));

   return true;
}
441
/* State passed to insert_phis_after_terminator_merge() while patching up
 * SSA uses after two loop terminators have been merged.
 */
struct merge_term_state {
   nir_shader *shader;
   /* Cursor just after the source if-statement; new phis are inserted in
    * its block.
    */
   nir_cursor after_src_if;
   /* The branch leg that used to end in the removed break; phis get an
    * undef source for this predecessor.
    */
   nir_block *old_break_block;
   /* The branch leg the moved instructions now live in; phis get the real
    * def as the source for this predecessor.
    */
   nir_block *continue_block;
};
448
/* Callback run for each def moved into the src-if's continue block. Any use
 * of the def outside its new block (including if-condition uses) is routed
 * through a newly created phi after the src if-statement, with an undef
 * source on the old break path. Returns true to keep iterating defs.
 */
static bool
insert_phis_after_terminator_merge(nir_def *def, void *state)
{
   struct merge_term_state *m_state = (struct merge_term_state *)state;

   bool phi_created = false;
   nir_phi_instr *phi_instr = NULL;

   nir_foreach_use_including_if_safe(src, def) {
      /* Don't reprocess the phi we just added */
      if (!nir_src_is_if(src) && phi_instr &&
          nir_src_parent_instr(src) == &phi_instr->instr) {
         continue;
      }

      /* Only uses outside the def's own block need the phi. */
      if (nir_src_is_if(src) ||
          (!nir_src_is_if(src) && nir_src_parent_instr(src)->block != def->parent_instr->block)) {
         if (!phi_created) {
            /* Lazily create one phi per def, placed after the src IF. */
            phi_instr = nir_phi_instr_create(m_state->shader);
            nir_def_init(&phi_instr->instr, &phi_instr->def, def->num_components,
                         def->bit_size);
            nir_instr_insert(nir_after_block(m_state->after_src_if.block),
                             &phi_instr->instr);

            /* Real value arrives via the continue path... */
            nir_phi_src *phi_src =
               nir_phi_instr_add_src(phi_instr, m_state->continue_block, def);
            list_addtail(&phi_src->src.use_link, &def->uses);

            /* ...and an undef placeholder via the old break path, which
             * never actually produced this value.
             */
            nir_undef_instr *undef =
               nir_undef_instr_create(m_state->shader,
                                      def->num_components,
                                      def->bit_size);
            nir_instr_insert(nir_after_block(m_state->old_break_block),
                             &undef->instr);
            phi_src = nir_phi_instr_add_src(phi_instr,
                                            m_state->old_break_block,
                                            &undef->def);
            list_addtail(&phi_src->src.use_link, &undef->def.uses);

            phi_created = true;
         }
         assert(phi_instr);
         nir_src_rewrite(src, &phi_instr->def);
      }
   }

   return true;
}
497
/* Merges src_if into dest_if: the instructions between the two ifs move into
 * src_if's continue leg, src_if's break is removed, and dest_if's condition
 * becomes the ior/iand combination of both conditions.
 */
static void
merge_terminators(nir_builder *b, nir_if *dest_if, nir_if *src_if)
{
   /* Move instructions from the block between the ifs into the src
    * if-statements continue block and remove the break from the break block.
    * This helps avoid any potential out of bounds access after the merging
    * moves the break later.
    */
   bool then_break = nir_block_ends_in_break(nir_if_last_then_block(src_if));
   nir_cursor continue_blk_c = then_break ?
      nir_after_block(nir_if_last_else_block(src_if)) :
      nir_after_block(nir_if_last_then_block(src_if));

   nir_cf_list tmp;
   nir_cursor after_src_if = nir_after_cf_node(&src_if->cf_node);
   nir_cf_extract(&tmp, after_src_if, nir_before_cf_node(&dest_if->cf_node));
   nir_cf_reinsert(&tmp, continue_blk_c);

   /* Remove the break from the src if-statement */
   nir_block *break_blk = then_break ?
      nir_if_last_then_block(src_if) : nir_if_last_else_block(src_if);
   nir_instr_remove(nir_block_last_instr(break_blk));

   /* Add phis if needed after we moved instructions to the src if-statements
    * continue block.
    */
   struct merge_term_state m_state;
   m_state.shader = b->shader;
   m_state.after_src_if = nir_after_cf_node(&src_if->cf_node);
   m_state.old_break_block = break_blk;
   m_state.continue_block = continue_blk_c.block;
   /* Use _safe because nir_rematerialize_deref_in_use_blocks might remove dead derefs. */
   nir_foreach_instr_reverse_safe(instr, m_state.continue_block) {
      if (instr->type == nir_instr_type_deref)
         nir_rematerialize_deref_in_use_blocks(nir_instr_as_deref(instr));
      else
         nir_foreach_def(instr, insert_phis_after_terminator_merge, &m_state);
   }

   b->cursor = nir_before_src(&dest_if->condition);

   /* break-in-then: either if breaking exits, so OR the conditions;
    * break-in-else: both must be "stay in loop", so AND them.
    */
   nir_def *new_c = NULL;
   if (then_break)
      new_c = nir_ior(b, dest_if->condition.ssa, src_if->condition.ssa);
   else
      new_c = nir_iand(b, dest_if->condition.ssa, src_if->condition.ssa);

   nir_src_rewrite(&dest_if->condition, new_c);
}
547
548 /* Checks to see if the if-statement is a basic terminator containing no
549 * instructions in the branches other than a single break in one of the
550 * branches.
551 */
552 static bool
is_basic_terminator_if(nir_if * nif)553 is_basic_terminator_if(nir_if *nif)
554 {
555 nir_block *first_then = nir_if_first_then_block(nif);
556 nir_block *first_else = nir_if_first_else_block(nif);
557 nir_block *last_then = nir_if_last_then_block(nif);
558 nir_block *last_else = nir_if_last_else_block(nif);
559
560 if (first_then != last_then || first_else != last_else)
561 return false;
562
563 if (!nir_block_ends_in_break(last_then) &&
564 !nir_block_ends_in_break(last_else))
565 return false;
566
567 if (nir_block_ends_in_break(last_then)) {
568 if (!exec_list_is_empty(&last_else->instr_list) ||
569 !exec_list_is_singular(&last_then->instr_list))
570 return false;
571 } else {
572 assert(nir_block_ends_in_break(last_else));
573 if (!exec_list_is_empty(&last_then->instr_list) ||
574 !exec_list_is_singular(&last_else->instr_list))
575 return false;
576 }
577
578 return true;
579 }
580
581 /*
582 * Merge two consecutive loop terminators. For example:
583 *
584 * int i;
585 * for(i = 0; i < n_stop; i++) {
586 * ...
587 *
588 * if(0.0 < stops[i])
589 * break;
590 * }
591 *
592 * This loop checks if the value of stops[i] is greater than 0.0 and if untrue
593 * immediately checks n_stop is less than i. If we combine these into a single
594 * if the compiler has a greater chance of unrolling the loop.
595 */
596 static bool
opt_loop_merge_terminators(nir_builder * b,nir_if * nif,nir_loop * loop)597 opt_loop_merge_terminators(nir_builder *b, nir_if *nif, nir_loop *loop)
598 {
599 if (!loop)
600 return false;
601
602 /* If the loop has phis abort any merge attempt */
603 nir_block *blk_after_lp = nir_cf_node_cf_tree_next(&loop->cf_node);
604 nir_instr *instr_after_loop = nir_block_first_instr(blk_after_lp);
605 if (instr_after_loop && instr_after_loop->type == nir_instr_type_phi)
606 return false;
607
608 /* Check if we have two consecutive basic terminators */
609 if (!is_basic_terminator_if(nif))
610 return false;
611
612 nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
613 if (!next_blk)
614 return false;
615
616 nir_if *next_if = nir_block_get_following_if(next_blk);
617 if (!next_if)
618 return false;
619
620 if (!is_basic_terminator_if(next_if))
621 return false;
622
623 /* If the terminators exit from different branches just abort for now.
624 * After further if-statement optimisations are done we should get another
625 * go at merging.
626 */
627 bool break_in_then_f = nir_block_ends_in_break(nir_if_last_then_block(nif));
628 bool break_in_then_s = nir_block_ends_in_break(nir_if_last_then_block(next_if));
629 if (break_in_then_f != break_in_then_s)
630 return false;
631
632 /* Allow some instructions that are acceptable between the terminators
633 * these are expected to simply be used by the condition in the second
634 * loop terminator.
635 */
636 nir_foreach_instr(instr, next_blk) {
637 if (instr->type == nir_instr_type_phi)
638 return false;
639
640 if (instr->type != nir_instr_type_alu &&
641 instr->type != nir_instr_type_load_const &&
642 instr->type != nir_instr_type_deref &&
643 (instr->type != nir_instr_type_intrinsic ||
644 (instr->type == nir_instr_type_intrinsic &&
645 nir_instr_as_intrinsic(instr)->intrinsic != nir_intrinsic_load_deref))) {
646 return false;
647 }
648 }
649
650 /* If either if-statement has phis abort */
651 next_blk = nir_cf_node_cf_tree_next(&next_if->cf_node);
652 if (next_blk) {
653 nir_foreach_instr(instr, next_blk) {
654 if (instr->type == nir_instr_type_phi)
655 return false;
656 }
657 }
658
659 merge_terminators(b, next_if, nif);
660 return true;
661 }
662
/* Recursively walks a CF-list applying the loop optimizations above.
 * current_loop is the innermost enclosing loop (NULL at function level).
 * Returns true if any optimization made progress.
 */
static bool
opt_loop_cf_list(nir_builder *b, struct exec_list *cf_list,
                 nir_loop *current_loop)
{
   bool progress = false;
   foreach_list_typed_safe(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(cf_node);
         /* Non-tail block: no jump here is trivially removable. */
         progress |= opt_loop_last_block(block, false, false);
         break;
      }

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         /* Recurse into both legs before optimizing the if itself. */
         progress |= opt_loop_cf_list(b, &nif->then_list, current_loop);
         progress |= opt_loop_cf_list(b, &nif->else_list, current_loop);
         progress |= opt_loop_merge_break_continue(nif);
         progress |= opt_loop_terminator(nif);
         progress |= opt_loop_merge_terminators(b, nif, current_loop);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         assert(!nir_loop_has_continue_construct(loop));
         progress |= opt_loop_cf_list(b, &loop->body, loop);
         /* A continue at the very end of the loop body is trivial. */
         progress |= opt_loop_last_block(nir_loop_last_block(loop), true, false);
         progress |= opt_loop_peel_initial_break(loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}
702
703 /**
704 * This pass aims to simplify loop control-flow by reducing the number
705 * of break and continue statements.
706 */
707 bool
nir_opt_loop(nir_shader * shader)708 nir_opt_loop(nir_shader *shader)
709 {
710 bool progress = false;
711
712 nir_foreach_function_impl(impl, shader) {
713 nir_builder b = nir_builder_create(impl);
714
715 /* First we run the simple pass to get rid of pesky continues */
716 if (opt_loop_cf_list(&b, &impl->body, NULL)) {
717 nir_metadata_preserve(impl, nir_metadata_none);
718
719 /* If that made progress, we're no longer really in SSA form. */
720 nir_lower_reg_intrinsics_to_ssa_impl(impl);
721 progress = true;
722 } else {
723 nir_metadata_preserve(impl, nir_metadata_all);
724 }
725 }
726
727 return progress;
728 }
729