/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_builder_opcodes.h"
#include "nir_vla.h"

/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */

struct from_ssa_state {
   nir_builder builder;
   void *dead_ctx;
   struct exec_list dead_instrs;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
   bool progress;
};
/* Returns true if def @a comes after def @b.
 *
 * The core observation that makes the Boissinot algorithm efficient
 * is that, given two properly sorted sets, we can check for
 * interference in these sets via a linear walk. This is accomplished
 * by doing a single combined walk over the union of the two sets in DFS
 * order. It doesn't matter which DFS we do so long as we're
 * consistent. Fortunately, the dominance algorithm we ran prior to
 * this pass did such a walk and recorded the pre- and post-indices in
 * the blocks.
 *
 * We treat SSA undefs as always coming before other instruction types.
 */
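/* For illustration (hypothetical blocks and SSA names): suppose block B1
 * dominates block B2, so B1->dom_pre_index < B2->dom_pre_index, and B1
 * contains ssa_1 then ssa_2 while B2 contains ssa_3. Then:
 *
 *    def_after(ssa_2, ssa_1) == true   // same block, larger instr->index
 *    def_after(ssa_3, ssa_2) == true   // B2 has the larger dom_pre_index
 *    def_after(undef, ssa_1) == false  // undefs sort before everything
 */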
static bool
def_after(nir_def *a, nir_def *b)
{
   if (a->parent_instr->type == nir_instr_type_undef)
      return false;

   if (b->parent_instr->type == nir_instr_type_undef)
      return true;

   /* If they're in the same block, we can rely on whichever instruction
    * comes first in the block.
    */
   if (a->parent_instr->block == b->parent_instr->block)
      return a->parent_instr->index > b->parent_instr->index;

   /* Otherwise, if blocks are distinct, we sort them in DFS pre-order */
   return a->parent_instr->block->dom_pre_index >
          b->parent_instr->block->dom_pre_index;
}

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_def *a, nir_def *b)
{
   if (a->parent_instr->type == nir_instr_type_undef) {
      /* SSA undefs always dominate */
      return true;
   }
   if (def_after(a, b)) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return def_after(b, a);
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}

/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers. This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al. but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for Correctness,
 * Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure. The merge_set stores a linked list of
 * merge_nodes, ordered by a pre-order DFS walk of the dominance tree. (Since
 * the liveness analysis pass indexes the SSA values in dominance order for
 * us, this is an easy thing to keep up.) It is assumed that no pair of the
 * nodes in a given set interfere. Merging two sets or checking for
 * interference can be done in a single linear-time merge-sort walk of the
 * two lists of nodes.
 */
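/* As a sketch (hypothetical defs): if ssa_1 dominates ssa_2 and ssa_3, and
 * ssa_2 dominates ssa_4, a merge set containing all four stores its nodes in
 * pre-order DFS of the dominance tree:
 *
 *    set->nodes: ssa_1 -> ssa_2 -> ssa_4 -> ssa_3
 *
 * which is exactly the order in which the liveness pass indexed them, so a
 * merge-sort style walk of two such lists visits the union in a consistent
 * DFS order.
 */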
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   bool divergent;
   nir_def *reg_decl;
} merge_set;

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   NIR_VLA(nir_def *, dom, set->size);
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, " ");

      fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = rzalloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->divergent = def->divergent;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   /* There's no need to check for interference within the same set,
    * because we assume that the sets themselves are already
    * interference-free.
    */
   if (a->set == b->set)
      return false;

   return nir_defs_interfere(a->def, b->def);
}

/* Merges b into a
 *
 * This algorithm uses def_after to ensure that the sets always stay in the
 * same order as the pre-order DFS done by the liveness algorithm.
 */
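/* A small example (hypothetical defs, listed in DFS pre-order): merging
 * a = {ssa_1, ssa_4} with b = {ssa_2, ssa_3} splices each b-node in front of
 * the first a-node that comes after it, yielding {ssa_1, ssa_2, ssa_3, ssa_4}
 * without ever re-sorting the lists.
 */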
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          def_after(a_node->def, b_node->def)) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;
   a->divergent |= b->divergent;

   return a;
}

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
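/* As a sketch of the walk (hypothetical defs): visiting ssa_1, ssa_2, ssa_4,
 * ssa_3 in DFS order, where ssa_1 dominates the rest and ssa_2 dominates
 * ssa_4, the dominator stack evolves as
 *
 *    [ssa_1] -> [ssa_1, ssa_2] -> [ssa_1, ssa_2, ssa_4] -> pop to [ssa_1]
 *
 * before ssa_3 is visited, so each node is only ever checked against the top
 * of the stack, its minimal dominator.
 */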
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   /* List of all the nodes which dominate the current node, in dominance
    * order.
    */
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      /* We walk the union of the two sets in the same order as the pre-order
       * DFS done by liveness analysis.
       */
      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (def_after(b_node->def, a_node->def)) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      /* Because our walk is a pre-order DFS, we can maintain the list of
       * dominating nodes as a simple stack, pushing every node onto the list
       * after we visit it and popping any non-dominating nodes off before we
       * visit the current node.
       */
      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      /* There are three invariants of this algorithm that are important here:
       *
       * 1. There is no interference within either set a or set b.
       * 2. None of the nodes processed up until this point interfere.
       * 3. All the dominators of `current` have been processed.
       *
       * Because of these invariants, we only need to check the current node
       * against its minimal dominator. If any other node N in the union
       * interferes with current, then N must dominate current because we are
       * in SSA form. If N dominates current then it must also dominate our
       * minimal dominator dom[dom_idx]. Since N is live at current it must
       * also be live at the minimal dominator which means N interferes with
       * the minimal dominator dom[dom_idx] and, by invariants 2 and 3 above,
       * the algorithm would have already terminated. Therefore, if we got
       * here, the only node that can possibly interfere with current is the
       * minimal dominator dom[dom_idx].
       *
       * This is what allows us to do an interference check of the union of
       * the two sets with a single linear-time walk.
       */
      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}

static bool
add_parallel_copy_to_end_of_block(nir_shader *shader, nir_block *block,
                                  void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(shader);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks. For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values. We also add a parallel copy to the end of every block that has
 * a successor with phi nodes that, for each phi node in each successor,
 * copies the corresponding source of the phi node and adjusts the phi to
 * use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition. What this does is
 * ensure that each value used in a phi also has exactly one use. The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes and, thanks to the parallel copy at the end of
 * the predecessor block, the sources of phi nodes are the only use of
 * that value. This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all. We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist. We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
 */
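/* A rough before/after sketch (hypothetical SSA names): for a block with
 *
 *    ssa_3 = phi(pred0: ssa_1, pred1: ssa_2)
 *
 * isolation produces, in each predecessor and in the block itself,
 *
 *    pred0:  ssa_4 = pcopy(ssa_1)      pred1:  ssa_5 = pcopy(ssa_2)
 *    block:  ssa_3 = phi(pred0: ssa_4, pred1: ssa_5)
 *            ssa_6 = pcopy(ssa_3)      // all former uses of ssa_3 use ssa_6
 *
 * so every phi source and destination has exactly one use.
 */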
static bool
isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
{
   /* If we don't have any phis, then there's nothing for us to do. */
   nir_phi_instr *last_phi = nir_block_last_phi_instr(block);
   if (last_phi == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(shader);
   nir_instr_insert_after(&last_phi->instr, &block_pcopy->instr);

   nir_foreach_phi(phi, block) {
      nir_foreach_phi_src(src, phi) {
         if (nir_src_is_undef(src->src))
            continue;

         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);

         entry->dest_is_reg = false;
         nir_def_init(&pcopy->instr, &entry->dest.def,
                      phi->def.num_components, phi->def.bit_size);
         entry->dest.def.divergent = nir_src_is_divergent(src->src);

         /* We're adding a source to a live instruction so we need to use
          * nir_instr_init_src()
          */
         entry->src_is_reg = false;
         nir_instr_init_src(&pcopy->instr, &entry->src, src->src.ssa);

         exec_list_push_tail(&pcopy->entries, &entry->node);

         nir_src_rewrite(&src->src, &entry->dest.def);
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);

      entry->dest_is_reg = false;
      nir_def_init(&block_pcopy->instr, &entry->dest.def,
                   phi->def.num_components, phi->def.bit_size);
      entry->dest.def.divergent = phi->def.divergent;

      nir_def_rewrite_uses(&phi->def, &entry->dest.def);

      /* We're adding a source to a live instruction so we need to use
       * nir_instr_init_src().
       *
       * Note that we do this after we've rewritten all uses of the phi to
       * entry->def, ensuring that entry->src will be the only remaining use
       * of the phi.
       */
      entry->src_is_reg = false;
      nir_instr_init_src(&block_pcopy->instr, &entry->src, &phi->def);

      exec_list_push_tail(&block_pcopy->entries, &entry->node);
   }

   return true;
}

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_phi(phi, block) {
      merge_node *dest_node = get_merge_node(&phi->def, state);

      nir_foreach_phi_src(src, phi) {
         if (nir_src_is_undef(src->src))
            continue;

         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      assert(!entry->src_is_reg);
      assert(!entry->dest_is_reg);
      assert(entry->dest.def.num_components ==
             entry->src.ssa->num_components);

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.def, state);

      if (src_node->set == dest_node->set)
         continue;

      /* TODO: We can probably do better here but for now we should be safe if
       * we just don't coalesce things with different divergence.
       */
      if (dest_node->set->divergent != src_node->set->divergent)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_def *
decl_reg_for_ssa_def(nir_builder *b, nir_def *def)
{
   return nir_decl_reg(b, def->num_components, def->bit_size, 0);
}

static void
set_reg_divergent(nir_def *reg, bool divergent)
{
   nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
   nir_intrinsic_set_divergent(decl, divergent);
}

void
nir_rewrite_uses_to_load_reg(nir_builder *b, nir_def *old,
                             nir_def *reg)
{
   nir_foreach_use_including_if_safe(use, old) {
      b->cursor = nir_before_src(use);

      /* If this is a parallel copy, it can just take the register directly */
      if (!nir_src_is_if(use) &&
          nir_src_parent_instr(use)->type == nir_instr_type_parallel_copy) {

         nir_parallel_copy_entry *copy_entry =
            list_entry(use, nir_parallel_copy_entry, src);

         assert(!copy_entry->src_is_reg);
         copy_entry->src_is_reg = true;
         nir_src_rewrite(&copy_entry->src, reg);
         continue;
      }

      /* If the immediately preceding instruction is a load_reg from the same
       * register, use it instead of creating a new load_reg. This helps when
       * a register is referenced in multiple sources in the same instruction,
       * which otherwise would turn into piles of unnecessary moves.
       */
      nir_def *load = NULL;
      if (b->cursor.option == nir_cursor_before_instr) {
         nir_instr *prev = nir_instr_prev(b->cursor.instr);

         if (prev != NULL && prev->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(prev);
            if (intr->intrinsic == nir_intrinsic_load_reg &&
                intr->src[0].ssa == reg &&
                nir_intrinsic_base(intr) == 0)
               load = &intr->def;
         }
      }

      if (load == NULL)
         load = nir_load_reg(b, reg);

      nir_src_rewrite(use, load);
   }
}

static bool
def_replace_with_reg(nir_def *def, nir_function_impl *impl)
{
   /* These are handled elsewhere */
   assert(def->parent_instr->type != nir_instr_type_undef &&
          def->parent_instr->type != nir_instr_type_load_const);

   nir_builder b = nir_builder_create(impl);

   nir_def *reg = decl_reg_for_ssa_def(&b, def);
   nir_rewrite_uses_to_load_reg(&b, def, reg);

   if (def->parent_instr->type == nir_instr_type_phi)
      b.cursor = nir_before_block_after_phis(def->parent_instr->block);
   else
      b.cursor = nir_after_instr(def->parent_instr);

   nir_store_reg(&b, def, reg);
   return true;
}

static nir_def *
reg_for_ssa_def(nir_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web. Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one. Note that all of
       * the things in the merge set should be the same so it doesn't
       * matter which node's definition we use.
       */
      if (node->set->reg_decl == NULL) {
         node->set->reg_decl = decl_reg_for_ssa_def(&state->builder, def);
         set_reg_divergent(node->set->reg_decl, node->set->divergent);
      }

      return node->set->reg_decl;
   } else {
      assert(state->phi_webs_only);
      return NULL;
   }
}

static void
remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
{
#ifndef NDEBUG
   nir_phi_instr *phi = nir_instr_as_phi(instr);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, &phi->def);
   assert(entry != NULL);
   merge_node *node = (merge_node *)entry->data;

   nir_foreach_phi_src(src, phi) {
      if (nir_src_is_undef(src->src))
         continue;

      entry = _mesa_hash_table_search(state->merge_node_table, src->src.ssa);
      assert(entry != NULL);
      merge_node *src_node = (merge_node *)entry->data;
      assert(src_node->set == node->set);
   }
#endif

   nir_instr_remove(instr);
}

static bool
rewrite_ssa_def(nir_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;

   nir_def *reg = reg_for_ssa_def(def, state);
   if (reg == NULL)
      return true;

   assert(nir_def_is_unused(def));

   /* Because we walk the instructions in reverse, every use of this def has
    * already been rewritten to a load_reg, so the def is now unused and all
    * we have to do is store it to its register. load_const instructions are
    * SSA-only so they should never have been given a register.
    */
   assert(def->parent_instr->type != nir_instr_type_load_const);
   nir_store_reg(&state->builder, def, reg);

   state->progress = true;
   return true;
}

static bool
rewrite_src(nir_src *src, void *void_state)
{
   struct from_ssa_state *state = void_state;

   nir_def *reg = reg_for_ssa_def(src->ssa, state);
   if (reg == NULL)
      return true;

   nir_src_rewrite(src, nir_load_reg(&state->builder, reg));

   state->progress = true;
   return true;
}

/* Resolves SSA definitions to registers. While we're at it, we also
 * remove phi nodes.
 */
static void
resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
{
   nir_foreach_block_reverse(block, impl) {
      /* Remove successor phis in case there's a back edge. */
      for (unsigned i = 0; i < 2; i++) {
         nir_block *succ = block->successors[i];
         if (succ == NULL)
            continue;

         nir_foreach_instr_safe(instr, succ) {
            if (instr->type != nir_instr_type_phi)
               break;

            remove_no_op_phi(instr, state);
         }
      }

      /* If an if comes right after the block, handle its condition as the
       * last source "in" the block.
       */
      nir_if *nif = nir_block_get_following_if(block);
      if (nif) {
         state->builder.cursor = nir_before_src(&nif->condition);
         rewrite_src(&nif->condition, state);
      }

      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_phi:
            remove_no_op_phi(instr, state);
            break;

         case nir_instr_type_parallel_copy: {
            nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(instr);

            nir_foreach_parallel_copy_entry(entry, pcopy) {
               assert(!entry->dest_is_reg);

               /* Parallel copy destinations will always be registers */
               nir_def *reg = reg_for_ssa_def(&entry->dest.def, state);
               assert(reg != NULL);

               /* We're switching from the nir_def to the nir_src in the dest
                * union so we need to use nir_instr_init_src() here.
                */
               assert(nir_def_is_unused(&entry->dest.def));
               entry->dest_is_reg = true;
               nir_instr_init_src(&pcopy->instr, &entry->dest.reg, reg);
            }

            nir_foreach_parallel_copy_entry(entry, pcopy) {
               assert(!entry->src_is_reg);
               nir_def *reg = reg_for_ssa_def(entry->src.ssa, state);
               if (reg == NULL)
                  continue;

               entry->src_is_reg = true;
               nir_src_rewrite(&entry->src, reg);
            }
            break;
         }

         default:
            state->builder.cursor = nir_after_instr(instr);
            nir_foreach_def(instr, rewrite_ssa_def, state);
            state->builder.cursor = nir_before_instr(instr);
            nir_foreach_src(instr, rewrite_src, state);
         }
      }
   }
}

/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive. We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Set the location of the source value to the destination
 *  - Mark the location containing the source value
 *  - Mark the destination as no longer needing to be copied
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary. Now, the cycle is broken, so we
 * can continue with the above steps.
 */
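/* For example (hypothetical registers), the swap
 *
 *    r1, r2 = pcopy(r2, r1)
 *
 * has no "empty" destination to start from, so a temporary breaks the
 * cycle, giving something like:
 *
 *    tmp = mov r2
 *    r2  = mov r1
 *    r1  = mov tmp
 */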
struct copy_value {
   bool is_reg;
   nir_def *ssa;
};

static bool
copy_values_equal(struct copy_value a, struct copy_value b)
{
   return a.is_reg == b.is_reg && a.ssa == b.ssa;
}

static bool
copy_value_is_divergent(struct copy_value v)
{
   if (!v.is_reg)
      return v.ssa->divergent;

   nir_intrinsic_instr *decl = nir_reg_get_decl(v.ssa);
   return nir_intrinsic_divergent(decl);
}

static void
copy_values(nir_builder *b, struct copy_value dest, struct copy_value src)
{
   nir_def *val = src.is_reg ? nir_load_reg(b, src.ssa) : src.ssa;

   assert(!copy_value_is_divergent(src) || copy_value_is_divergent(dest));

   assert(dest.is_reg);
   nir_store_reg(b, val, dest.ssa);
}

static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA but destinations are always registers */
      assert(entry->dest_is_reg);
      if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      exec_list_push_tail(&state->dead_instrs, &pcopy->instr.node);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(struct copy_value, values, num_copies * 2);

   /* The current location of a given piece of data. We will use -1 for
    * "null".
    */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from.
    * We will use -1 for "null".
    */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   state->builder.cursor = nir_before_instr(&pcopy->instr);

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA but destinations are always registers */
      if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
         continue;

      struct copy_value src_value = {
         .is_reg = entry->src_is_reg,
         .ssa = entry->src.ssa,
      };

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (copy_values_equal(values[i], src_value))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = src_value;
      }

      assert(entry->dest_is_reg);
      struct copy_value dest_value = {
         .is_reg = true,
         .ssa = entry->dest.reg.ssa,
      };

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (copy_values_equal(values[i], dest_value)) {
            /* Each destination of a parallel copy instruction should be
             * unique. A destination may get used as a source, so we still
             * have to walk the list. However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_value;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying. We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (1) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         copy_values(&state->builder, values[b], values[loc[a]]);

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* The next bit only applies if the source and destination have the
          * same divergence. If they differ (it must be convergent ->
          * divergent), then we can't guarantee we won't need the convergent
          * version of it again.
          */
         if (copy_value_is_divergent(values[a]) ==
             copy_value_is_divergent(values[b])) {
            /* If a needs to be filled... */
            if (pred[a] != -1) {
               /* If any other copies want a they can find it at b */
               loc[a] = b;

               /* It's ready for copying now */
               ready[++ready_idx] = a;
            }
         }
      }

      assert(ready_idx < 0);
      if (to_do_idx < 0)
         break;

      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do. We have to break a cycle, so we create a new temporary
       * register for that purpose. Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers. However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with. If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       *
       * We can also get here in the case where there is no cycle but our
       * source value is convergent, is also used as a destination by another
       * element of the parallel copy, and all the destinations of the
       * parallel copy which copy from it are divergent. In this case, the
       * above loop cannot detect that the value has moved due to all the
       * divergent destinations and we'll end up emitting a copy to a
       * temporary which never gets used. We can avoid this with additional
       * tracking or we can just trust the back-end to dead-code the unused
       * temporary (which is trivial).
       */
      assert(num_vals < num_copies * 2);
      nir_def *reg;
      if (values[b].is_reg) {
         nir_intrinsic_instr *decl = nir_reg_get_decl(values[b].ssa);
         uint8_t num_components = nir_intrinsic_num_components(decl);
         uint8_t bit_size = nir_intrinsic_bit_size(decl);
         reg = nir_decl_reg(&state->builder, num_components, bit_size, 0);
      } else {
         reg = decl_reg_for_ssa_def(&state->builder, values[b].ssa);
      }
      set_reg_divergent(reg, copy_value_is_divergent(values[b]));

      values[num_vals] = (struct copy_value){
         .is_reg = true,
         .ssa = reg,
      };
      copy_values(&state->builder, values[num_vals], values[b]);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
   exec_list_push_tail(&state->dead_instrs, &pcopy->instr.node);
}

/* Resolves the parallel copies in a block. Each block can have at most
 * two: one at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes. If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   /* There can be load_reg in the way of the copies... don't be clever. */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_parallel_copy) {
         nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(instr);

         resolve_parallel_copy(pcopy, state);
      }
   }

   return true;
}

static bool
nir_convert_from_ssa_impl(nir_function_impl *impl,
                          bool phi_webs_only)
{
   nir_shader *shader = impl->function->shader;

   struct from_ssa_state state;

   state.builder = nir_builder_create(impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_pointer_hash_table_create(NULL);
   state.progress = false;
   exec_list_make_empty(&state.dead_instrs);

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(shader, block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(shader, block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_control_flow);

   nir_metadata_require(impl, nir_metadata_instr_index |
                                 nir_metadata_live_defs |
                                 nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   resolve_registers_impl(impl, &state);

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_control_flow);

   /* Clean up dead instructions and the hash tables */
   nir_instr_free_list(&state.dead_instrs);
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
   return state.progress;
}

bool
nir_convert_from_ssa(nir_shader *shader,
                     bool phi_webs_only)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= nir_convert_from_ssa_impl(impl, phi_webs_only);
   }

   return progress;
}

static void
place_phi_read(nir_builder *b, nir_def *reg,
               nir_def *def, nir_block *block, struct set *visited_blocks)
{
   /* Search already visited blocks to avoid back edges in tree */
   if (_mesa_set_search(visited_blocks, block) == NULL) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors) {
         /* All predecessors of this block have exactly one successor and it
          * is this block so they must eventually lead here without
          * intersecting each other. Place the reads in the predecessors
          * instead of this block.
          */
         _mesa_set_add(visited_blocks, block);

         set_foreach(block->predecessors, entry) {
            place_phi_read(b, reg, def, (nir_block *)entry->key,
                           visited_blocks);
         }
         return;
      }
   }

   b->cursor = nir_after_block_before_jump(block);
   nir_store_reg(b, def, reg);
}

/** Lower all of the phi nodes in a block to movs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
 * single block to convert all of its phis to a register and some movs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation. For each phi
 * node we do the following:
 *
 * 1. For each phi instruction in the block, create a new register.
 *
 * 2. Insert movs at the top of the destination block for each phi and
 *    rewrite all uses of the phi to use the mov.
 *
 * 3. For each phi source, insert movs in the predecessor block from the phi
 *    source to the register associated with the phi.
 *
 * Correctness is guaranteed by the fact that we create a new register for
 * each phi and emit movs on both sides of the control-flow edge. Because all
 * the phis have SSA destinations (we assert this) and there is a separate
 * temporary for each phi, all movs inserted in any particular block have
 * unique destinations so the order of operations does not matter.
 *
 * The one intelligent thing this pass does is that it places the moves from
 * the phi sources as high up the predecessor tree as possible instead of in
 * the exact predecessor. This means that, in particular, it will crawl into
 * the deepest nesting of any if-ladders. In order to ensure that doing so is
 * safe, it stops as soon as one of the predecessors has multiple successors.
 */
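/* As a sketch (hypothetical names): a phi
 *
 *    ssa_3 = phi(then: ssa_1, else: ssa_2)
 *
 * becomes a register r with "store_reg r, ssa_1" at the end of the then
 * branch, "store_reg r, ssa_2" at the end of the else branch (or higher up
 * the single-successor tree), and "ssa_4 = load_reg r" replacing all uses of
 * ssa_3 in the destination block.
 */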
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_builder b = nir_builder_create(nir_cf_node_get_function(&block->cf_node));
   struct set *visited_blocks = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);

   bool progress = false;
   nir_foreach_phi_safe(phi, block) {
      nir_def *reg = decl_reg_for_ssa_def(&b, &phi->def);
      set_reg_divergent(reg, phi->def.divergent);

      b.cursor = nir_after_instr(&phi->instr);
      nir_def_rewrite_uses(&phi->def, nir_load_reg(&b, reg));

      nir_foreach_phi_src(src, phi) {
         _mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
         place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
         _mesa_set_clear(visited_blocks, NULL);
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   _mesa_set_destroy(visited_blocks, NULL);

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
def_replace_with_reg_state(nir_def *def, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;
   state->progress |= def_replace_with_reg(def, state->impl);
   return true;
}

static bool
ssa_def_is_local_to_block(nir_def *def, UNUSED void *state)
{
   nir_block *block = def->parent_instr->block;
   nir_foreach_use_including_if(use_src, def) {
      if (nir_src_is_if(use_src) ||
          nir_src_parent_instr(use_src)->block != block ||
          nir_src_parent_instr(use_src)->type == nir_instr_type_phi) {
         return false;
      }
   }

   return true;
}

static bool
instr_is_load_new_reg(nir_instr *instr, unsigned old_num_ssa)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
   if (load->intrinsic != nir_intrinsic_load_reg)
      return false;

   nir_def *reg = load->src[0].ssa;

   return reg->index >= old_num_ssa;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the SSA
 * defs in the given block with registers. If not used carefully, this may
 * result in phi nodes with register sources which is technically invalid.
 * Fortunately, the register-based into-SSA pass handles them anyway.
 */
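/* A small sketch (hypothetical names): a def "ssa_2 = iadd ssa_0, ssa_1"
 * whose value escapes the block becomes
 *
 *    ssa_2 = iadd ssa_0, ssa_1
 *    store_reg r2, ssa_2
 *
 * with every out-of-block use of ssa_2 replaced by a "load_reg r2" at the
 * use site; purely block-local defs are left in SSA form.
 */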
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_builder b = nir_builder_create(impl);

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   /* Save off the current number of SSA defs so we can detect which regs
    * we've added vs. regs that were already there.
    */
   const unsigned num_ssa = impl->ssa_alloc;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_undef) {
         /* Undefs are just a read of something never written. */
         nir_undef_instr *undef = nir_instr_as_undef(instr);
         nir_def *reg = decl_reg_for_ssa_def(&b, &undef->def);
         nir_rewrite_uses_to_load_reg(&b, &undef->def, reg);
      } else if (instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_def *reg = decl_reg_for_ssa_def(&b, &load->def);
         nir_rewrite_uses_to_load_reg(&b, &load->def, reg);

         b.cursor = nir_after_instr(instr);
         nir_store_reg(&b, &load->def, reg);
      } else if (instr_is_load_new_reg(instr, num_ssa)) {
         /* Calls to nir_rewrite_uses_to_load_reg() may place new load_reg
          * intrinsics in this block with new SSA destinations. To avoid
          * infinite recursion, we don't want to lower any newly placed
          * load_reg instructions to yet another load/store_reg.
          */
      } else if (nir_foreach_def(instr, ssa_def_is_local_to_block, NULL)) {
         /* If the SSA def produced by this instruction is only used in the
          * block in which it is defined and is not used by ifs or phis, then
          * we don't have a reason to convert it to a register.
          */
      } else {
         nir_foreach_def(instr, def_replace_with_reg_state, &state);
      }
   }

   return state.progress;
}