/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion. A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways. The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and
 * verify correctness.
 */
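
/* At a high level the pass works in phases (see opt_gcm_impl): instructions
 * are pinned or pulled out of their blocks (gcm_pin_instructions), optionally
 * value-numbered, scheduled as early as possible (gcm_schedule_early_instr),
 * scheduled as late as is reasonable (gcm_schedule_late_instr), and finally
 * inserted back into their chosen blocks (gcm_place_instr).
 */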

/* This is used to stop GCM from moving instructions out of a loop if the
 * loop contains too many instructions and moving them would create excess
 * spilling.
 *
 * TODO: Figure out a better way to decide if we should remove instructions
 * from a loop.
 */
#define MAX_LOOP_INSTRUCTIONS 100

struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* Number of ifs this block is inside */
   unsigned if_depth;

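   /* Number of instructions in the innermost loop this block was assigned
    * to, as computed by gcm_build_block_info(), or ~0u when no per-loop
    * count applies (e.g. the block is not in a loop or sits inside an if).
    */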
   unsigned loop_instr_count;

   /* The loop the block is nested inside or NULL */
   nir_loop *loop;

   /* The last instruction inserted into this block. This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

struct gcm_instr_info {
   nir_block *early_block;
};

/* Flags used in the instr->pass_flags field for various instruction states */
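/* PINNED instructions must stay in their original block.
 * SCHEDULE_EARLIER_ONLY instructions may only be moved earlier, never later
 * (e.g. derivatives, which must remain in uniform control flow).
 * SCHEDULED_EARLY/SCHEDULED_LATE mark instructions already visited by the
 * early/late scheduling walks, and PLACED marks instructions that are
 * already in their final location.
 */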
enum {
   GCM_INSTR_PINNED = (1 << 0),
   GCM_INSTR_SCHEDULE_EARLIER_ONLY = (1 << 1),
   GCM_INSTR_SCHEDULED_EARLY = (1 << 2),
   GCM_INSTR_SCHEDULED_LATE = (1 << 3),
   GCM_INSTR_PLACED = (1 << 4),
};

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   bool progress;

   /* The list of non-pinned instructions. As we pin instructions, we pull
    * any that aren't pinned or placed out of their blocks and place them in
    * this list. This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;

   unsigned num_instrs;
   struct gcm_instr_info *instr_infos;
};

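/* Recursively counts the instructions in a CF list, descending into nested
 * ifs and loops.  Used to decide whether hoisting out of a loop is likely to
 * pay off (see MAX_LOOP_INSTRUCTIONS).
 */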
static unsigned
get_loop_instr_count(struct exec_list *cf_list)
{
   unsigned loop_instr_count = 0;
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         nir_foreach_instr(instr, block) {
            loop_instr_count++;
         }
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         loop_instr_count += get_loop_instr_count(&if_stmt->then_list);
         loop_instr_count += get_loop_instr_count(&if_stmt->else_list);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         assert(!nir_loop_has_continue_construct(loop));
         loop_instr_count += get_loop_instr_count(&loop->body);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }

   return loop_instr_count;
}

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     nir_loop *loop, unsigned loop_depth, unsigned if_depth,
                     unsigned loop_instr_count)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].if_depth = if_depth;
         state->blocks[block->index].loop_depth = loop_depth;
         state->blocks[block->index].loop_instr_count = loop_instr_count;
         state->blocks[block->index].loop = loop;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop, loop_depth,
                              if_depth + 1, ~0u);
         gcm_build_block_info(&if_stmt->else_list, state, loop, loop_depth,
                              if_depth + 1, ~0u);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         assert(!nir_loop_has_continue_construct(loop));
         gcm_build_block_info(&loop->body, state, loop, loop_depth + 1, if_depth,
                              get_loop_instr_count(&loop->body));
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}

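/* Returns true if the value read by 'src' comes from an instruction that is
 * (or will trivially become) scalar: scalar-output ALU ops, vecN ops,
 * constants, undefs and a handful of load intrinsics.  Movs of sources that
 * can't be scalarized get pinned in gcm_pin_instructions().
 */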
static bool
is_src_scalarizable(nir_src *src)
{

   nir_instr *src_instr = src->ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized. We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_undef:
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, (nir_var_function_temp |
                                               nir_var_shader_temp));
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_per_primitive_input:
         return true;
      default:
         break;
      }

      return false;
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

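/* Returns true if the resource binding behind 'src' can be chased and all of
 * its binding indices are always uniform.
 */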
static bool
is_binding_uniform(nir_src src)
{
   nir_binding binding = nir_chase_binding(src);
   if (!binding.success)
      return false;

   for (unsigned i = 0; i < binding.num_indices; i++) {
      if (!nir_src_is_always_uniform(binding.indices[i]))
         return false;
   }

   return true;
}

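/* Sets the pass_flags for an intrinsic: intrinsics that can't be reordered
 * are pinned outright, and otherwise-movable loads whose binding or offset
 * is not always uniform are pinned so they aren't hoisted across
 * non-uniform control flow.
 */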
static void
pin_intrinsic(nir_intrinsic_instr *intrin)
{
   nir_instr *instr = &intrin->instr;

   if (!nir_intrinsic_can_reorder(intrin)) {
      instr->pass_flags = GCM_INSTR_PINNED;
      return;
   }

   instr->pass_flags = 0;

   /* If the intrinsic requires a uniform source, we can't safely move it
    * across non-uniform control flow if it's not uniform at the point it's
    * defined.  Stores and atomics can never be re-ordered, so we don't have
    * to consider them here.
    */
   bool non_uniform = nir_intrinsic_has_access(intrin) &&
                      (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM);
   if (!non_uniform &&
       (intrin->intrinsic == nir_intrinsic_load_ubo ||
        intrin->intrinsic == nir_intrinsic_load_ssbo ||
        intrin->intrinsic == nir_intrinsic_get_ubo_size ||
        intrin->intrinsic == nir_intrinsic_get_ssbo_size ||
        nir_intrinsic_has_image_dim(intrin) ||
        ((intrin->intrinsic == nir_intrinsic_load_deref ||
          intrin->intrinsic == nir_intrinsic_deref_buffer_array_length) &&
         nir_deref_mode_may_be(nir_src_as_deref(intrin->src[0]),
                               nir_var_mem_ubo | nir_var_mem_ssbo)))) {
      if (!is_binding_uniform(intrin->src[0]))
         instr->pass_flags = GCM_INSTR_PINNED;
   } else if (intrin->intrinsic == nir_intrinsic_load_push_constant) {
      if (!nir_src_is_always_uniform(intrin->src[0]))
         instr->pass_flags = GCM_INSTR_PINNED;
   } else if (intrin->intrinsic == nir_intrinsic_load_deref &&
              nir_deref_mode_is(nir_src_as_deref(intrin->src[0]),
                                nir_var_mem_push_const)) {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      while (deref->deref_type != nir_deref_type_var) {
         if ((deref->deref_type == nir_deref_type_array ||
              deref->deref_type == nir_deref_type_ptr_as_array) &&
             !nir_src_is_always_uniform(deref->arr.index)) {
            instr->pass_flags = GCM_INSTR_PINNED;
            return;
         }
         deref = nir_deref_instr_parent(deref);
         if (!deref) {
            instr->pass_flags = GCM_INSTR_PINNED;
            return;
         }
      }
   }
}

/* Walks the instruction list and marks immovable instructions as pinned or
 * placed.
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED, GCM_INSTR_PLACED or 0.
 */
static void
gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
{
   state->num_instrs = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Index the instructions for use in gcm_state::instrs */
         instr->index = state->num_instrs++;

         switch (instr->type) {
         case nir_instr_type_alu: {
            nir_alu_instr *alu = nir_instr_as_alu(instr);

            if (nir_op_is_derivative(alu->op)) {
               /* These can only go in uniform control flow */
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
            } else if (alu->op == nir_op_mov &&
                       !is_src_scalarizable(&alu->src[0].src)) {
               instr->pass_flags = GCM_INSTR_PINNED;
            } else {
               instr->pass_flags = 0;
            }
            break;
         }

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if (nir_tex_instr_has_implicit_derivative(tex))
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;

            for (unsigned i = 0; i < tex->num_srcs; i++) {
               nir_tex_src *src = &tex->src[i];
               switch (src->src_type) {
               case nir_tex_src_texture_deref:
                  if (!tex->texture_non_uniform && !is_binding_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_sampler_deref:
                  if (!tex->sampler_non_uniform && !is_binding_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_texture_offset:
               case nir_tex_src_texture_handle:
                  if (!tex->texture_non_uniform && !nir_src_is_always_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_sampler_offset:
               case nir_tex_src_sampler_handle:
                  if (!tex->sampler_non_uniform && !nir_src_is_always_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               default:
                  break;
               }
            }
            break;
         }

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
            instr->pass_flags = 0;
            break;

         case nir_instr_type_intrinsic:
            pin_intrinsic(nir_instr_as_intrinsic(instr));
            break;

         case nir_instr_type_call:
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         case nir_instr_type_jump:
         case nir_instr_type_undef:
         case nir_instr_type_phi:
            instr->pass_flags = GCM_INSTR_PLACED;
            break;

         default:
            unreachable("Invalid instruction type in GCM");
         }

         if (!(instr->pass_flags & GCM_INSTR_PLACED)) {
            /* If this is an unplaced instruction, go ahead and pull it out
             * of the program and put it on the instrs list.  This has a
             * couple of benefits.  First, it makes the scheduling algorithm
             * more efficient because we can avoid walking over basic blocks.
             * Second, it keeps us from causing linked list confusion when
             * we're trying to put everything in its proper place at the end
             * of the pass.
             *
             * Note that we don't use nir_instr_remove here because that also
             * cleans up uses and defs and we want to keep that information.
             */
            exec_node_remove(&instr->node);
            exec_list_push_tail(&state->instrs, &instr->node);
         }
      }
   }
}

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Update an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's early block if the source
 * instruction is lower down the dominance tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just go ahead and compare indices.
    */
   struct gcm_instr_info *src_info =
      &state->instr_infos[src->ssa->parent_instr->index];
   struct gcm_instr_info *info = &state->instr_infos[instr->index];
   if (info->early_block->index < src_info->early_block->index)
      info->early_block = src_info->early_block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  We may still
    * be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating the early_block field of
 * the corresponding gcm_instr_info entry.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned/placed instructions always get scheduled in their original
    * block so we don't need to do anything.  Also, bailing here keeps us
    * from ever following the sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED ||
       instr->pass_flags & GCM_INSTR_PLACED) {
      state->instr_infos[instr->index].early_block = instr->block;
      return;
   }

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   state->instr_infos[instr->index].early_block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}

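/* Decides whether it is worthwhile to hoist the given instruction out of the
 * loop containing its current block and into 'block'.  We are deliberately
 * conservative here to avoid increasing register pressure (see
 * MAX_LOOP_INSTRUCTIONS).
 */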
static bool
set_block_for_loop_instr(struct gcm_state *state, nir_instr *instr,
                         nir_block *block)
{
   /* If the instruction wasn't in a loop to begin with we don't want to push
    * it down into one.
    */
   nir_loop *loop = state->blocks[instr->block->index].loop;
   if (loop == NULL)
      return true;

   assert(!nir_loop_has_continue_construct(loop));
   if (nir_block_dominates(instr->block, block))
      return true;

   /* If the loop only executes a single time, i.e. it's wrapped in a:
    *
    *    do { ... break; } while (true)
    *
    * don't move the instruction as it will not help anything.
    */
   if (loop->info->limiting_terminator == NULL && !loop->info->complex_loop &&
       nir_block_ends_in_break(nir_loop_last_block(loop)))
      return false;

   /* Being too aggressive with how we pull instructions out of loops can
    * result in extra register pressure and spilling.  For example, it's
    * fairly common for loops in compute shaders to calculate SSBO offsets
    * using the workgroup id, subgroup id and subgroup invocation; pulling
    * all of these calculations outside the loop increases register pressure.
    *
    * To work around these issues, for now we only allow constant and texture
    * instructions to be moved outside their original loops, or instructions
    * where the total loop instruction count is less than
    * MAX_LOOP_INSTRUCTIONS.
    *
    * TODO: figure out some more heuristics to allow more to be moved out of
    * loops.
    */
   if (state->blocks[instr->block->index].loop_instr_count < MAX_LOOP_INSTRUCTIONS)
      return true;

   if (instr->type == nir_instr_type_load_const ||
       instr->type == nir_instr_type_tex ||
       (instr->type == nir_instr_type_intrinsic &&
        nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_resource_intel))
      return true;

   return false;
}

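/* Decides whether the given instruction may be sunk into the if-block
 * 'block'.  For now only load_const and Intel resource intrinsics qualify.
 */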
static bool
set_block_to_if_block(struct gcm_state *state, nir_instr *instr,
                      nir_block *block)
{
   if (instr->type == nir_instr_type_load_const)
      return true;

   if (instr->type == nir_instr_type_intrinsic &&
       nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_resource_intel)
      return true;

   /* TODO: Figure out some more heuristics to allow more to be moved into
    * if-statements.
    */

   return false;
}

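/* Given the legal placement range [early_block, late_block] on the dominance
 * tree, walks up from late_block and picks the best block for the
 * instruction: we prefer sinking into deeper if-blocks when allowed and
 * hoisting out of loops when that looks profitable.
 */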
static nir_block *
gcm_choose_block_for_instr(nir_instr *instr, nir_block *early_block,
                           nir_block *late_block, struct gcm_state *state)
{
   assert(nir_block_dominates(early_block, late_block));

   bool block_set = false;

   /* First see if we can push the instruction down into an if-statement's
    * block.
    */
   nir_block *best = late_block;
   for (nir_block *block = late_block; block != NULL; block = block->imm_dom) {
      if (state->blocks[block->index].loop_depth >
          state->blocks[instr->block->index].loop_depth)
         continue;

      if (state->blocks[block->index].if_depth >=
             state->blocks[best->index].if_depth &&
          set_block_to_if_block(state, instr, block)) {
         /* If we are pushing the instruction into an if, we want it to be
          * in the earliest block, not the latest, to avoid creating register
          * pressure issues.  So we don't break unless we come across the
          * block the instruction was originally in.
          */
         best = block;
         block_set = true;
         if (block == instr->block)
            break;
      } else if (block == instr->block) {
         /* If we couldn't push the instruction later, just put it back where
          * it was previously.
          */
         if (!block_set)
            best = block;
         break;
      }

      if (block == early_block)
         break;
   }

   /* Now see if we can evict the instruction from a loop */
   for (nir_block *block = late_block; block != NULL; block = block->imm_dom) {
      if (state->blocks[block->index].loop_depth <
          state->blocks[best->index].loop_depth) {
         if (set_block_for_loop_instr(state, instr, block)) {
            best = block;
         } else if (block == instr->block) {
            if (!block_set)
               best = block;
            break;
         }
      }

      if (block == early_block)
         break;
   }

   return best;
}

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = nir_src_parent_instr(use_src);

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = nir_src_parent_if(use_src);

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   nir_block *early_block =
      state->instr_infos[def->parent_instr->index].early_block;

   /* Some instructions may never be used.  Flag them and the instruction
    * placement code will get rid of them for us.
    */
   if (lca == NULL) {
      def->parent_instr->block = NULL;
      return true;
   }

   if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
       lca != def->parent_instr->block &&
       nir_block_dominates(def->parent_instr->block, lca)) {
      lca = def->parent_instr->block;
   }

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best_block =
      gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);

   if (def->parent_instr->block != best_block)
      state->progress = true;

   def->parent_instr->block = best_block;

   return true;
}

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that.  The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned/placed instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PLACED ||
       instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_def(instr, gcm_schedule_late_def, state);
}

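/* Helper for gcm_place_instr(): when an instruction ends up with no block
 * (it is never needed), any remaining uses of its defs are rewritten to an
 * undef so the instruction can be removed safely.
 */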
static bool
gcm_replace_def_with_undef(nir_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   if (nir_def_is_unused(def))
      return true;

   nir_undef_instr *undef =
      nir_undef_instr_create(state->impl->function->shader,
                             def->num_components, def->bit_size);
   nir_instr_insert(nir_before_impl(state->impl), &undef->instr);
   nir_def_rewrite_uses(def, &undef->def);

   return true;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we simply insert instructions in the reverse order they were
 * extracted.  This places instructions that were scheduled earlier onto the
 * end of their new block and instructions that were scheduled later at the
 * start of their new block.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   if (instr->block == NULL) {
      nir_foreach_def(instr, gcm_replace_def_with_undef, state);
      nir_instr_remove(instr);
      return;
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   exec_node_remove(&instr->node);

   if (block_info->last_instr) {
      exec_node_insert_node_before(&block_info->last_instr->node,
                                   &instr->node);
   } else {
      /* Schedule it at the end of the block */
      nir_instr *jump_instr = nir_block_last_instr(instr->block);
      if (jump_instr && jump_instr->type == nir_instr_type_jump) {
         exec_node_insert_node_before(&jump_instr->node, &instr->node);
      } else {
         exec_list_push_tail(&instr->block->instr_list, &instr->node);
      }
   }

   block_info->last_instr = instr;
}

/**
 * Are instructions a and b both contained directly in the same if statement
 * (in either branch)?
 */
static bool
weak_gvn(const nir_instr *a, const nir_instr *b)
{
   const struct nir_cf_node *ap = a->block->cf_node.parent;
   const struct nir_cf_node *bp = b->block->cf_node.parent;
   return ap && ap == bp && ap->type == nir_cf_node_if;
}

static bool
opt_gcm_impl(nir_shader *shader, nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_control_flow);
   nir_metadata_require(impl, nir_metadata_loop_analysis,
                        shader->options->force_indirect_unrolling,
                        shader->options->force_indirect_unrolling_sampler);

   /* A previous pass may have left pass_flags dirty, so clear it all out. */
   nir_foreach_block(block, impl)
      nir_foreach_instr(instr, block)
         instr->pass_flags = 0;

   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   state.progress = false;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, NULL, 0, 0, ~0u);

   gcm_pin_instructions(impl, &state);

   state.instr_infos =
      rzalloc_array(NULL, struct gcm_instr_info, state.num_instrs);

   /* Perform (at least some) Global Value Numbering (GVN).
    *
    * We perform full GVN when `value_number' is true.  This can be too
    * aggressive, moving values far away and extending their live ranges,
    * so we don't always want to do it.
    *
    * Otherwise, we perform 'weaker' GVN: if identical ALU instructions appear
    * on both sides of the same if/else block, we allow them to be moved.
    * This cleans up a lot of mess without being -too- aggressive.
    */
   struct set *gvn_set = nir_instr_set_create(NULL);
   foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
      if (instr->pass_flags & GCM_INSTR_PINNED)
         continue;

      if (nir_instr_set_add_or_rewrite(gvn_set, instr,
                                       value_number ? NULL : weak_gvn)) {
         state.progress = true;
         nir_instr_remove(instr);
      }
   }
   nir_instr_set_destroy(gvn_set);

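   /* Schedule early, then late; placement then pops from the tail of
    * state.instrs so instructions are re-inserted in reverse extraction
    * order (see gcm_place_instr).
    */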
   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
   ralloc_free(state.instr_infos);

   nir_metadata_preserve(impl, nir_metadata_control_flow |
                                  nir_metadata_loop_analysis);

   return state.progress;
}

bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= opt_gcm_impl(shader, impl, value_number);
   }

   return progress;
}