1 /*
2  * Copyright © 2018 Valve Corporation
3  *
4  * SPDX-License-Identifier: MIT
5  */
6 
7 #include "aco_builder.h"
8 #include "aco_ir.h"
9 
10 #include "common/amdgfxregs.h"
11 
12 #include <algorithm>
13 #include <unordered_set>
14 #include <vector>
15 
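/* Scheduling parameters: the *_WINDOW_SIZE values bound how far the pass searches
 * around a scheduled instruction and the *_MAX_MOVES values bound how many candidates
 * may be moved for it. The SMEM/VMEM windows and move budgets shrink as ctx.num_waves
 * grows, while the clause grab distances grow with it (see the comment below). */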
16 #define SMEM_WINDOW_SIZE    (350 - ctx.num_waves * 35)
17 #define VMEM_WINDOW_SIZE    (1024 - ctx.num_waves * 64)
18 #define LDS_WINDOW_SIZE     64
19 #define POS_EXP_WINDOW_SIZE 512
20 #define SMEM_MAX_MOVES      (64 - ctx.num_waves * 4)
21 #define VMEM_MAX_MOVES      (256 - ctx.num_waves * 16)
22 #define LDSDIR_MAX_MOVES    10
23 #define LDS_MAX_MOVES       32
24 /* Creating clauses decreases def-use distances, so be less aggressive at forming them when num_waves is low. */
25 #define VMEM_CLAUSE_MAX_GRAB_DIST (ctx.num_waves * 2)
26 #define VMEM_STORE_CLAUSE_MAX_GRAB_DIST (ctx.num_waves * 4)
27 #define POS_EXP_MAX_MOVES         512
28 
29 namespace aco {
30 
31 namespace {
32 
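/* Outcome of attempting to move one candidate instruction:
 * - move_fail_ssa:      the move would violate an SSA def/use dependency
 * - move_fail_rar:      the move would reorder a read past the (first-)kill of the
 *                       same temporary
 * - move_fail_pressure: the move would push register demand above the allowed maximum */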
33 enum MoveResult {
34    move_success,
35    move_fail_ssa,
36    move_fail_rar,
37    move_fail_pressure,
38 };
39 
40 /**
41  * Cursor for downwards moves, where a single instruction is moved towards
42  * or below a group of instructions that hardware can execute as a clause.
43  */
44 struct DownwardsCursor {
45    int source_idx; /* Current instruction to consider for moving */
46 
47    int insert_idx_clause; /* First clause instruction */
48    int insert_idx;        /* First instruction *after* the clause */
49 
50    /* Maximum demand of all clause instructions,
51     * i.e. from insert_idx_clause (inclusive) to insert_idx (exclusive) */
52    RegisterDemand clause_demand;
53    /* Maximum demand of instructions from source_idx to insert_idx_clause (both exclusive) */
54    RegisterDemand total_demand;
55 
56    DownwardsCursor(int current_idx, RegisterDemand initial_clause_demand)
57        : source_idx(current_idx - 1), insert_idx_clause(current_idx), insert_idx(current_idx + 1),
58          clause_demand(initial_clause_demand)
59    {}
60 
61    void verify_invariants(const Block* block);
62 };
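/* Right after downwards_init(current_idx, ...) the cursor brackets the scheduled
 * instruction itself: source_idx = current_idx - 1 (the next candidate above it),
 * insert_idx_clause = current_idx (the clause initially consists of only the
 * scheduled instruction) and insert_idx = current_idx + 1. */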
63 
64 /**
65  * Cursor for upwards moves, where a single instruction is moved below
66  * another instruction.
67  */
68 struct UpwardsCursor {
69    int source_idx; /* Current instruction to consider for moving */
70    int insert_idx; /* Instruction to move in front of */
71 
72    /* Maximum demand of instructions from insert_idx (inclusive) to source_idx (exclusive) */
73    RegisterDemand total_demand;
74 
75    UpwardsCursor(int source_idx_) : source_idx(source_idx_)
76    {
77       insert_idx = -1; /* to be initialized later */
78    }
79 
80    bool has_insert_idx() const { return insert_idx != -1; }
81    void verify_invariants(const Block* block);
82 };
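/* insert_idx stays -1 until the first instruction depending on the current one is
 * found; only from then on are further candidates actually moved upwards. */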
83 
84 struct MoveState {
85    RegisterDemand max_registers;
86 
87    Block* block;
88    Instruction* current;
89    bool improved_rar;
90 
91    std::vector<bool> depends_on;
92    /* Two are needed because, for downwards VMEM scheduling, one needs to
93     * exclude the instructions in the clause, since new instructions in the
94     * clause are not moved past any other instructions in the clause. */
95    std::vector<bool> RAR_dependencies;
96    std::vector<bool> RAR_dependencies_clause;
97 
98    /* for moving instructions before the current instruction to after it */
99    DownwardsCursor downwards_init(int current_idx, bool improved_rar, bool may_form_clauses);
100    MoveResult downwards_move(DownwardsCursor&, bool clause);
101    void downwards_skip(DownwardsCursor&);
102 
103    /* for moving instructions after the first use of the current instruction upwards */
104    UpwardsCursor upwards_init(int source_idx, bool improved_rar);
105    bool upwards_check_deps(UpwardsCursor&);
106    void upwards_update_insert_idx(UpwardsCursor&);
107    MoveResult upwards_move(UpwardsCursor&);
108    void upwards_skip(UpwardsCursor&);
109 };
110 
111 struct sched_ctx {
112    amd_gfx_level gfx_level;
113    int16_t num_waves;
114    int16_t last_SMEM_stall;
115    int last_SMEM_dep_idx;
116    MoveState mv;
117    bool schedule_pos_exports = true;
118    unsigned schedule_pos_export_div = 1;
119 };
120 
121 /* This scheduler is a simple bottom-up pass based on ideas from
122  * "A Novel Lightweight Instruction Scheduling Algorithm for Just-In-Time Compiler"
123  * by Xiaohua Shi and Peng Guo.
124  * The basic approach is to iterate over all instructions. When a memory instruction
125  * is encountered, it tries to move independent instructions from above and below
126  * between the memory instruction and its first user.
127  * The novelty is that this scheduler cares about the current register pressure:
128  * instructions are only moved if the register pressure stays within a certain bound.
129  */
130 
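/* Moves the element at index idx directly in front of the element that was
 * originally at index before: if idx < before it ends up at before - 1, if
 * idx > before it ends up at before; everything in between shifts by one.
 * E.g. for {A, B, C, D}, move_element(begin, 0, 3) gives {B, C, A, D} and
 * move_element(begin, 3, 1) gives {A, D, B, C}. */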
131 template <typename T>
132 void
133 move_element(T begin_it, size_t idx, size_t before)
134 {
135    if (idx < before) {
136       auto begin = std::next(begin_it, idx);
137       auto end = std::next(begin_it, before);
138       std::rotate(begin, begin + 1, end);
139    } else if (idx > before) {
140       auto begin = std::next(begin_it, before);
141       auto end = std::next(begin_it, idx + 1);
142       std::rotate(begin, end - 1, end);
143    }
144 }
145 
146 void
147 DownwardsCursor::verify_invariants(const Block* block)
148 {
149    assert(source_idx < insert_idx_clause);
150    assert(insert_idx_clause < insert_idx);
151 
152 #ifndef NDEBUG
153    RegisterDemand reference_demand;
154    for (int i = source_idx + 1; i < insert_idx_clause; ++i) {
155       reference_demand.update(block->instructions[i]->register_demand);
156    }
157    assert(total_demand == reference_demand);
158 
159    reference_demand = {};
160    for (int i = insert_idx_clause; i < insert_idx; ++i) {
161       reference_demand.update(block->instructions[i]->register_demand);
162    }
163    assert(clause_demand == reference_demand);
164 #endif
165 }
166 
167 DownwardsCursor
168 MoveState::downwards_init(int current_idx, bool improved_rar_, bool may_form_clauses)
169 {
170    improved_rar = improved_rar_;
171 
172    std::fill(depends_on.begin(), depends_on.end(), false);
173    if (improved_rar) {
174       std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
175       if (may_form_clauses)
176          std::fill(RAR_dependencies_clause.begin(), RAR_dependencies_clause.end(), false);
177    }
178 
179    for (const Operand& op : current->operands) {
180       if (op.isTemp()) {
181          depends_on[op.tempId()] = true;
182          if (improved_rar && op.isFirstKill())
183             RAR_dependencies[op.tempId()] = true;
184       }
185    }
186 
187    DownwardsCursor cursor(current_idx, block->instructions[current_idx]->register_demand);
188    cursor.verify_invariants(block);
189    return cursor;
190 }
191 
192 /* If add_to_clause is true, the current clause is extended by moving the
193  * instruction at source_idx in front of the clause. Otherwise, the instruction
194  * is moved past the end of the clause without extending it */
195 MoveResult
196 MoveState::downwards_move(DownwardsCursor& cursor, bool add_to_clause)
197 {
198    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
199 
200    for (const Definition& def : instr->definitions)
201       if (def.isTemp() && depends_on[def.tempId()])
202          return move_fail_ssa;
203 
204    /* check if one of the candidate's operands is killed by a depending instruction */
205    std::vector<bool>& RAR_deps =
206       improved_rar ? (add_to_clause ? RAR_dependencies_clause : RAR_dependencies) : depends_on;
207    for (const Operand& op : instr->operands) {
208       if (op.isTemp() && RAR_deps[op.tempId()]) {
209          // FIXME: account for difference in register pressure
210          return move_fail_rar;
211       }
212    }
213 
214    if (add_to_clause) {
215       for (const Operand& op : instr->operands) {
216          if (op.isTemp()) {
217             depends_on[op.tempId()] = true;
218             if (op.isFirstKill())
219                RAR_dependencies[op.tempId()] = true;
220          }
221       }
222    }
223 
224    const int dest_insert_idx = add_to_clause ? cursor.insert_idx_clause : cursor.insert_idx;
225    RegisterDemand register_pressure = cursor.total_demand;
226    if (!add_to_clause) {
227       register_pressure.update(cursor.clause_demand);
228    }
229 
230    /* Check the new demand of the instructions being moved over */
231    const RegisterDemand candidate_diff = get_live_changes(instr.get());
232    if (RegisterDemand(register_pressure - candidate_diff).exceeds(max_registers))
233       return move_fail_pressure;
234 
235    /* New demand for the moved instruction */
236    const RegisterDemand temp = get_temp_registers(instr.get());
237    const RegisterDemand temp2 = get_temp_registers(block->instructions[dest_insert_idx - 1].get());
238    const RegisterDemand new_demand =
239       block->instructions[dest_insert_idx - 1]->register_demand - temp2 + temp;
240    if (new_demand.exceeds(max_registers))
241       return move_fail_pressure;
242 
243    /* move the candidate below the instruction(s) being scheduled */
244    move_element(block->instructions.begin(), cursor.source_idx, dest_insert_idx);
245 
246    /* update register pressure */
247    for (int i = cursor.source_idx; i < dest_insert_idx - 1; i++)
248       block->instructions[i]->register_demand -= candidate_diff;
249    block->instructions[dest_insert_idx - 1]->register_demand = new_demand;
250    cursor.insert_idx_clause--;
251    if (cursor.source_idx != cursor.insert_idx_clause) {
252       /* Update demand if we moved over any instructions before the clause */
253       cursor.total_demand -= candidate_diff;
254    } else {
255       assert(cursor.total_demand == RegisterDemand{});
256    }
257    if (add_to_clause) {
258       cursor.clause_demand.update(new_demand);
259    } else {
260       cursor.clause_demand -= candidate_diff;
261       cursor.insert_idx--;
262    }
263 
264    cursor.source_idx--;
265    cursor.verify_invariants(block);
266    return move_success;
267 }
268 
269 void
270 MoveState::downwards_skip(DownwardsCursor& cursor)
271 {
272    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
273 
274    for (const Operand& op : instr->operands) {
275       if (op.isTemp()) {
276          depends_on[op.tempId()] = true;
277          if (improved_rar && op.isFirstKill()) {
278             RAR_dependencies[op.tempId()] = true;
279             RAR_dependencies_clause[op.tempId()] = true;
280          }
281       }
282    }
283    cursor.total_demand.update(instr->register_demand);
284    cursor.source_idx--;
285    cursor.verify_invariants(block);
286 }
287 
288 void
289 UpwardsCursor::verify_invariants(const Block* block)
290 {
291 #ifndef NDEBUG
292    if (!has_insert_idx()) {
293       return;
294    }
295 
296    assert(insert_idx < source_idx);
297 
298    RegisterDemand reference_demand;
299    for (int i = insert_idx; i < source_idx; ++i) {
300       reference_demand.update(block->instructions[i]->register_demand);
301    }
302    assert(total_demand == reference_demand);
303 #endif
304 }
305 
306 UpwardsCursor
307 MoveState::upwards_init(int source_idx, bool improved_rar_)
308 {
309    improved_rar = improved_rar_;
310 
311    std::fill(depends_on.begin(), depends_on.end(), false);
312    std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
313 
314    for (const Definition& def : current->definitions) {
315       if (def.isTemp())
316          depends_on[def.tempId()] = true;
317    }
318 
319    return UpwardsCursor(source_idx);
320 }
321 
322 bool
323 MoveState::upwards_check_deps(UpwardsCursor& cursor)
324 {
325    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
326    for (const Operand& op : instr->operands) {
327       if (op.isTemp() && depends_on[op.tempId()])
328          return false;
329    }
330    return true;
331 }
332 
333 void
334 MoveState::upwards_update_insert_idx(UpwardsCursor& cursor)
335 {
336    cursor.insert_idx = cursor.source_idx;
337    cursor.total_demand = block->instructions[cursor.insert_idx]->register_demand;
338 }
339 
340 MoveResult
341 MoveState::upwards_move(UpwardsCursor& cursor)
342 {
343    assert(cursor.has_insert_idx());
344 
345    aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
346    for (const Operand& op : instr->operands) {
347       if (op.isTemp() && depends_on[op.tempId()])
348          return move_fail_ssa;
349    }
350 
351    /* check if candidate uses/kills an operand which is used by a dependency */
352    for (const Operand& op : instr->operands) {
353       if (op.isTemp() && (!improved_rar || op.isFirstKill()) && RAR_dependencies[op.tempId()])
354          return move_fail_rar;
355    }
356 
357    /* check if register pressure is low enough: the diff is negative if register pressure is
358     * decreased */
359    const RegisterDemand candidate_diff = get_live_changes(instr.get());
360    const RegisterDemand temp = get_temp_registers(instr.get());
361    if (RegisterDemand(cursor.total_demand + candidate_diff).exceeds(max_registers))
362       return move_fail_pressure;
363    const RegisterDemand temp2 =
364       get_temp_registers(block->instructions[cursor.insert_idx - 1].get());
365    const RegisterDemand new_demand =
366       block->instructions[cursor.insert_idx - 1]->register_demand - temp2 + candidate_diff + temp;
367    if (new_demand.exceeds(max_registers))
368       return move_fail_pressure;
369 
370    /* move the candidate above the insert_idx */
371    move_element(block->instructions.begin(), cursor.source_idx, cursor.insert_idx);
372 
373    /* update register pressure */
374    block->instructions[cursor.insert_idx]->register_demand = new_demand;
375    for (int i = cursor.insert_idx + 1; i <= cursor.source_idx; i++)
376       block->instructions[i]->register_demand += candidate_diff;
377    cursor.total_demand += candidate_diff;
378 
379    cursor.total_demand.update(block->instructions[cursor.source_idx]->register_demand);
380 
381    cursor.insert_idx++;
382    cursor.source_idx++;
383 
384    cursor.verify_invariants(block);
385 
386    return move_success;
387 }
388 
389 void
390 MoveState::upwards_skip(UpwardsCursor& cursor)
391 {
392    if (cursor.has_insert_idx()) {
393       aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
394       for (const Definition& def : instr->definitions) {
395          if (def.isTemp())
396             depends_on[def.tempId()] = true;
397       }
398       for (const Operand& op : instr->operands) {
399          if (op.isTemp())
400             RAR_dependencies[op.tempId()] = true;
401       }
402       cursor.total_demand.update(instr->register_demand);
403    }
404 
405    cursor.source_idx++;
406 
407    cursor.verify_invariants(block);
408 }
409 
410 bool
411 is_done_sendmsg(amd_gfx_level gfx_level, const Instruction* instr)
412 {
413    if (gfx_level <= GFX10_3 && instr->opcode == aco_opcode::s_sendmsg)
414       return (instr->salu().imm & sendmsg_id_mask) == sendmsg_gs_done;
415    return false;
416 }
417 
418 bool
419 is_pos_prim_export(amd_gfx_level gfx_level, const Instruction* instr)
420 {
421    /* Because of NO_PC_EXPORT=1, a done=1 position or primitive export can launch PS waves before
422     * the NGG/VS wave finishes if there are no parameter exports.
423     */
424    return instr->opcode == aco_opcode::exp && instr->exp().dest >= V_008DFC_SQ_EXP_POS &&
425           instr->exp().dest <= V_008DFC_SQ_EXP_PRIM && gfx_level >= GFX10;
426 }
427 
428 memory_sync_info
429 get_sync_info_with_hack(const Instruction* instr)
430 {
431    memory_sync_info sync = get_sync_info(instr);
432    if (instr->isSMEM() && !instr->operands.empty() && instr->operands[0].bytes() == 16) {
433       // FIXME: currently, it doesn't seem beneficial to omit this due to how our scheduler works
434       sync.storage = (storage_class)(sync.storage | storage_buffer);
435       sync.semantics =
436          (memory_semantics)((sync.semantics | semantic_private) & ~semantic_can_reorder);
437    }
438    return sync;
439 }
440 
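/* Summary of the memory/barrier events a hazard query has seen: the bar_* fields
 * collect storage classes from p_barrier instructions, the access_* fields collect
 * storage classes of ordinary memory accesses, split by their semantics. */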
441 struct memory_event_set {
442    bool has_control_barrier;
443 
444    unsigned bar_acquire;
445    unsigned bar_release;
446    unsigned bar_classes;
447 
448    unsigned access_acquire;
449    unsigned access_release;
450    unsigned access_relaxed;
451    unsigned access_atomic;
452 };
453 
454 struct hazard_query {
455    amd_gfx_level gfx_level;
456    bool contains_spill;
457    bool contains_sendmsg;
458    bool uses_exec;
459    bool writes_exec;
460    memory_event_set mem_events;
461    unsigned aliasing_storage;      /* storage classes which are accessed (non-SMEM) */
462    unsigned aliasing_storage_smem; /* storage classes which are accessed (SMEM) */
463 };
464 
465 void
466 init_hazard_query(const sched_ctx& ctx, hazard_query* query)
467 {
468    query->gfx_level = ctx.gfx_level;
469    query->contains_spill = false;
470    query->contains_sendmsg = false;
471    query->uses_exec = false;
472    query->writes_exec = false;
473    memset(&query->mem_events, 0, sizeof(query->mem_events));
474    query->aliasing_storage = 0;
475    query->aliasing_storage_smem = 0;
476 }
477 
478 void
479 add_memory_event(amd_gfx_level gfx_level, memory_event_set* set, Instruction* instr,
480                  memory_sync_info* sync)
481 {
482    set->has_control_barrier |= is_done_sendmsg(gfx_level, instr);
483    set->has_control_barrier |= is_pos_prim_export(gfx_level, instr);
484    if (instr->opcode == aco_opcode::p_barrier) {
485       Pseudo_barrier_instruction& bar = instr->barrier();
486       if (bar.sync.semantics & semantic_acquire)
487          set->bar_acquire |= bar.sync.storage;
488       if (bar.sync.semantics & semantic_release)
489          set->bar_release |= bar.sync.storage;
490       set->bar_classes |= bar.sync.storage;
491 
492       set->has_control_barrier |= bar.exec_scope > scope_invocation;
493    }
494 
495    if (!sync->storage)
496       return;
497 
498    if (sync->semantics & semantic_acquire)
499       set->access_acquire |= sync->storage;
500    if (sync->semantics & semantic_release)
501       set->access_release |= sync->storage;
502 
503    if (!(sync->semantics & semantic_private)) {
504       if (sync->semantics & semantic_atomic)
505          set->access_atomic |= sync->storage;
506       else
507          set->access_relaxed |= sync->storage;
508    }
509 }
510 
511 void
512 add_to_hazard_query(hazard_query* query, Instruction* instr)
513 {
514    if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
515       query->contains_spill = true;
516    query->contains_sendmsg |= instr->opcode == aco_opcode::s_sendmsg;
517    query->uses_exec |= needs_exec_mask(instr);
518    for (const Definition& def : instr->definitions) {
519       if (def.isFixed() && def.physReg() == exec)
520          query->writes_exec = true;
521    }
522 
523    memory_sync_info sync = get_sync_info_with_hack(instr);
524 
525    add_memory_event(query->gfx_level, &query->mem_events, instr, &sync);
526 
527    if (!(sync.semantics & semantic_can_reorder)) {
528       unsigned storage = sync.storage;
529       /* images and buffer/global memory can alias */ // TODO: more precisely, buffer images and
530                                                       // buffer/global memory can alias
531       if (storage & (storage_buffer | storage_image))
532          storage |= storage_buffer | storage_image;
533       if (instr->isSMEM())
534          query->aliasing_storage_smem |= storage;
535       else
536          query->aliasing_storage |= storage;
537    }
538 }
539 
540 enum HazardResult {
541    hazard_success,
542    hazard_fail_reorder_vmem_smem,
543    hazard_fail_reorder_ds,
544    hazard_fail_reorder_sendmsg,
545    hazard_fail_spill,
546    hazard_fail_export,
547    hazard_fail_barrier,
548    /* Must stop at these failures. The hazard query code doesn't consider them
549     * when added. */
550    hazard_fail_exec,
551    hazard_fail_unreorderable,
552 };
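/* The schedule_* functions generally treat the reorder/spill/export/barrier failures
 * as "skip this candidate but keep searching", whereas hazard_fail_exec and
 * hazard_fail_unreorderable abort the search for the current instruction. */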
553 
554 HazardResult
555 perform_hazard_query(hazard_query* query, Instruction* instr, bool upwards)
556 {
557    /* don't schedule discards downwards */
558    if (!upwards && instr->opcode == aco_opcode::p_exit_early_if)
559       return hazard_fail_unreorderable;
560 
561    /* In Primitive Ordered Pixel Shading, await overlapped waves as late as possible, and notify
562     * overlapping waves that they can continue execution as early as possible.
563     */
564    if (upwards) {
565       if (instr->opcode == aco_opcode::p_pops_gfx9_add_exiting_wave_id ||
566           is_wait_export_ready(query->gfx_level, instr)) {
567          return hazard_fail_unreorderable;
568       }
569    } else {
570       if (instr->opcode == aco_opcode::p_pops_gfx9_ordered_section_done) {
571          return hazard_fail_unreorderable;
572       }
573    }
574 
575    if (query->uses_exec || query->writes_exec) {
576       for (const Definition& def : instr->definitions) {
577          if (def.isFixed() && def.physReg() == exec)
578             return hazard_fail_exec;
579       }
580    }
581    if (query->writes_exec && needs_exec_mask(instr))
582       return hazard_fail_exec;
583 
584    /* Don't move exports so that they stay closer together.
585     * Since GFX11, export order matters. MRTZ must come first,
586     * then color exports sorted from first to last.
587     * Also, with Primitive Ordered Pixel Shading on GFX11+, the `done` export must not be moved
588     * above the memory accesses before the queue family scope (more precisely, fragment interlock
589     * scope, but it's not available in ACO) release barrier that is expected to be inserted before
590     * the export, as well as before any `s_wait_event export_ready` which enters the ordered
591     * section, because the `done` export exits the ordered section.
592     */
593    if (instr->isEXP() || instr->opcode == aco_opcode::p_dual_src_export_gfx11)
594       return hazard_fail_export;
595 
596    /* don't move non-reorderable instructions */
597    if (instr->opcode == aco_opcode::s_memtime || instr->opcode == aco_opcode::s_memrealtime ||
598        instr->opcode == aco_opcode::s_setprio || instr->opcode == aco_opcode::s_getreg_b32 ||
599        instr->opcode == aco_opcode::p_shader_cycles_hi_lo_hi ||
600        instr->opcode == aco_opcode::p_init_scratch ||
601        instr->opcode == aco_opcode::p_jump_to_epilog ||
602        instr->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
603        instr->opcode == aco_opcode::s_sendmsg_rtn_b64 ||
604        instr->opcode == aco_opcode::p_end_with_regs || instr->opcode == aco_opcode::s_nop ||
605        instr->opcode == aco_opcode::s_sleep)
606       return hazard_fail_unreorderable;
607 
608    memory_event_set instr_set;
609    memset(&instr_set, 0, sizeof(instr_set));
610    memory_sync_info sync = get_sync_info_with_hack(instr);
611    add_memory_event(query->gfx_level, &instr_set, instr, &sync);
612 
613    memory_event_set* first = &instr_set;
614    memory_event_set* second = &query->mem_events;
615    if (upwards)
616       std::swap(first, second);
617 
618    /* everything after barrier(acquire) happens after the atomics/control_barriers before
619     * everything after load(acquire) happens after the load
620     */
621    if ((first->has_control_barrier || first->access_atomic) && second->bar_acquire)
622       return hazard_fail_barrier;
623    if (((first->access_acquire || first->bar_acquire) && second->bar_classes) ||
624        ((first->access_acquire | first->bar_acquire) &
625         (second->access_relaxed | second->access_atomic)))
626       return hazard_fail_barrier;
627 
628    /* everything before barrier(release) happens before the atomics/control_barriers after
629     * everything before store(release) happens before the store
630     */
631    if (first->bar_release && (second->has_control_barrier || second->access_atomic))
632       return hazard_fail_barrier;
633    if ((first->bar_classes && (second->bar_release || second->access_release)) ||
634        ((first->access_relaxed | first->access_atomic) &
635         (second->bar_release | second->access_release)))
636       return hazard_fail_barrier;
637 
638    /* don't move memory barriers around other memory barriers */
639    if (first->bar_classes && second->bar_classes)
640       return hazard_fail_barrier;
641 
642    /* Don't move memory accesses to before control barriers. I don't think
643     * this is necessary for the Vulkan memory model, but it might be for GLSL450. */
644    unsigned control_classes =
645       storage_buffer | storage_image | storage_shared | storage_task_payload;
646    if (first->has_control_barrier &&
647        ((second->access_atomic | second->access_relaxed) & control_classes))
648       return hazard_fail_barrier;
649 
650    /* don't move memory loads/stores past potentially aliasing loads/stores */
651    unsigned aliasing_storage =
652       instr->isSMEM() ? query->aliasing_storage_smem : query->aliasing_storage;
653    if ((sync.storage & aliasing_storage) && !(sync.semantics & semantic_can_reorder)) {
654       unsigned intersect = sync.storage & aliasing_storage;
655       if (intersect & storage_shared)
656          return hazard_fail_reorder_ds;
657       return hazard_fail_reorder_vmem_smem;
658    }
659 
660    if ((instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload) &&
661        query->contains_spill)
662       return hazard_fail_spill;
663 
664    if (instr->opcode == aco_opcode::s_sendmsg && query->contains_sendmsg)
665       return hazard_fail_reorder_sendmsg;
666 
667    return hazard_success;
668 }
669 
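/* Rough cost of moving past an instruction when forming VMEM store clauses: vector
 * pseudo-instructions count the registers they will likely have to copy, everything
 * else counts as 1. */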
670 unsigned
671 get_likely_cost(Instruction* instr)
672 {
673    if (instr->opcode == aco_opcode::p_split_vector ||
674        instr->opcode == aco_opcode::p_extract_vector) {
675       unsigned cost = 0;
676       for (Definition def : instr->definitions) {
677          if (instr->operands[0].isKill() &&
678              def.regClass().type() == instr->operands[0].regClass().type())
679             continue;
680          cost += def.size();
681       }
682       return cost;
683    } else if (instr->opcode == aco_opcode::p_create_vector) {
684       unsigned cost = 0;
685       for (Operand op : instr->operands) {
686          if (op.isTemp() && op.isFirstKill() &&
687              op.regClass().type() == instr->definitions[0].regClass().type())
688             continue;
689          cost += op.size();
690       }
691       return cost;
692    } else {
693       /* For the moment, just assume the same cost for all other instructions. */
694       return 1;
695    }
696 }
697 
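/* Schedules one SMEM instruction: independent instructions from above it are moved to
 * just below it, and independent instructions following it are moved up in front of its
 * first dependency, increasing the distance between the load and its first use. */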
698 void
699 schedule_SMEM(sched_ctx& ctx, Block* block, Instruction* current, int idx)
700 {
701    assert(idx != 0);
702    int window_size = SMEM_WINDOW_SIZE;
703    int max_moves = SMEM_MAX_MOVES;
704    int16_t k = 0;
705 
706    /* don't move s_memtime/s_memrealtime */
707    if (current->opcode == aco_opcode::s_memtime || current->opcode == aco_opcode::s_memrealtime ||
708        current->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
709        current->opcode == aco_opcode::s_sendmsg_rtn_b64)
710       return;
711 
712    /* first, check if we have instructions before current to move down */
713    hazard_query hq;
714    init_hazard_query(ctx, &hq);
715    add_to_hazard_query(&hq, current);
716 
717    DownwardsCursor cursor = ctx.mv.downwards_init(idx, false, false);
718 
719    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
720         candidate_idx--) {
721       assert(candidate_idx >= 0);
722       assert(candidate_idx == cursor.source_idx);
723       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
724 
725       /* break if we'd make the previous SMEM instruction stall */
726       bool can_stall_prev_smem =
727          idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
728       if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
729          break;
730 
731       /* break when encountering another MEM instruction, logical_start or barriers */
732       if (candidate->opcode == aco_opcode::p_logical_start)
733          break;
734       /* only move VMEM instructions below descriptor loads. be more aggressive at higher num_waves
735        * to help create more vmem clauses */
736       if ((candidate->isVMEM() || candidate->isFlatLike()) &&
737           (cursor.insert_idx - cursor.source_idx > (ctx.num_waves * 4) ||
738            current->operands[0].size() == 4))
739          break;
740       /* don't move descriptor loads below buffer loads */
741       if (candidate->isSMEM() && !candidate->operands.empty() && current->operands[0].size() == 4 &&
742           candidate->operands[0].size() == 2)
743          break;
744 
745       bool can_move_down = true;
746 
747       HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
748       if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
749           haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
750           haz == hazard_fail_export)
751          can_move_down = false;
752       else if (haz != hazard_success)
753          break;
754 
755       /* don't use LDS/GDS instructions to hide latency since it can
756        * significantly worsen LDS scheduling */
757       if (candidate->isDS() || !can_move_down) {
758          add_to_hazard_query(&hq, candidate.get());
759          ctx.mv.downwards_skip(cursor);
760          continue;
761       }
762 
763       MoveResult res = ctx.mv.downwards_move(cursor, false);
764       if (res == move_fail_ssa || res == move_fail_rar) {
765          add_to_hazard_query(&hq, candidate.get());
766          ctx.mv.downwards_skip(cursor);
767          continue;
768       } else if (res == move_fail_pressure) {
769          break;
770       }
771 
772       if (candidate_idx < ctx.last_SMEM_dep_idx)
773          ctx.last_SMEM_stall++;
774       k++;
775    }
776 
777    /* find the first instruction depending on current or find another MEM */
778    UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, false);
779 
780    bool found_dependency = false;
781    /* second, check if we have instructions after current to move up */
782    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
783         candidate_idx++) {
784       assert(candidate_idx == up_cursor.source_idx);
785       assert(candidate_idx < (int)block->instructions.size());
786       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
787 
788       if (candidate->opcode == aco_opcode::p_logical_end)
789          break;
790 
791       /* check if candidate depends on current */
792       bool is_dependency = !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
793       /* no need to steal from following VMEM instructions */
794       if (is_dependency && (candidate->isVMEM() || candidate->isFlatLike()))
795          break;
796 
797       if (found_dependency) {
798          HazardResult haz = perform_hazard_query(&hq, candidate.get(), true);
799          if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
800              haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
801              haz == hazard_fail_export)
802             is_dependency = true;
803          else if (haz != hazard_success)
804             break;
805       }
806 
807       if (is_dependency) {
808          if (!found_dependency) {
809             ctx.mv.upwards_update_insert_idx(up_cursor);
810             init_hazard_query(ctx, &hq);
811             found_dependency = true;
812          }
813       }
814 
815       if (is_dependency || !found_dependency) {
816          if (found_dependency)
817             add_to_hazard_query(&hq, candidate.get());
818          else
819             k++;
820          ctx.mv.upwards_skip(up_cursor);
821          continue;
822       }
823 
824       MoveResult res = ctx.mv.upwards_move(up_cursor);
825       if (res == move_fail_ssa || res == move_fail_rar) {
826          /* no need to steal from following VMEM instructions */
827          if (res == move_fail_ssa && (candidate->isVMEM() || candidate->isFlatLike()))
828             break;
829          add_to_hazard_query(&hq, candidate.get());
830          ctx.mv.upwards_skip(up_cursor);
831          continue;
832       } else if (res == move_fail_pressure) {
833          break;
834       }
835       k++;
836    }
837 
838    ctx.last_SMEM_dep_idx = found_dependency ? up_cursor.insert_idx : 0;
839    ctx.last_SMEM_stall = 10 - ctx.num_waves - k;
840 }
841 
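/* Schedules one VMEM/FLAT instruction with the same overall structure as schedule_SMEM,
 * but it can additionally pull compatible loads (should_form_clause) into a clause via
 * insert_idx_clause, and it switches to clause-only mode once a move fails because of
 * register pressure. */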
842 void
843 schedule_VMEM(sched_ctx& ctx, Block* block, Instruction* current, int idx)
844 {
845    assert(idx != 0);
846    int window_size = VMEM_WINDOW_SIZE;
847    int max_moves = VMEM_MAX_MOVES;
848    int clause_max_grab_dist = VMEM_CLAUSE_MAX_GRAB_DIST;
849    bool only_clauses = false;
850    int16_t k = 0;
851 
852    /* first, check if we have instructions before current to move down */
853    hazard_query indep_hq;
854    hazard_query clause_hq;
855    init_hazard_query(ctx, &indep_hq);
856    init_hazard_query(ctx, &clause_hq);
857    add_to_hazard_query(&indep_hq, current);
858 
859    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, true);
860 
861    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
862         candidate_idx--) {
863       assert(candidate_idx == cursor.source_idx);
864       assert(candidate_idx >= 0);
865       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
866       bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();
867 
868       /* break when encountering another VMEM instruction, logical_start or barriers */
869       if (candidate->opcode == aco_opcode::p_logical_start)
870          break;
871 
872       /* break if we'd make the previous SMEM instruction stall */
873       bool can_stall_prev_smem =
874          idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
875       if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
876          break;
877 
878       bool part_of_clause = false;
879       if (current->isVMEM() == candidate->isVMEM()) {
880          int grab_dist = cursor.insert_idx_clause - candidate_idx;
881          /* We can't easily tell how much this will decrease the def-to-use
882           * distances, so just use how far it will be moved as a heuristic. */
883          part_of_clause =
884             grab_dist < clause_max_grab_dist + k && should_form_clause(current, candidate.get());
885       }
886 
887       /* if current depends on candidate, add additional dependencies and continue */
888       bool can_move_down = !is_vmem || part_of_clause || candidate->definitions.empty();
889       if (only_clauses) {
890          /* In case of high register pressure, only try to form clauses,
891           * and only if the previous clause is not larger
892           * than the current one will be.
893           */
894          if (part_of_clause) {
895             int clause_size = cursor.insert_idx - cursor.insert_idx_clause;
896             int prev_clause_size = 1;
897             while (should_form_clause(current,
898                                       block->instructions[candidate_idx - prev_clause_size].get()))
899                prev_clause_size++;
900             if (prev_clause_size > clause_size + 1)
901                break;
902          } else {
903             can_move_down = false;
904          }
905       }
906       HazardResult haz =
907          perform_hazard_query(part_of_clause ? &clause_hq : &indep_hq, candidate.get(), false);
908       if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
909           haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
910           haz == hazard_fail_export)
911          can_move_down = false;
912       else if (haz != hazard_success)
913          break;
914 
915       if (!can_move_down) {
916          if (part_of_clause)
917             break;
918          add_to_hazard_query(&indep_hq, candidate.get());
919          add_to_hazard_query(&clause_hq, candidate.get());
920          ctx.mv.downwards_skip(cursor);
921          continue;
922       }
923 
924       Instruction* candidate_ptr = candidate.get();
925       MoveResult res = ctx.mv.downwards_move(cursor, part_of_clause);
926       if (res == move_fail_ssa || res == move_fail_rar) {
927          if (part_of_clause)
928             break;
929          add_to_hazard_query(&indep_hq, candidate.get());
930          add_to_hazard_query(&clause_hq, candidate.get());
931          ctx.mv.downwards_skip(cursor);
932          continue;
933       } else if (res == move_fail_pressure) {
934          only_clauses = true;
935          if (part_of_clause)
936             break;
937          add_to_hazard_query(&indep_hq, candidate.get());
938          add_to_hazard_query(&clause_hq, candidate.get());
939          ctx.mv.downwards_skip(cursor);
940          continue;
941       }
942       if (part_of_clause)
943          add_to_hazard_query(&indep_hq, candidate_ptr);
944       else
945          k++;
946       if (candidate_idx < ctx.last_SMEM_dep_idx)
947          ctx.last_SMEM_stall++;
948    }
949 
950    /* find the first instruction depending on current or find another VMEM */
951    UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, true);
952 
953    bool found_dependency = false;
954    /* second, check if we have instructions after current to move up */
955    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
956         candidate_idx++) {
957       assert(candidate_idx == up_cursor.source_idx);
958       assert(candidate_idx < (int)block->instructions.size());
959       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
960       bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();
961 
962       if (candidate->opcode == aco_opcode::p_logical_end)
963          break;
964 
965       /* check if candidate depends on current */
966       bool is_dependency = false;
967       if (found_dependency) {
968          HazardResult haz = perform_hazard_query(&indep_hq, candidate.get(), true);
969          if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
970              haz == hazard_fail_reorder_vmem_smem || haz == hazard_fail_reorder_sendmsg ||
971              haz == hazard_fail_barrier || haz == hazard_fail_export)
972             is_dependency = true;
973          else if (haz != hazard_success)
974             break;
975       }
976 
977       is_dependency |= !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
978       if (is_dependency) {
979          if (!found_dependency) {
980             ctx.mv.upwards_update_insert_idx(up_cursor);
981             init_hazard_query(ctx, &indep_hq);
982             found_dependency = true;
983          }
984       } else if (is_vmem) {
985          /* don't move up dependencies of other VMEM instructions */
986          for (const Definition& def : candidate->definitions) {
987             if (def.isTemp())
988                ctx.mv.depends_on[def.tempId()] = true;
989          }
990       }
991 
992       if (is_dependency || !found_dependency) {
993          if (found_dependency)
994             add_to_hazard_query(&indep_hq, candidate.get());
995          else
996             k++;
997          ctx.mv.upwards_skip(up_cursor);
998          continue;
999       }
1000 
1001       MoveResult res = ctx.mv.upwards_move(up_cursor);
1002       if (res == move_fail_ssa || res == move_fail_rar) {
1003          add_to_hazard_query(&indep_hq, candidate.get());
1004          ctx.mv.upwards_skip(up_cursor);
1005          continue;
1006       } else if (res == move_fail_pressure) {
1007          break;
1008       }
1009       k++;
1010    }
1011 }
1012 
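/* Schedules one LDS/LDSDIR instruction within a small window; the search stops at the
 * first VMEM/FLAT/SMEM instruction in either direction. */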
1013 void
1014 schedule_LDS(sched_ctx& ctx, Block* block, Instruction* current, int idx)
1015 {
1016    assert(idx != 0);
1017    int window_size = LDS_WINDOW_SIZE;
1018    int max_moves = current->isLDSDIR() ? LDSDIR_MAX_MOVES : LDS_MAX_MOVES;
1019    int16_t k = 0;
1020 
1021    /* first, check if we have instructions before current to move down */
1022    hazard_query hq;
1023    init_hazard_query(ctx, &hq);
1024    add_to_hazard_query(&hq, current);
1025 
1026    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, false);
1027 
1028    for (int i = 0; k < max_moves && i < window_size; i++) {
1029       aco_ptr<Instruction>& candidate = block->instructions[cursor.source_idx];
1030       bool is_mem = candidate->isVMEM() || candidate->isFlatLike() || candidate->isSMEM();
1031       if (candidate->opcode == aco_opcode::p_logical_start || is_mem)
1032          break;
1033 
1034       if (candidate->isDS() || candidate->isLDSDIR()) {
1035          add_to_hazard_query(&hq, candidate.get());
1036          ctx.mv.downwards_skip(cursor);
1037          continue;
1038       }
1039 
1040       if (perform_hazard_query(&hq, candidate.get(), false) != hazard_success ||
1041           ctx.mv.downwards_move(cursor, false) != move_success)
1042          break;
1043 
1044       k++;
1045    }
1046 
1047    /* second, check if we have instructions after current to move up */
1048    bool found_dependency = false;
1049    int i = 0;
1050    UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, true);
1051    /* find the first instruction depending on current */
1052    for (; k < max_moves && i < window_size; i++) {
1053       aco_ptr<Instruction>& candidate = block->instructions[up_cursor.source_idx];
1054       bool is_mem = candidate->isVMEM() || candidate->isFlatLike() || candidate->isSMEM();
1055       if (candidate->opcode == aco_opcode::p_logical_end || is_mem)
1056          break;
1057 
1058       /* check if candidate depends on current */
1059       if (!ctx.mv.upwards_check_deps(up_cursor)) {
1060          init_hazard_query(ctx, &hq);
1061          add_to_hazard_query(&hq, candidate.get());
1062          ctx.mv.upwards_update_insert_idx(up_cursor);
1063          ctx.mv.upwards_skip(up_cursor);
1064          found_dependency = true;
1065          i++;
1066          break;
1067       }
1068 
1069       ctx.mv.upwards_skip(up_cursor);
1070    }
1071 
1072    for (; found_dependency && k < max_moves && i < window_size; i++) {
1073       aco_ptr<Instruction>& candidate = block->instructions[up_cursor.source_idx];
1074       bool is_mem = candidate->isVMEM() || candidate->isFlatLike() || candidate->isSMEM();
1075       if (candidate->opcode == aco_opcode::p_logical_end || is_mem)
1076          break;
1077 
1078       HazardResult haz = perform_hazard_query(&hq, candidate.get(), true);
1079       if (haz == hazard_fail_exec || haz == hazard_fail_unreorderable)
1080          break;
1081 
1082       if (haz != hazard_success || ctx.mv.upwards_move(up_cursor) != move_success) {
1083          add_to_hazard_query(&hq, candidate.get());
1084          ctx.mv.upwards_skip(up_cursor);
1085       } else {
1086          k++;
1087       }
1088    }
1089 }
1090 
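/* Moves independent instructions from above a position/primitive export down below it,
 * so that the export executes earlier relative to the work it does not depend on. */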
1091 void
1092 schedule_position_export(sched_ctx& ctx, Block* block, Instruction* current, int idx)
1093 {
1094    assert(idx != 0);
1095    int window_size = POS_EXP_WINDOW_SIZE / ctx.schedule_pos_export_div;
1096    int max_moves = POS_EXP_MAX_MOVES / ctx.schedule_pos_export_div;
1097    int16_t k = 0;
1098 
1099    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, false);
1100 
1101    hazard_query hq;
1102    init_hazard_query(ctx, &hq);
1103    add_to_hazard_query(&hq, current);
1104 
1105    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
1106         candidate_idx--) {
1107       assert(candidate_idx >= 0);
1108       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
1109 
1110       if (candidate->opcode == aco_opcode::p_logical_start)
1111          break;
1112       if (candidate->isVMEM() || candidate->isSMEM() || candidate->isFlatLike())
1113          break;
1114 
1115       HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
1116       if (haz == hazard_fail_exec || haz == hazard_fail_unreorderable)
1117          break;
1118 
1119       if (haz != hazard_success) {
1120          add_to_hazard_query(&hq, candidate.get());
1121          ctx.mv.downwards_skip(cursor);
1122          continue;
1123       }
1124 
1125       MoveResult res = ctx.mv.downwards_move(cursor, false);
1126       if (res == move_fail_ssa || res == move_fail_rar) {
1127          add_to_hazard_query(&hq, candidate.get());
1128          ctx.mv.downwards_skip(cursor);
1129          continue;
1130       } else if (res == move_fail_pressure) {
1131          break;
1132       }
1133       k++;
1134    }
1135 }
1136 
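/* Pulls earlier stores accepted by should_form_clause() down to the current store to
 * build a VMEM store clause. Returns the number of instructions added to the clause so
 * that the caller can step over the whole clause while iterating backwards. */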
1137 unsigned
1138 schedule_VMEM_store(sched_ctx& ctx, Block* block, Instruction* current, int idx)
1139 {
1140    hazard_query hq;
1141    init_hazard_query(ctx, &hq);
1142 
1143    DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, true);
1144    int skip = 0;
1145 
1146    for (int16_t k = 0; k < VMEM_STORE_CLAUSE_MAX_GRAB_DIST;) {
1147       aco_ptr<Instruction>& candidate = block->instructions[cursor.source_idx];
1148       if (candidate->opcode == aco_opcode::p_logical_start)
1149          break;
1150 
1151       if (!should_form_clause(current, candidate.get())) {
1152          add_to_hazard_query(&hq, candidate.get());
1153          ctx.mv.downwards_skip(cursor);
1154          k += get_likely_cost(candidate.get());
1155          continue;
1156       }
1157 
1158       if (perform_hazard_query(&hq, candidate.get(), false) != hazard_success ||
1159           ctx.mv.downwards_move(cursor, true) != move_success)
1160          break;
1161 
1162       skip++;
1163    }
1164 
1165    return skip;
1166 }
1167 
1168 void
1169 schedule_block(sched_ctx& ctx, Program* program, Block* block)
1170 {
1171    ctx.last_SMEM_dep_idx = 0;
1172    ctx.last_SMEM_stall = INT16_MIN;
1173    ctx.mv.block = block;
1174 
1175    /* go through all instructions and find memory loads */
1176    unsigned num_stores = 0;
1177    for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
1178       Instruction* current = block->instructions[idx].get();
1179 
1180       if (current->opcode == aco_opcode::p_logical_end)
1181          break;
1182 
1183       if (block->kind & block_kind_export_end && current->isEXP() && ctx.schedule_pos_exports) {
1184          unsigned target = current->exp().dest;
1185          if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PRIM) {
1186             ctx.mv.current = current;
1187             schedule_position_export(ctx, block, current, idx);
1188          }
1189       }
1190 
1191       if (current->definitions.empty()) {
1192          num_stores += current->isVMEM() || current->isFlatLike() ? 1 : 0;
1193          continue;
1194       }
1195 
1196       if (current->isVMEM() || current->isFlatLike()) {
1197          ctx.mv.current = current;
1198          schedule_VMEM(ctx, block, current, idx);
1199       }
1200 
1201       if (current->isSMEM()) {
1202          ctx.mv.current = current;
1203          schedule_SMEM(ctx, block, current, idx);
1204       }
1205 
1206       if (current->isLDSDIR() || (current->isDS() && !current->ds().gds)) {
1207          ctx.mv.current = current;
1208          schedule_LDS(ctx, block, current, idx);
1209       }
1210    }
1211 
1212    /* GFX11 benefits from creating VMEM store clauses. */
1213    if (num_stores > 1 && program->gfx_level >= GFX11) {
1214       for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
1215          Instruction* current = block->instructions[idx].get();
1216          if (!current->definitions.empty() || !(current->isVMEM() || current->isFlatLike()))
1217             continue;
1218 
1219          ctx.mv.current = current;
1220          idx -= schedule_VMEM_store(ctx, block, current, idx);
1221       }
1222    }
1223 
1224    /* resummarize the block's register demand */
1225    block->register_demand = block->live_in_demand;
1226    for (const aco_ptr<Instruction>& instr : block->instructions)
1227       block->register_demand.update(instr->register_demand);
1228 }
1229 
1230 } /* end namespace */
1231 
1232 void
1233 schedule_program(Program* program)
1234 {
1235    /* don't use program->max_reg_demand because that is affected by max_waves_per_simd */
1236    RegisterDemand demand;
1237    for (Block& block : program->blocks)
1238       demand.update(block.register_demand);
1239    demand.vgpr += program->config->num_shared_vgprs / 2;
1240 
1241    sched_ctx ctx;
1242    ctx.gfx_level = program->gfx_level;
1243    ctx.mv.depends_on.resize(program->peekAllocationId());
1244    ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
1245    ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());
1246    /* Allowing the scheduler to reduce the number of waves to as low as 5
1247     * improves performance of Thrones of Britannia significantly and doesn't
1248     * seem to hurt anything else. */
1249    // TODO: account for possible uneven num_waves on GFX10+
1250    unsigned wave_fac = program->dev.physical_vgprs / 256;
1251    if (program->num_waves <= 5 * wave_fac)
1252       ctx.num_waves = program->num_waves;
1253    else if (demand.vgpr >= 29)
1254       ctx.num_waves = 5 * wave_fac;
1255    else if (demand.vgpr >= 25)
1256       ctx.num_waves = 6 * wave_fac;
1257    else
1258       ctx.num_waves = 7 * wave_fac;
1259    ctx.num_waves = std::max<uint16_t>(ctx.num_waves, program->min_waves);
1260    ctx.num_waves = std::min<uint16_t>(ctx.num_waves, program->num_waves);
1261    ctx.num_waves = max_suitable_waves(program, ctx.num_waves);
1262 
1263    /* VMEM_MAX_MOVES and such assume pre-GFX10 wave count */
1264    ctx.num_waves = std::max<uint16_t>(ctx.num_waves / wave_fac, 1);
1265 
1266    assert(ctx.num_waves > 0);
1267    ctx.mv.max_registers = {int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves * wave_fac) - 2),
1268                            int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves * wave_fac))};
1269 
1270    /* NGG culling shaders are very sensitive to position export scheduling.
1271     * Schedule less aggressively when early primitive export is used, and
1272     * keep the position export at the very bottom when late primitive export is used.
1273     */
1274    if (program->info.has_ngg_culling && program->stage.num_sw_stages() == 1) {
1275       if (!program->info.has_ngg_early_prim_export)
1276          ctx.schedule_pos_exports = false;
1277       else
1278          ctx.schedule_pos_export_div = 4;
1279    }
1280 
1281    for (Block& block : program->blocks)
1282       schedule_block(ctx, program, &block);
1283 
1284    /* update max_reg_demand and num_waves */
1285    RegisterDemand new_demand;
1286    for (Block& block : program->blocks) {
1287       new_demand.update(block.register_demand);
1288    }
1289    update_vgpr_sgpr_demand(program, new_demand);
1290 
1291    /* Validate live variable information */
1292    if (!validate_live_vars(program))
1293       abort();
1294 }
1295 
1296 } // namespace aco
1297