/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_ir.h"

namespace aco {

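/* Returns the change in register demand caused by this instruction: definitions
 * which remain live afterwards increase the demand, operands which are killed
 * here decrease it.
 */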
RegisterDemand
get_live_changes(Instruction* instr)
{
   RegisterDemand changes;
   for (const Definition& def : instr->definitions) {
      if (!def.isTemp() || def.isKill())
         continue;
      changes += def.getTemp();
   }

   for (const Operand& op : instr->operands) {
      if (!op.isTemp() || !op.isFirstKill())
         continue;
      changes -= op.getTemp();
   }

   return changes;
}

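/* Returns the register demand before and during this instruction, expressed
 * relative to the demand directly after it: killed operands are still live
 * before the instruction, while killed definitions, late-kill operands and
 * clobbered operands occupy registers while it executes.
 */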
RegisterDemand
get_temp_registers(Instruction* instr)
{
   RegisterDemand demand_before;
   RegisterDemand demand_after;

   for (Definition def : instr->definitions) {
      if (def.isKill())
         demand_after += def.getTemp();
      else if (def.isTemp())
         demand_before -= def.getTemp();
   }

   for (Operand op : instr->operands) {
      if (op.isFirstKill() || op.isCopyKill()) {
         demand_before += op.getTemp();
         if (op.isLateKill())
            demand_after += op.getTemp();
      } else if (op.isClobbered() && !op.isKill()) {
         demand_before += op.getTemp();
      }
   }

   demand_after.update(demand_before);
   return demand_after;
}

namespace {

struct live_ctx {
   monotonic_buffer_resource m;
   Program* program;
   int32_t worklist;
   uint32_t handled_once;
};

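/* VOPC instructions implicitly write VCC. VOP2 instructions (outside the VOP3
 * encoding) use VCC for an SGPR carry-in operand or a second, carry-out definition.
 */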
bool
instr_needs_vcc(Instruction* instr)
{
   if (instr->isVOPC())
      return true;
   if (instr->isVOP2() && !instr->isVOP3()) {
      if (instr->operands.size() == 3 && instr->operands[2].isTemp() &&
          instr->operands[2].regClass().type() == RegType::sgpr)
         return true;
      if (instr->definitions.size() == 2)
         return true;
   }
   return false;
}

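/* Computes the live-out set of a block as the union of its successors' live-in
 * sets: linear temporaries are propagated along linear edges, logical temporaries
 * along logical edges. Operands of live phis in already-processed successors are
 * live-out along the corresponding incoming edge.
 */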
IDSet
compute_live_out(live_ctx& ctx, Block* block)
{
   IDSet live(ctx.m);

   if (block->logical_succs.empty()) {
      /* Linear blocks:
       * Directly insert the successor if it is a linear block as well.
       */
      for (unsigned succ : block->linear_succs) {
         if (ctx.program->blocks[succ].logical_preds.empty()) {
            live.insert(ctx.program->live.live_in[succ]);
         } else {
            for (unsigned t : ctx.program->live.live_in[succ]) {
               if (ctx.program->temp_rc[t].is_linear())
                  live.insert(t);
            }
         }
      }
   } else {
      /* Logical blocks:
       * Linear successors are either linear blocks or logical targets.
       */
      live = IDSet(ctx.program->live.live_in[block->linear_succs[0]], ctx.m);
      if (block->linear_succs.size() == 2)
         live.insert(ctx.program->live.live_in[block->linear_succs[1]]);

      /* At most one logical target needs a separate insertion. */
      if (block->logical_succs.back() != block->linear_succs.back()) {
         for (unsigned t : ctx.program->live.live_in[block->logical_succs.back()]) {
            if (!ctx.program->temp_rc[t].is_linear())
               live.insert(t);
         }
      } else {
         assert(block->logical_succs[0] == block->linear_succs[0]);
      }
   }

   /* Handle phi operands */
   if (block->linear_succs.size() == 1 && block->linear_succs[0] >= ctx.handled_once) {
      Block& succ = ctx.program->blocks[block->linear_succs[0]];
      auto it = std::find(succ.linear_preds.begin(), succ.linear_preds.end(), block->index);
      unsigned op_idx = std::distance(succ.linear_preds.begin(), it);
      for (aco_ptr<Instruction>& phi : succ.instructions) {
         if (!is_phi(phi))
            break;
         if (phi->opcode == aco_opcode::p_phi || phi->definitions[0].isKill())
            continue;
         if (phi->operands[op_idx].isTemp())
            live.insert(phi->operands[op_idx].tempId());
      }
   }
   if (block->logical_succs.size() == 1 && block->logical_succs[0] >= ctx.handled_once) {
      Block& succ = ctx.program->blocks[block->logical_succs[0]];
      auto it = std::find(succ.logical_preds.begin(), succ.logical_preds.end(), block->index);
      unsigned op_idx = std::distance(succ.logical_preds.begin(), it);
      for (aco_ptr<Instruction>& phi : succ.instructions) {
         if (!is_phi(phi))
            break;
         if (phi->opcode == aco_opcode::p_linear_phi || phi->definitions[0].isKill())
            continue;
         if (phi->operands[op_idx].isTemp())
            live.insert(phi->operands[op_idx].tempId());
      }
   }

   return live;
}

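/* Performs a backwards pass over the block: starting from the live-out set, kill
 * flags and the register demand of every instruction are recomputed, then phi
 * definitions and operands are handled. If the block's live-in set changes, its
 * predecessors are added back to the worklist.
 */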
void
process_live_temps_per_block(live_ctx& ctx, Block* block)
{
   RegisterDemand new_demand;
   block->register_demand = RegisterDemand();
   IDSet live = compute_live_out(ctx, block);

   /* initialize register demand */
   for (unsigned t : live)
      new_demand += Temp(t, ctx.program->temp_rc[t]);

   /* traverse the instructions backwards */
   int idx;
   for (idx = block->instructions.size() - 1; idx >= 0; idx--) {
      Instruction* insn = block->instructions[idx].get();
      if (is_phi(insn))
         break;

      ctx.program->needs_vcc |= instr_needs_vcc(insn);
      insn->register_demand = RegisterDemand(new_demand.vgpr, new_demand.sgpr);

      bool has_vgpr_def = false;

      /* KILL */
      for (Definition& definition : insn->definitions) {
         has_vgpr_def |= definition.regClass().type() == RegType::vgpr &&
                         !definition.regClass().is_linear_vgpr();

         if (!definition.isTemp()) {
            continue;
         }
         if (definition.isFixed() && definition.physReg() == vcc)
            ctx.program->needs_vcc = true;

         const Temp temp = definition.getTemp();
         const size_t n = live.erase(temp.id());

         if (n) {
            new_demand -= temp;
            definition.setKill(false);
         } else {
            insn->register_demand += temp;
            definition.setKill(true);
         }
      }

      if (ctx.program->gfx_level >= GFX10 && insn->isVALU() &&
          insn->definitions.back().regClass() == s2) {
         /* RDNA2 ISA doc, 6.2.4. Wave64 Destination Restrictions:
          * The first pass of a wave64 VALU instruction may not overwrite a scalar value used by
          * the second half.
          */
         bool carry_in = insn->opcode == aco_opcode::v_addc_co_u32 ||
                         insn->opcode == aco_opcode::v_subb_co_u32 ||
                         insn->opcode == aco_opcode::v_subbrev_co_u32;
         for (unsigned op_idx = 0; op_idx < (carry_in ? 2 : insn->operands.size()); op_idx++) {
            if (insn->operands[op_idx].isOfType(RegType::sgpr))
               insn->operands[op_idx].setLateKill(true);
         }
      } else if (insn->opcode == aco_opcode::p_bpermute_readlane ||
                 insn->opcode == aco_opcode::p_bpermute_permlane ||
                 insn->opcode == aco_opcode::p_bpermute_shared_vgpr ||
                 insn->opcode == aco_opcode::p_dual_src_export_gfx11 ||
                 insn->opcode == aco_opcode::v_mqsad_u32_u8) {
         for (Operand& op : insn->operands)
            op.setLateKill(true);
      } else if (insn->opcode == aco_opcode::p_interp_gfx11 && insn->operands.size() == 7) {
         insn->operands[5].setLateKill(true); /* we re-use the destination reg in the middle */
      } else if (insn->opcode == aco_opcode::v_interp_p1_f32 && ctx.program->dev.has_16bank_lds) {
         insn->operands[0].setLateKill(true);
      } else if (insn->opcode == aco_opcode::p_init_scratch) {
         insn->operands.back().setLateKill(true);
      } else if (instr_info.classes[(int)insn->opcode] == instr_class::wmma) {
         insn->operands[0].setLateKill(true);
         insn->operands[1].setLateKill(true);
      }

      /* Check if a definition clobbers some operand */
      int op_idx = get_op_fixed_to_def(insn);
      if (op_idx != -1)
         insn->operands[op_idx].setClobbered(true);

      /* we need to do this in a separate loop because the next one can
       * setKill() for several operands at once and we don't want to
       * overwrite that in a later iteration */
      for (Operand& op : insn->operands) {
         op.setKill(false);
         /* Linear vgprs must be late kill: this is to ensure linear VGPR operands and
          * normal VGPR definitions don't try to use the same register, which is problematic
          * because of assignment restrictions.
          */
         if (op.hasRegClass() && op.regClass().is_linear_vgpr() && !op.isUndefined() &&
             has_vgpr_def)
            op.setLateKill(true);
      }

      /* GEN */
      RegisterDemand operand_demand;
      for (unsigned i = 0; i < insn->operands.size(); ++i) {
         Operand& operand = insn->operands[i];
         if (!operand.isTemp())
            continue;

         const Temp temp = operand.getTemp();
         if (operand.isFixed() && ctx.program->progress < CompilationProgress::after_ra) {
            assert(!operand.isLateKill());
            ctx.program->needs_vcc |= operand.physReg() == vcc;

            /* Check if this operand gets overwritten by a precolored definition. */
            if (std::any_of(insn->definitions.begin(), insn->definitions.end(),
                            [=](Definition def)
                            {
                               return def.isFixed() &&
                                      def.physReg() + def.size() > operand.physReg() &&
                                      operand.physReg() + operand.size() > def.physReg();
                            }))
               operand.setClobbered(true);

            /* Check if this temp is fixed to a different register as well.
             * This assumes that operands of one instruction are not precolored twice to
             * the same register. In this case, register pressure might be overestimated.
             */
            for (unsigned j = i + 1; !operand.isCopyKill() && j < insn->operands.size(); ++j) {
               if (insn->operands[j].isTemp() && insn->operands[j].getTemp() == temp &&
                   insn->operands[j].isFixed()) {
                  operand_demand += temp;
                  insn->operands[j].setCopyKill(true);
               }
            }
         }

         if (operand.isKill())
            continue;

         if (live.insert(temp.id()).second) {
            operand.setFirstKill(true);
            for (unsigned j = i + 1; j < insn->operands.size(); ++j) {
               if (insn->operands[j].isTemp() && insn->operands[j].getTemp() == temp)
                  insn->operands[j].setKill(true);
            }
            if (operand.isLateKill())
               insn->register_demand += temp;
            new_demand += temp;
         } else if (operand.isClobbered()) {
            operand_demand += temp;
         }
      }

      operand_demand += new_demand;
      insn->register_demand.update(operand_demand);
      block->register_demand.update(insn->register_demand);
   }

   /* handle phi definitions */
   for (int phi_idx = 0; phi_idx <= idx; phi_idx++) {
      Instruction* insn = block->instructions[phi_idx].get();
      insn->register_demand = new_demand;

      assert(is_phi(insn) && insn->definitions.size() == 1);
      if (!insn->definitions[0].isTemp()) {
         assert(insn->definitions[0].isFixed() && insn->definitions[0].physReg() == exec);
         continue;
      }
      Definition& definition = insn->definitions[0];
      if (definition.isFixed() && definition.physReg() == vcc)
         ctx.program->needs_vcc = true;
      const size_t n = live.erase(definition.tempId());
      if (n && (definition.isKill() || ctx.handled_once > block->index)) {
         Block::edge_vec& preds =
            insn->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
         for (unsigned i = 0; i < preds.size(); i++) {
            if (insn->operands[i].isTemp())
               ctx.worklist = std::max<int>(ctx.worklist, preds[i]);
         }
      }
      definition.setKill(!n);
   }

   /* handle phi operands */
   for (int phi_idx = 0; phi_idx <= idx; phi_idx++) {
      Instruction* insn = block->instructions[phi_idx].get();
      assert(is_phi(insn));
      /* Ignore dead phis. */
      if (insn->definitions[0].isKill())
         continue;
      for (Operand& operand : insn->operands) {
         if (!operand.isTemp())
            continue;
         if (operand.isFixed() && operand.physReg() == vcc)
            ctx.program->needs_vcc = true;

         /* set if the operand is killed by this (or another) phi instruction */
         operand.setKill(!live.count(operand.tempId()));
      }
   }

   if (ctx.program->live.live_in[block->index].insert(live)) {
      if (block->linear_preds.size()) {
         assert(block->logical_preds.empty() ||
                block->logical_preds.back() <= block->linear_preds.back());
         ctx.worklist = std::max<int>(ctx.worklist, block->linear_preds.back());
      } else {
         ASSERTED bool is_valid = validate_ir(ctx.program);
         assert(!is_valid);
      }
   }

   block->live_in_demand = new_demand;
   block->live_in_demand.sgpr += 2; /* Add 2 SGPRs for potential long-jumps. */
   block->register_demand.update(block->live_in_demand);
   ctx.program->max_reg_demand.update(block->register_demand);
   ctx.handled_once = std::min(ctx.handled_once, block->index);

   assert(!block->linear_preds.empty() || (new_demand == RegisterDemand() && live.empty()));
}

unsigned
calc_waves_per_workgroup(Program* program)
{
   /* When workgroup size is not known, just go with wave_size */
   unsigned workgroup_size =
      program->workgroup_size == UINT_MAX ? program->wave_size : program->workgroup_size;

   return align(workgroup_size, program->wave_size) / program->wave_size;
}
} /* end namespace */

bool
uses_scratch(Program* program)
{
   /* RT uses scratch but we don't yet know how much. */
   return program->config->scratch_bytes_per_wave || program->stage == raytracing_cs;
}

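/* Returns the number of SGPRs which must be allocated in addition to the
 * addressable SGPRs for VCC, FLAT_SCRATCH and XNACK_MASK, depending on the
 * hardware generation.
 */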
uint16_t
get_extra_sgprs(Program* program)
{
   /* We don't use this register on GFX6-8 and it's removed on GFX10+. */
   bool needs_flat_scr = uses_scratch(program) && program->gfx_level == GFX9;

   if (program->gfx_level >= GFX10) {
      assert(!program->dev.xnack_enabled);
      return 0;
   } else if (program->gfx_level >= GFX8) {
      if (needs_flat_scr)
         return 6;
      else if (program->dev.xnack_enabled)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   } else {
      assert(!program->dev.xnack_enabled);
      if (needs_flat_scr)
         return 4;
      else if (program->needs_vcc)
         return 2;
      else
         return 0;
   }
}

uint16_t
get_sgpr_alloc(Program* program, uint16_t addressable_sgprs)
{
   uint16_t sgprs = addressable_sgprs + get_extra_sgprs(program);
   uint16_t granule = program->dev.sgpr_alloc_granule;
   return ALIGN_NPOT(std::max(sgprs, granule), granule);
}

uint16_t
get_vgpr_alloc(Program* program, uint16_t addressable_vgprs)
{
   assert(addressable_vgprs <= program->dev.vgpr_limit);
   uint16_t granule = program->dev.vgpr_alloc_granule;
   return ALIGN_NPOT(std::max(addressable_vgprs, granule), granule);
}

unsigned
round_down(unsigned a, unsigned b)
{
   return a - (a % b);
}

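/* Returns the number of addressable SGPRs per wave when running the given number
 * of waves per SIMD, rounded down to the allocation granule and excluding the
 * reserved extra SGPRs.
 */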
uint16_t
get_addr_sgpr_from_waves(Program* program, uint16_t waves)
{
   /* it's not possible to allocate more than 128 SGPRs */
   uint16_t sgprs = std::min(program->dev.physical_sgprs / waves, 128);
   sgprs = round_down(sgprs, program->dev.sgpr_alloc_granule);
   sgprs -= get_extra_sgprs(program);
   return std::min(sgprs, program->dev.sgpr_limit);
}

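/* Returns the number of addressable VGPRs per wave when running the given number
 * of waves per SIMD, rounded down to the allocation granule and excluding shared
 * VGPRs.
 */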
uint16_t
get_addr_vgpr_from_waves(Program* program, uint16_t waves)
{
   uint16_t vgprs = program->dev.physical_vgprs / waves;
   vgprs = vgprs / program->dev.vgpr_alloc_granule * program->dev.vgpr_alloc_granule;
   vgprs -= program->config->num_shared_vgprs / 2;
   return std::min(vgprs, program->dev.vgpr_limit);
}

void
calc_min_waves(Program* program)
{
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned simd_per_cu_wgp = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   program->min_waves = DIV_ROUND_UP(waves_per_workgroup, simd_per_cu_wgp);
}

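/* Limits the given number of waves per SIMD to what the LDS usage and the
 * hardware's workgroups-per-CU/WGP limit allow for complete workgroups.
 */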
uint16_t
max_suitable_waves(Program* program, uint16_t waves)
{
   unsigned num_simd = program->dev.simd_per_cu * (program->wgp_mode ? 2 : 1);
   unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
   unsigned num_workgroups = waves * num_simd / waves_per_workgroup;

   /* Adjust #workgroups for LDS */
   unsigned lds_per_workgroup = align(program->config->lds_size * program->dev.lds_encoding_granule,
                                      program->dev.lds_alloc_granule);

   if (program->stage == fragment_fs) {
      /* PS inputs are moved from PC (parameter cache) to LDS before PS waves are launched.
       * Each PS input occupies 3x vec4 of LDS space. See Figure 10.3 in GCN3 ISA manual.
       * These limit occupancy the same way as other stages' LDS usage does.
       */
      unsigned lds_bytes_per_interp = 3 * 16;
      unsigned lds_param_bytes = lds_bytes_per_interp * program->info.ps.num_interp;
      lds_per_workgroup += align(lds_param_bytes, program->dev.lds_alloc_granule);
   }
   unsigned lds_limit = program->wgp_mode ? program->dev.lds_limit * 2 : program->dev.lds_limit;
   if (lds_per_workgroup)
      num_workgroups = std::min(num_workgroups, lds_limit / lds_per_workgroup);

   /* Hardware limitation */
   if (waves_per_workgroup > 1)
      num_workgroups = std::min(num_workgroups, program->wgp_mode ? 32u : 16u);

   /* Adjust #waves for workgroup multiples:
    * In cases like waves_per_workgroup=3, or lds=65536 and waves_per_workgroup=1,
    * we want the maximum possible number of waves per SIMD and not the minimum,
    * so DIV_ROUND_UP is used.
    */
   unsigned workgroup_waves = num_workgroups * waves_per_workgroup;
   return DIV_ROUND_UP(workgroup_waves, num_simd);
}

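/* Recomputes the number of waves and the maximum register demand the program may
 * use. If the demand exceeds what is addressable at the minimum wave count,
 * num_waves is set to 0 and register pressure has to be reduced first.
 */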
void
update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand)
{
   assert(program->min_waves >= 1);
   uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);

   /* this won't compile, register pressure reduction necessary */
   if (new_demand.vgpr > vgpr_limit || new_demand.sgpr > sgpr_limit) {
      program->num_waves = 0;
      program->max_reg_demand = new_demand;
   } else {
      program->num_waves = program->dev.physical_sgprs / get_sgpr_alloc(program, new_demand.sgpr);
      uint16_t vgpr_demand =
         get_vgpr_alloc(program, new_demand.vgpr) + program->config->num_shared_vgprs / 2;
      program->num_waves =
         std::min<uint16_t>(program->num_waves, program->dev.physical_vgprs / vgpr_demand);
      program->num_waves = std::min(program->num_waves, program->dev.max_waves_per_simd);

      /* Adjust for LDS and workgroup multiples and calculate max_reg_demand */
      program->num_waves = max_suitable_waves(program, program->num_waves);
      program->max_reg_demand.vgpr = get_addr_vgpr_from_waves(program, program->num_waves);
      program->max_reg_demand.sgpr = get_addr_sgpr_from_waves(program, program->num_waves);
   }
}

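/* Computes the per-block live-in sets as well as the per-instruction and per-block
 * register demand for the whole program, iterating the blocks backwards until a
 * fixed point is reached.
 */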
void
live_var_analysis(Program* program)
{
   program->live.live_in.clear();
   program->live.memory.release();
   program->live.live_in.resize(program->blocks.size(), IDSet(program->live.memory));
   program->max_reg_demand = RegisterDemand();
   program->needs_vcc = program->gfx_level >= GFX10;

   live_ctx ctx;
   ctx.program = program;
   ctx.worklist = program->blocks.size() - 1;
   ctx.handled_once = program->blocks.size();

   /* this implementation assumes that the block idx corresponds to the block's position in
    * program->blocks vector */
   while (ctx.worklist >= 0) {
      process_live_temps_per_block(ctx, &program->blocks[ctx.worklist--]);
   }

   /* calculate the program's register demand and number of waves */
   if (program->progress < CompilationProgress::after_ra)
      update_vgpr_sgpr_demand(program, program->max_reg_demand);
}

} // namespace aco