/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#include "elk_eu.h"
#include "elk_fs.h"
#include "elk_fs_builder.h"
#include "elk_cfg.h"
#include "util/set.h"
#include "util/register_allocate.h"

using namespace elk;

static void
assign_reg(const struct intel_device_info *devinfo,
           unsigned *reg_hw_locations, elk_fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_unit(devinfo) * reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

void
elk_fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           DIV_ROUND_UP(this->alloc.sizes[i - 1],
                                        reg_unit(devinfo)));
   }
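   /* As a sketch (assuming reg_unit() == 1 and reg_width == 1): with
    * first_non_payload_grf == 10 and sizes[] == {1, 2, 1}, hw_reg_mapping
    * becomes {10, 11, 13, 14} and grf_used below ends up as 14.
    */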
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, elk_fs_inst, inst, cfg) {
      assign_reg(devinfo, hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(devinfo, hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }

}

/**
 * Size of a register from the aligned_bary_class register class.
 */
static unsigned
aligned_bary_size(unsigned dispatch_width)
{
   return (dispatch_width == 8 ? 2 : 4);
}

static void
elk_alloc_reg_set(struct elk_compiler *compiler, int dispatch_width)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   int base_reg_count = ELK_MAX_GRF;
   const int index = util_logbase2(dispatch_width / 8);

   if (dispatch_width > 8 && devinfo->ver >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16. Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2). Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into. We currently always ask for 4 registers, but we may
    * convert that to use less some day.
    *
    * Additionally, on gfx5 we need aligned pairs of registers for the PLN
    * instruction, and on gfx4 we need 8 contiguous regs for workaround simd16
    * texturing.
    */
   assert(REG_CLASS_COUNT == MAX_VGRF_SIZE(devinfo) / reg_unit(devinfo));
   int class_sizes[REG_CLASS_COUNT];
   for (unsigned i = 0; i < REG_CLASS_COUNT; i++)
      class_sizes[i] = i + 1;

   struct ra_regs *regs = ra_alloc_reg_set(compiler, ELK_MAX_GRF, false);
   if (devinfo->ver >= 6)
      ra_set_allocate_round_robin(regs);
   struct ra_class **classes = ralloc_array(compiler, struct ra_class *,
                                            REG_CLASS_COUNT);
   struct ra_class *aligned_bary_class = NULL;

   /* Now, make the register classes for each size of contiguous register
    * allocation we might need to make.
    */
   for (int i = 0; i < REG_CLASS_COUNT; i++) {
      classes[i] = ra_alloc_contig_reg_class(regs, class_sizes[i]);

      if (devinfo->ver <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          *  In order to reduce the hardware complexity, the following
          *  rules and restrictions apply to the compressed instruction:
          *  ...
          *  * Operand Alignment Rule: With the exceptions listed below, a
          *    source/destination operand in general should be aligned to
          *    even 256-bit physical register with a region size equal to
          *    two 256-bit physical register
          */
         for (int reg = 0; reg <= base_reg_count - class_sizes[i]; reg += 2)
            ra_class_add_reg(classes[i], reg);
      } else {
         for (int reg = 0; reg <= base_reg_count - class_sizes[i]; reg++)
            ra_class_add_reg(classes[i], reg);
      }
   }

   /* Add a special class for aligned barycentrics, which we'll put the
    * first source of LINTERP on so that we can do PLN on Gen <= 6.
    */
   if (devinfo->has_pln && (devinfo->ver == 6 ||
                            (dispatch_width == 8 && devinfo->ver <= 5))) {
      int contig_len = aligned_bary_size(dispatch_width);
      aligned_bary_class = ra_alloc_contig_reg_class(regs, contig_len);

      for (int i = 0; i <= base_reg_count - contig_len; i += 2)
         ra_class_add_reg(aligned_bary_class, i);
   }

   ra_set_finalize(regs, NULL);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = NULL;
   for (int i = 0; i < REG_CLASS_COUNT; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].aligned_bary_class = aligned_bary_class;
}

void
elk_fs_alloc_reg_sets(struct elk_compiler *compiler)
{
   elk_alloc_reg_set(compiler, 8);
   elk_alloc_reg_set(compiler, 16);
   elk_alloc_reg_set(compiler, 32);
}

static int
count_to_loop_end(const elk_bblock_t *block)
{
   if (block->end()->opcode == ELK_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling function found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == ELK_OPCODE_DO)
         depth++;
      if (block->end()->opcode == ELK_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void elk_fs_visitor::calculate_payload_ranges(unsigned payload_node_count,
                                              int *payload_last_use_ip) const
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (unsigned i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, elk_fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case ELK_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are deffed only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop. Find the ip of
          * the end now.
          */
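         /* For example, a payload register whose last read is at ip 12 inside
          * a loop whose WHILE sits at ip 40 gets payload_last_use_ip == 40,
          * not 12, so it stays reserved for the whole loop body.
          */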
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case ELK_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            unsigned reg_nr = inst->src[i].nr;
            if (reg_nr / reg_unit(devinfo) >= payload_node_count)
               continue;

            for (unsigned j = reg_nr / reg_unit(devinfo);
                 j < DIV_ROUND_UP(reg_nr + regs_read(inst, i),
                                  reg_unit(devinfo));
                 j++) {
               payload_last_use_ip[j] = use_ip;
               assert(j < payload_node_count);
            }
         }
      }

      if (inst->dst.file == FIXED_GRF) {
         unsigned reg_nr = inst->dst.nr;
         if (reg_nr / reg_unit(devinfo) < payload_node_count) {
            for (unsigned j = reg_nr / reg_unit(devinfo);
                 j < DIV_ROUND_UP(reg_nr + regs_written(inst),
                                  reg_unit(devinfo));
                 j++) {
               payload_last_use_ip[j] = use_ip;
               assert(j < payload_node_count);
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case ELK_CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband. It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}

class elk_fs_reg_alloc {
public:
   elk_fs_reg_alloc(elk_fs_visitor *fs):
      fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
      live(fs->live_analysis.require()), g(NULL),
      have_spill_costs(false)
   {
      mem_ctx = ralloc_context(NULL);

      /* Stash the number of instructions so we can sanity check that our
       * counts still match liveness.
       */
      live_instr_count = fs->cfg->last_block()->end_ip + 1;

      spill_insts = _mesa_pointer_set_create(mem_ctx);

      /* Most of this allocation was written for a reg_width of 1
       * (dispatch_width == 8). In extending to SIMD16, the code was
       * left in place and it was converted to have the hardware
       * registers it's allocating be contiguous physical pairs of regs
       * for reg_width == 2.
       */
      int reg_width = fs->dispatch_width / 8;
      rsi = util_logbase2(reg_width);
      payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);

      /* Get payload IP information */
      payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);

      node_count = 0;
      first_payload_node = 0;
      first_mrf_hack_node = 0;
      grf127_send_hack_node = 0;
      first_vgrf_node = 0;
      last_vgrf_node = 0;
      first_spill_node = 0;

      spill_vgrf_ip = NULL;
      spill_vgrf_ip_alloc = 0;
      spill_node_count = 0;
   }

   elk_fs_reg_alloc(const elk_fs_reg_alloc &) = delete;
   elk_fs_reg_alloc & operator=(const elk_fs_reg_alloc &) = delete;

   ~elk_fs_reg_alloc()
   {
      ralloc_free(mem_ctx);
   }

   bool assign_regs(bool allow_spilling, bool spill_all);

private:
   void setup_live_interference(unsigned node,
                                int node_start_ip, int node_end_ip);
   void setup_inst_interference(const elk_fs_inst *inst);

   void build_interference_graph(bool allow_spilling);
   void discard_interference_graph();

   elk_fs_reg build_lane_offsets(const fs_builder &bld,
                                 uint32_t spill_offset, int ip);
   elk_fs_reg build_single_offset(const fs_builder &bld,
                                  uint32_t spill_offset, int ip);

   void emit_unspill(const fs_builder &bld, struct shader_stats *stats,
                     elk_fs_reg dst, uint32_t spill_offset, unsigned count, int ip);
   void emit_spill(const fs_builder &bld, struct shader_stats *stats,
                   elk_fs_reg src, uint32_t spill_offset, unsigned count, int ip);

   void set_spill_costs();
   int choose_spill_reg();
   elk_fs_reg alloc_spill_reg(unsigned size, int ip);
   void spill_reg(unsigned spill_reg);

   void *mem_ctx;
   elk_fs_visitor *fs;
   const intel_device_info *devinfo;
   const elk_compiler *compiler;
   const fs_live_variables &live;
   int live_instr_count;

   set *spill_insts;

   /* Which compiler->fs_reg_sets[] to use */
   int rsi;

   ra_graph *g;
   bool have_spill_costs;

   int payload_node_count;
   int *payload_last_use_ip;

   int node_count;
   int first_payload_node;
   int first_mrf_hack_node;
   int grf127_send_hack_node;
   int first_vgrf_node;
   int last_vgrf_node;
   int first_spill_node;

   int *spill_vgrf_ip;
   int spill_vgrf_ip_alloc;
   int spill_node_count;

   elk_fs_reg scratch_header;
};

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gfx7 are register allocated normally, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(const elk_fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, ELK_MAX_MRF(v->devinfo->ver) * sizeof(bool));

   foreach_block_and_inst(block, elk_fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~ELK_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & ELK_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

namespace {
   /**
    * Maximum spill block size we expect to encounter in 32B units.
    *
    * This is somewhat arbitrary and doesn't necessarily limit the maximum
    * variable size that can be spilled -- a higher value will allow a
    * variable of a given size to be spilled more efficiently with a smaller
    * number of scratch messages, but will increase the likelihood of a
    * collision between the MRFs reserved for spilling and other MRFs used by
    * the program (and possibly increase GRF register pressure on platforms
    * without hardware MRFs), which could cause register allocation to fail.
    *
    * For the moment, reserve just enough space so that a register of 32 bit
    * component type and natural region width can be spilled without splitting
    * into multiple (force_writemask_all) scratch messages.
    */
   unsigned
   spill_max_size(const elk_backend_shader *s)
   {
      /* LSC is limited to SIMD16 sends */
      assert(!s->devinfo->has_lsc);

      /* FINISHME - On Gfx7+ it should be possible to avoid this limit
       *            altogether by spilling directly from the temporary GRF
       *            allocated to hold the result of the instruction (and the
       *            scratch write header).
       */
      /* FINISHME - The shader's dispatch width probably belongs in
       *            elk_backend_shader (or some nonexistent fs_shader class?)
       *            rather than in the visitor class.
       */
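      /* Concretely, this works out to 1 GRF for SIMD8, 2 for SIMD16 and 4 for
       * SIMD32, i.e. one register of natural region width as described above.
       */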
      return static_cast<const elk_fs_visitor *>(s)->dispatch_width / 8;
   }

   /**
    * First MRF register available for spilling.
    */
   unsigned
   spill_base_mrf(const elk_backend_shader *s)
   {
      return ELK_MAX_MRF(s->devinfo->ver) - spill_max_size(s) - 1;
   }
}

void
elk_fs_reg_alloc::setup_live_interference(unsigned node,
                                          int node_start_ip, int node_end_ip)
{
   /* Mark any virtual grf that is live between the start of the program and
    * the last use of a payload node interfering with that payload node.
    */
   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Note that we use a <= comparison, unlike vgrfs_interfere(),
       * in order to not have to worry about the uniform issue described in
       * calculate_live_intervals().
       */
      if (node_start_ip <= payload_last_use_ip[i])
         ra_add_node_interference(g, node, first_payload_node + i);
   }

   /* If we have the MRF hack enabled, mark this node as interfering with all
    * MRF registers.
    */
   if (first_mrf_hack_node >= 0) {
      for (int i = spill_base_mrf(fs); i < ELK_MAX_MRF(devinfo->ver); i++)
         ra_add_node_interference(g, node, first_mrf_hack_node + i);
   }

   /* Add interference with every vgrf whose live range intersects this
    * node's. We only need to look at nodes below this one as the reflexivity
    * of interference will take care of the rest.
    */
   for (unsigned n2 = first_vgrf_node;
        n2 <= (unsigned)last_vgrf_node && n2 < node; n2++) {
      unsigned vgrf = n2 - first_vgrf_node;
      if (!(node_end_ip <= live.vgrf_start[vgrf] ||
            live.vgrf_end[vgrf] <= node_start_ip))
         ra_add_node_interference(g, node, n2);
   }
}

void
elk_fs_reg_alloc::setup_inst_interference(const elk_fs_inst *inst)
{
   /* Certain instructions can't safely use the same register for their
    * sources and destination. Add interference.
    */
   if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   /* A compressed instruction is actually two instructions executed
    * simultaneously. On most platforms, it's OK to have the source and
    * destination registers be the same. In this case, each instruction
    * over-writes its own source and there's no problem. The real problem
    * here is if the source and destination registers are off by one. Then
    * you can end up in a scenario where the first instruction over-writes the
    * source of the second instruction. Since the compiler doesn't know about
    * this level of granularity, we simply make the source and destination
    * interfere.
    */
   if (inst->dst.component_size(inst->exec_size) > REG_SIZE &&
       inst->dst.file == VGRF) {
      for (int i = 0; i < inst->sources; ++i) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   if (grf127_send_hack_node >= 0) {
      /* From the Intel Broadwell PRM, vol 07, section "Instruction Set
       * Reference", subsection "EUISA Instructions", Send Message (page 990):
       *
       * "r127 must not be used for return address when there is a src and
       * dest overlap in send instruction."
       *
       * We avoid using grf127 as part of the destination of send messages by
       * adding an interference with the grf127_send_hack_node, which has a
       * fixed assignment to grf127.
       *
       * We don't apply it to SIMD16 instructions because previous code avoids
       * any register overlap between sources and destination.
       */
      if (inst->exec_size < 16 && inst->is_send_from_grf() &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                  grf127_send_hack_node);

      /* Spill instructions are generated as SEND messages from MRFs, but
       * since Gfx7+ supports sending from GRFs the driver maps those MRF
       * registers to GRFs. The implementation reuses the destination of the
       * send message as a source, so there is guaranteed to be an overlap and
       * we create an interference between the destination and grf127.
       */
      if ((inst->opcode == ELK_SHADER_OPCODE_GFX7_SCRATCH_READ ||
           inst->opcode == ELK_SHADER_OPCODE_GFX4_SCRATCH_READ) &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                  grf127_send_hack_node);
   }

   /* When we do send-from-GRF for FB writes, we need to ensure that the last
    * write instruction sends from a high register. This is because the
    * vertex fetcher wants to start filling the low payload registers while
    * the pixel data port is still working on writing out the memory. If we
    * don't do this, we get rendering artifacts.
    *
    * We could just do "something high". Instead, we just pick the highest
    * register that works.
    */
   if (inst->eot) {
      const int vgrf = inst->opcode == ELK_SHADER_OPCODE_SEND ?
                       inst->src[1].nr : inst->src[0].nr;
      const int size = DIV_ROUND_UP(fs->alloc.sizes[vgrf], reg_unit(devinfo));
      int reg = ELK_MAX_GRF - size;

      if (first_mrf_hack_node >= 0) {
         /* If something happened to spill, we want to push the EOT send
          * register early enough in the register file that we don't
          * conflict with any used MRF hack registers.
          */
         reg -= ELK_MAX_MRF(devinfo->ver) - spill_base_mrf(fs);
      } else if (grf127_send_hack_node >= 0) {
         /* Avoid r127 which might be unusable if the node was previously
          * written by a SIMD8 SEND message with source/destination overlap.
          */
         reg--;
      }

      ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
   }
}

void
elk_fs_reg_alloc::build_interference_graph(bool allow_spilling)
{
   /* Compute the RA node layout */
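   /* As a sketch, the numbering produced by the code below is:
    *
    *    [first_payload_node, +payload_node_count)  payload registers
    *    [first_mrf_hack_node, +MRF count)          fake MRF nodes (Gfx7+, only when spilling is allowed)
    *    grf127_send_hack_node                      single node pinned to g127 (Gfx8+)
    *    [first_vgrf_node, last_vgrf_node]          one node per VGRF
    *    first_spill_node and up                    spill temporaries added later by alloc_spill_reg()
    */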
   node_count = 0;
   first_payload_node = node_count;
   node_count += payload_node_count;
   if (devinfo->ver >= 7 && allow_spilling) {
      first_mrf_hack_node = node_count;
      node_count += ELK_MAX_GRF - GFX7_MRF_HACK_START;
   } else {
      first_mrf_hack_node = -1;
   }
   if (devinfo->ver >= 8) {
      grf127_send_hack_node = node_count;
      node_count ++;
   } else {
      grf127_send_hack_node = -1;
   }
   first_vgrf_node = node_count;
   node_count += fs->alloc.count;
   last_vgrf_node = node_count - 1;
   first_spill_node = node_count;

   fs->calculate_payload_ranges(payload_node_count,
                                payload_last_use_ip);

   assert(g == NULL);
   g = ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
   ralloc_steal(mem_ctx, g);

   /* Set up the payload nodes */
   for (int i = 0; i < payload_node_count; i++)
      ra_set_node_reg(g, first_payload_node + i, i);

   if (first_mrf_hack_node >= 0) {
      /* Mark each MRF reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical-register classes,
       * which would just be silly.
       */
      for (int i = 0; i < ELK_MAX_MRF(devinfo->ver); i++) {
         ra_set_node_reg(g, first_mrf_hack_node + i,
                         GFX7_MRF_HACK_START + i);
      }
   }

   if (grf127_send_hack_node >= 0)
      ra_set_node_reg(g, grf127_send_hack_node, 127);

   /* Specify the classes of each virtual register. */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      unsigned size = DIV_ROUND_UP(fs->alloc.sizes[i], reg_unit(devinfo));

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");

      ra_set_node_class(g, first_vgrf_node + i,
                        compiler->fs_reg_sets[rsi].classes[size - 1]);
   }

   /* Special case: on pre-Gfx7 hardware that supports PLN, the second operand
    * of a PLN instruction needs to be an even-numbered register, so we have a
    * special register class aligned_bary_class to handle this case.
    */
   if (compiler->fs_reg_sets[rsi].aligned_bary_class) {
      foreach_block_and_inst(block, elk_fs_inst, inst, fs->cfg) {
         if (inst->opcode == ELK_FS_OPCODE_LINTERP && inst->src[0].file == VGRF &&
             fs->alloc.sizes[inst->src[0].nr] ==
             aligned_bary_size(fs->dispatch_width)) {
            ra_set_node_class(g, first_vgrf_node + inst->src[0].nr,
                              compiler->fs_reg_sets[rsi].aligned_bary_class);
         }
      }
   }

   /* Add interference based on the live range of the register */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      setup_live_interference(first_vgrf_node + i,
                              live.vgrf_start[i],
                              live.vgrf_end[i]);
   }

   /* Add interference based on the instructions in which a register is used.
    */
   foreach_block_and_inst(block, elk_fs_inst, inst, fs->cfg)
      setup_inst_interference(inst);
}

void
elk_fs_reg_alloc::discard_interference_graph()
{
   ralloc_free(g);
   g = NULL;
   have_spill_costs = false;
}

elk_fs_reg
elk_fs_reg_alloc::build_single_offset(const fs_builder &bld, uint32_t spill_offset, int ip)
{
   elk_fs_reg offset = retype(alloc_spill_reg(1, ip), ELK_REGISTER_TYPE_UD);
   elk_fs_inst *inst = bld.MOV(offset, elk_imm_ud(spill_offset));
   _mesa_set_add(spill_insts, inst);
   return offset;
}

elk_fs_reg
elk_fs_reg_alloc::build_lane_offsets(const fs_builder &bld, uint32_t spill_offset, int ip)
{
   /* LSC messages are limited to SIMD16 */
   assert(bld.dispatch_width() <= 16);

   const fs_builder ubld = bld.exec_all();
   const unsigned reg_count = ubld.dispatch_width() / 8;

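   /* As a sketch of what the sequence below computes: in SIMD16 with
    * spill_offset == 64, each lane i ends up with the dword offset
    * i * 4 + 64, i.e. {64, 68, ..., 124}.
    */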
   elk_fs_reg offset = retype(alloc_spill_reg(reg_count, ip), ELK_REGISTER_TYPE_UD);
   elk_fs_inst *inst;

   /* Build an offset per lane in SIMD8 */
   inst = ubld.group(8, 0).MOV(retype(offset, ELK_REGISTER_TYPE_UW),
                               elk_imm_uv(0x76543210));
   _mesa_set_add(spill_insts, inst);
   inst = ubld.group(8, 0).MOV(offset, retype(offset, ELK_REGISTER_TYPE_UW));
   _mesa_set_add(spill_insts, inst);

   /* Build offsets in the upper 8 lanes of SIMD16 */
   if (ubld.dispatch_width() > 8) {
      inst = ubld.group(8, 0).ADD(
         byte_offset(offset, REG_SIZE),
         byte_offset(offset, 0),
         elk_imm_ud(8));
      _mesa_set_add(spill_insts, inst);
   }

   /* Make the offset a dword */
   inst = ubld.SHL(offset, offset, elk_imm_ud(2));
   _mesa_set_add(spill_insts, inst);

   /* Add the base offset */
   inst = ubld.ADD(offset, offset, elk_imm_ud(spill_offset));
   _mesa_set_add(spill_insts, inst);

   return offset;
}

void
elk_fs_reg_alloc::emit_unspill(const fs_builder &bld,
                               struct shader_stats *stats,
                               elk_fs_reg dst,
                               uint32_t spill_offset, unsigned count, int ip)
{
   const intel_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      ++stats->fill_count;

      elk_fs_inst *unspill_inst;
      if (devinfo->ver >= 7 && spill_offset < (1 << 12) * REG_SIZE) {
         /* The Gfx7 descriptor-based offset is 12 bits of HWORD units.
          * Because the Gfx7-style scratch block read is hardwired to BTI 255,
          * on Gfx9+ it would cause the DC to do an IA-coherent read, which
          * largely outweighs the slight advantage from not having to provide
          * the address as part of the message header, so we're better off
          * using plain old oword block reads.
          */
         unspill_inst = bld.emit(ELK_SHADER_OPCODE_GFX7_SCRATCH_READ, dst);
         unspill_inst->offset = spill_offset;
      } else {
         unspill_inst = bld.emit(ELK_SHADER_OPCODE_GFX4_SCRATCH_READ, dst);
         unspill_inst->offset = spill_offset;
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }
      _mesa_set_add(spill_insts, unspill_inst);

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

void
elk_fs_reg_alloc::emit_spill(const fs_builder &bld,
                             struct shader_stats *stats,
                             elk_fs_reg src,
                             uint32_t spill_offset, unsigned count, int ip)
{
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      ++stats->spill_count;

      elk_fs_inst *spill_inst = bld.emit(ELK_SHADER_OPCODE_GFX4_SCRATCH_WRITE,
                                         bld.null_reg_f(), src);
      spill_inst->offset = spill_offset;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf(bld.shader);
      _mesa_set_add(spill_insts, spill_inst);

      src.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

void
elk_fs_reg_alloc::set_spill_costs()
{
   float block_scale = 1.0;
   float spill_costs[fs->alloc.count];
   bool no_spill[fs->alloc.count];

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes. Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
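   /* With those weights, an access nested two loops deep is counted 100x and
    * an access inside one arm of an IF/ELSE is counted 0.5x relative to
    * straight-line code.
    */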
   foreach_block_and_inst(block, elk_fs_inst, inst, fs->cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;

      /* Don't spill anything we generated while spilling */
      if (_mesa_set_search(spill_insts, inst)) {
         for (unsigned int i = 0; i < inst->sources; i++) {
            if (inst->src[i].file == VGRF)
               no_spill[inst->src[i].nr] = true;
         }
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
      }

      switch (inst->opcode) {

      case ELK_OPCODE_DO:
         block_scale *= 10;
         break;

      case ELK_OPCODE_WHILE:
         block_scale /= 10;
         break;

      case ELK_OPCODE_IF:
      case ELK_OPCODE_IFF:
         block_scale *= 0.5;
         break;

      case ELK_OPCODE_ENDIF:
         block_scale /= 0.5;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      /* Do the no_spill check first. Registers that are used as spill
       * temporaries may have been allocated after we calculated liveness so
       * we shouldn't look their liveness up. Fortunately, they're always
       * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
       * no_spill.
       */
      if (no_spill[i])
         continue;

      int live_length = live.vgrf_end[i] - live.vgrf_start[i];
      if (live_length <= 0)
         continue;

      /* Divide the cost (in number of spills/fills) by the log of the length
       * of the live range of the register. This will encourage spill logic
       * to spill long-living things before spilling short-lived things where
       * spilling is less likely to actually do us any good. We use the log
       * of the length because it will fall off very quickly and not cause us
       * to spill medium length registers with more uses.
       */
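      /* For instance, 4 spill/fill accesses over a ~150-instruction live
       * range cost 4 / ln(150) ~= 0.8, while the same accesses over a
       * 10-instruction range cost 4 / ln(10) ~= 1.7, so the long-lived
       * register looks cheaper to spill.
       */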
      float adjusted_cost = spill_costs[i] / logf(live_length);
      ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
   }

   have_spill_costs = true;
}

int
elk_fs_reg_alloc::choose_spill_reg()
{
   if (!have_spill_costs)
      set_spill_costs();

   int node = ra_get_best_spill_node(g);
   if (node < 0)
      return -1;

   assert(node >= first_vgrf_node);
   return node - first_vgrf_node;
}

elk_fs_reg
elk_fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
{
   int vgrf = fs->alloc.allocate(ALIGN(size, reg_unit(devinfo)));
   int class_idx = DIV_ROUND_UP(size, reg_unit(devinfo)) - 1;
   int n = ra_add_node(g, compiler->fs_reg_sets[rsi].classes[class_idx]);
   assert(n == first_vgrf_node + vgrf);
   assert(n == first_spill_node + spill_node_count);

   setup_live_interference(n, ip - 1, ip + 1);

   /* Add interference between this spill node and any other spill nodes for
    * the same instruction.
    */
   for (int s = 0; s < spill_node_count; s++) {
      if (spill_vgrf_ip[s] == ip)
         ra_add_node_interference(g, n, first_spill_node + s);
   }

   /* Add this spill node to the list for next time */
   if (spill_node_count >= spill_vgrf_ip_alloc) {
      if (spill_vgrf_ip_alloc == 0)
         spill_vgrf_ip_alloc = 16;
      else
         spill_vgrf_ip_alloc *= 2;
      spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
                               spill_vgrf_ip_alloc);
   }
   spill_vgrf_ip[spill_node_count++] = ip;

   return elk_fs_reg(VGRF, vgrf);
}

void
elk_fs_reg_alloc::spill_reg(unsigned spill_reg)
{
   int size = fs->alloc.sizes[spill_reg];
   unsigned int spill_offset = fs->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case. Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gfx6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gfx4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1. In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!fs->spilled_any_registers) {
      bool mrf_used[ELK_MAX_MRF(devinfo->ver)];
      get_used_mrfs(fs, mrf_used);

      for (int i = spill_base_mrf(fs); i < ELK_MAX_MRF(devinfo->ver); i++) {
         if (mrf_used[i]) {
            fs->fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      fs->spilled_any_registers = true;
   }

   fs->last_scratch += size * REG_SIZE;

   /* We're about to replace all uses of this register. It no longer
    * conflicts with anything so we can get rid of its interference.
    */
   ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
   ra_reset_node_interference(g, first_vgrf_node + spill_reg);

   /* Generate spill/unspill instructions for the objects being
    * spilled. Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size. For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   int ip = 0;
   foreach_block_and_inst (block, elk_fs_inst, inst, fs->cfg) {
      const fs_builder ibld = fs_builder(fs, block, inst);
      exec_node *before = inst->prev;
      exec_node *after = inst->next;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            elk_fs_reg unspill_dst = alloc_spill_reg(count, ip);

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
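            /* E.g. count == 2 gives a SIMD16 read, count == 3 falls back to
             * SIMD8 (the largest power-of-two divisor), and count == 4 hits
             * the SIMD32 cap below.
             */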
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));

            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels. It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0), &fs->shader_stats,
                         unspill_dst, subset_spill_offset, count, ip);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg &&
          inst->opcode != ELK_SHADER_OPCODE_UNDEF) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         elk_fs_reg spill_src = alloc_spill_reg(regs_written(inst), ip);

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints. Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register). We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * reg_unit(devinfo) *
            DIV_ROUND_UP(MIN2(inst->dst.component_size(inst->exec_size),
                              spill_max_size(fs) * REG_SIZE),
                         reg_unit(devinfo) * REG_SIZE);

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask. If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written(). If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, &fs->shader_stats, spill_src,
                         subset_spill_offset, regs_written(inst), ip);

         emit_spill(ubld.at(block, inst->next), &fs->shader_stats, spill_src,
                    subset_spill_offset, regs_written(inst), ip);
      }

      for (elk_fs_inst *inst = (elk_fs_inst *)before->next;
           inst != after; inst = (elk_fs_inst *)inst->next)
         setup_inst_interference(inst);

      /* We don't advance the ip for scratch read/write instructions
       * because we consider them to have the same ip as the instruction we're
       * spilling around for the purposes of interference. Also, we're
       * inserting spill instructions without re-running liveness analysis
       * and we don't want to mess up our IPs.
       */
      if (!_mesa_set_search(spill_insts, inst))
         ip++;
   }

   assert(ip == live_instr_count);
}

bool
elk_fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
{
   build_interference_graph(fs->spilled_any_registers || spill_all);

   unsigned spilled = 0;
   while (1) {
      /* Debug of register spilling: Go spill everything. */
      if (unlikely(spill_all)) {
         int reg = choose_spill_reg();
         if (reg != -1) {
            spill_reg(reg);
            continue;
         }
      }

      if (ra_allocate(g))
         break;

      if (!allow_spilling)
         return false;

      /* Failed to allocate registers. Spill some regs, and the caller will
       * loop back into here to try again.
       */
      unsigned nr_spills = 1;
      if (compiler->spilling_rate)
         nr_spills = MAX2(1, spilled / compiler->spilling_rate);

      for (unsigned j = 0; j < nr_spills; j++) {
         int reg = choose_spill_reg();
         if (reg == -1) {
            if (j == 0)
               return false; /* Nothing to spill */
            break;
         }

         /* If we're going to spill but we've never spilled before, we need
          * to re-build the interference graph with MRFs enabled to allow
          * spilling.
          */
         if (!fs->spilled_any_registers) {
            discard_interference_graph();
            build_interference_graph(true);
         }

         spill_reg(reg);
         spilled++;
      }
   }

   if (spilled)
      fs->invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   unsigned hw_reg_mapping[fs->alloc.count];
   fs->grf_used = fs->first_non_payload_grf;
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      int reg = ra_get_node_reg(g, first_vgrf_node + i);

      hw_reg_mapping[i] = reg;
      fs->grf_used = MAX2(fs->grf_used,
                          hw_reg_mapping[i] + DIV_ROUND_UP(fs->alloc.sizes[i],
                                                           reg_unit(devinfo)));
   }

   foreach_block_and_inst(block, elk_fs_inst, inst, fs->cfg) {
      assign_reg(devinfo, hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(devinfo, hw_reg_mapping, &inst->src[i]);
      }
   }

   fs->alloc.count = fs->grf_used;

   return true;
}

bool
elk_fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   elk_fs_reg_alloc alloc(this);
   bool success = alloc.assign_regs(allow_spilling, spill_all);
   if (!success && allow_spilling) {
      fail("no register to spill:\n");
      dump_instructions(NULL);
   }
   return success;
}
