/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nv50_ir.h"
#include "nv50_ir_target.h"

#include <algorithm>
#include <stack>
#include <limits>
#include <unordered_map>
#include <unordered_set> // std::unordered_set is used by SpillCodeInserter::run()

namespace nv50_ir {
namespace {

#define MAX_REGISTER_FILE_SIZE 256

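// Per-file bitmap of allocation units, used during the SELECT phase to
// track which units are already occupied while colouring a node.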
class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   void reset(DataFile, bool resetMax = false);

   bool assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f) const
   {
      return last[f] + 1;
   }

   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }
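   // Worked example for the conversions above (granularity assumed for
   // illustration): with unit[f] == 1, i.e. 2-byte allocation units, a b32
   // value with id 3 starts at byte 12 (idToBytes) and unit 6 (idToUnits);
   // bytesToId() inverts this, counting 4-byte words for values of size >= 4
   // and allocation units for smaller (e.g. 16-bit) values.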

   void print(DataFile f) const;

   const bool restrictedGPR16Range;

private:
   BitSet bits[LAST_REGISTER_FILE + 1];

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1];
   int fill[LAST_REGISTER_FILE + 1];
};

void
RegisterSet::reset(DataFile f, bool resetMax)
{
   bits[f].fill(0);
   if (resetMax)
      fill[f] = -1;
}

void
RegisterSet::init(const Target *targ)
{
   for (unsigned int rf = 0; rf <= LAST_REGISTER_FILE; ++rf) {
      DataFile f = static_cast<DataFile>(rf);
      last[rf] = targ->getFileSize(f) - 1;
      unit[rf] = targ->getFileUnit(f);
      fill[rf] = -1;
      assert(last[rf] < MAX_REGISTER_FILE_SIZE);
      bits[rf].allocate(last[rf] + 1, true);
   }
}

RegisterSet::RegisterSet(const Target *targ)
  : restrictedGPR16Range(targ->getChipset() < 0xc0)
{
   init(targ);
   for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
      reset(static_cast<DataFile>(i));
}

void
RegisterSet::print(DataFile f) const
{
   INFO("GPR:");
   bits[f].print();
   INFO("\n");
}

bool
RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg)
{
   reg = bits[f].findFreeRange(size, maxReg);
   if (reg < 0)
      return false;
   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
   return true;
}

bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}

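// Mark the allocation units selected by mask as used. reg is expected to be
// 8-unit aligned (see checkInterference), so reg % 32 is a multiple of 8
// and the shifted 8-bit mask stays within the 32-bit BitSet word addressed
// by reg & ~31.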
void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}

void
RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].setRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);

   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
}

bool
RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
{
   if (isOccupied(f, reg, size))
      return false;
   occupy(f, reg, size);
   return true;
}

class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), func(NULL), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
      inline void splitEdges(BasicBlock *b);
   };

   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   class InsertConstraintsPass : public Pass {
   public:
      InsertConstraintsPass() : targ(NULL) { }
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      void insertConstraintMove(Instruction *, int s);
      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseDefs(Instruction *, const int first, const int last);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      bool isScalarTexGM107(TexInstruction *);
      void handleScalarTexGM107(TexInstruction *);

      std::list<Instruction *> constrList;

      const Target *targ;
   };

   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};

typedef std::pair<Value *, Value *> ValuePair;

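// Tracks the definition lists of coalesced values without mutating
// Value::defs in place: entry() lazily snapshots a value's def list on
// first access, add() appends the defs of a value joined into another, and
// merge() writes the accumulated lists back once allocation succeeds.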
class MergedDefs
{
private:
   std::list<ValueDef *>& entry(Value *val) {
      auto it = defs.find(val);

      if (it == defs.end()) {
         std::list<ValueDef *> &res = defs[val];
         res = val->defs;
         return res;
      } else {
         return (*it).second;
      }
   }

   std::unordered_map<Value *, std::list<ValueDef *> > defs;

public:
   std::list<ValueDef *>& operator()(Value *val) {
      return entry(val);
   }

   void add(Value *val, const std::list<ValueDef *> &vals) {
      assert(val);
      std::list<ValueDef *> &valdefs = entry(val);
      valdefs.insert(valdefs.end(), vals.begin(), vals.end());
   }

   void removeDefsOfInstruction(Instruction *insn) {
      for (int d = 0; insn->defExists(d); ++d) {
         ValueDef *def = &insn->def(d);
         defs.erase(def->get());
         for (auto &p : defs)
            p.second.remove(def);
      }
   }

   void merge() {
      for (auto &p : defs)
         p.first->defs = p.second;
   }
};

class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn, MergedDefs &mergedDefs)
      : func(fn), mergedDefs(mergedDefs), stackSize(0), stackBase(0) { }

   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;
   MergedDefs &mergedDefs;

   int32_t stackSize;
   int32_t stackBase;

   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   void spill(Instruction *defi, Value *slot, LValue *);
};

void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}

bool
RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
{
   if (b->cfg.incidentCount() <= 1)
      return false;

   int n = 0;
   for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
      if (ei.getType() == Graph::Edge::TREE ||
          ei.getType() == Graph::Edge::FORWARD)
         ++n;
   return (n == 2);
}

struct PhiMapHash {
   size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
      return std::hash<Instruction*>()(val.first) * 31 +
         std::hash<BasicBlock*>()(val.second);
   }
};

typedef std::unordered_map<
   std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;

// Critical edges need to be split up so that work can be inserted along
// specific edge transitions. Unfortunately manipulating incident edges into a
// BB invalidates all the PHI nodes since their sources are implicitly ordered
// by incident edge order.
//
// TODO: Make it so that that is not the case, and PHI nodes store pointers to
// the original BBs.
void
RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
{
   BasicBlock *pb, *pn;
   Instruction *phi;
   Graph::EdgeIterator ei;
   std::stack<BasicBlock *> stack;
   int j = 0;

   for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }

   // No critical edges were found, no need to perform any work.
   if (stack.empty())
      return;

   // We're about to, potentially, reorder the inbound edges. This means that
   // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
   // nodes after the graph has been modified.
   PhiMap phis;

   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
         phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
   }

   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());
         phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
         phis.erase(it);
      }
   }

   // Now go through and fix up all of the phi node sources.
   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());

         phi->setSrc(j, it->second);
      }
   }
}

// For each operand of each PHI in b, generate a new value by inserting a MOV
// at the end of the block it is coming from and replace the operand with its
// result. This eliminates liveness conflicts and enables us to let values be
// copied to the right register if such a conflict exists nonetheless.
//
// These MOVs are also crucial in making sure the live intervals of phi sources
// are extended until the end of the loop, since they are not included in the
// live-in sets.
bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
{
   Instruction *phi, *mov;

   splitEdges(bb);

   // insert MOVs (phi->src(j) should stem from j-th in-BB)
   int j = 0;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *pb = BasicBlock::get(ei.getNode());
      if (!pb->isTerminated())
         pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
         mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));

         mov->setSrc(0, phi->getSrc(j));
         mov->setDef(0, tmp);
         phi->setSrc(j, tmp);

         pb->insertBefore(pb->getExit(), mov);
      }
      ++j;
   }

   return true;
}

// Build the set of live-in variables of bb.
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence))
         if (!buildLiveSets(bn))
            return false;
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}

void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (bbA) {
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}

bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through out blocks and delete phi sources that do not originate from
   // the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}

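// Categories of instructions whose operands doCoalesce() will try to join
// in a single pass; coalesce() picks the per-chipset combination below.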
#define JOIN_MASK_PHI    (1 << 0)
#define JOIN_MASK_UNION  (1 << 1)
#define JOIN_MASK_MOV    (1 << 2)
#define JOIN_MASK_TEX    (1 << 3)

class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&, MergedDefs&);
   ~GCRA();

   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t maxReg;
      uint16_t colors;

      DataFile f;
      int32_t reg;

      float weight;

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      // the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs;
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask);
   void calculateSpillWeights();
   bool simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   void copyCompound(Value *dst, Value *src);
   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack;

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes;
   unsigned int nodeCount;

   Function *func;
   Program *prog;

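   // relDegree[i][j] = j * ceil(i / j): the worst-case number of allocation
   // units that an interfering i-unit neighbour can block for a j-unit node.
   // E.g. a 4-unit value can render two aligned 2-unit slots unusable, so
   // relDegree[4][2] == 4.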
   struct RelDegree {
      uint8_t data[17][17];

      RelDegree() {
         for (int i = 1; i <= 16; ++i)
            for (int j = 1; j <= 16; ++j)
               data[i][j] = j * ((i + j - 1) / j);
      }

      const uint8_t* operator[](std::size_t i) const {
         return data[i];
      }
   };

   static const RelDegree relDegree;

   RegisterSet regs;

   // need to fixup register id for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill;

   MergedDefs &mergedDefs;
};

const GCRA::RelDegree GCRA::relDegree;

GCRA::RIG_Node::RIG_Node() : Node(NULL), degree(0), degreeLimit(0), maxReg(0),
   colors(0), f(FILE_NULL), reg(0), weight(0), next(this), prev(this)
{
}

void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors)
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f, nodes[i].reg, nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}

static bool
isShortRegOp(Instruction *insn)
{
   // Immediates are always in src1 (except zeroes, which end up getting
   // replaced with a zero reg). Every other situation can be resolved by
   // using a long encoding.
   return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE &&
      insn->getSrc(1)->reg.data.u64;
}

// Check if this LValue is ever used in an instruction that can't be encoded
// with long registers (i.e. > r63)
static bool
isShortRegVal(LValue *lval)
{
   if (lval->getInsn() == NULL)
      return false;
   for (Value::DefCIterator def = lval->defs.begin();
        def != lval->defs.end(); ++def)
      if (isShortRegOp((*def)->getInsn()))
         return true;
   for (Value::UseCIterator use = lval->uses.begin();
        use != lval->uses.end(); ++use)
      if (isShortRegOp((*use)->getInsn()))
         return true;
   return false;
}

void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   maxReg = regs.getFileSize(f);
   // On nv50, we lose a bit of gpr encoding when there's an embedded
   // immediate.
   if (regs.restrictedGPR16Range && f == FILE_GPR &&
       (lval->reg.size == 2 || isShortRegVal(lval)))
      maxReg /= 2;
   degreeLimit = maxReg;
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}

// Used when coalescing moves. The non-compound value will become one, e.g.:
// mov b32 $r0 $r2 / merge b64 $r0d { $r0 $r1 }
// split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
void
GCRA::copyCompound(Value *dst, Value *src)
{
   LValue *ldst = dst->asLValue();
   LValue *lsrc = src->asLValue();

   if (ldst->compound && !lsrc->compound) {
      LValue *swap = lsrc;
      lsrc = ldst;
      ldst = swap;
   }

   assert(!ldst->compound);

   if (lsrc->compound) {
      for (ValueDef *d : mergedDefs(ldst->join)) {
         LValue *ldst = d->get()->asLValue();
         if (!ldst->compound)
            ldst->compMask = 0xff;
         ldst->compound = 1;
         ldst->compMask &= lsrc->compMask;
      }
   }
}

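// Try to unify the RIG nodes of dst and src so they end up in the same
// register. Unless force is set, this refuses values in different files or
// of different sizes, values bound to different fixed registers, and values
// whose live intervals overlap.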
bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   // TODO: Handle this case properly.
   if (!force && rep->compound && val->compound)
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   if (!force)
      copyCompound(dst, src);

   // set join pointer of all values joined with val
   const std::list<ValueDef *> &defs = mergedDefs(val);
   for (ValueDef *def : defs)
      def->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   mergedDefs.add(rep, defs);
   nRep->livei.unify(nVal->livei);
   nRep->degreeLimit = MIN2(nRep->degreeLimit, nVal->degreeLimit);
   nRep->maxReg = MIN2(nRep->maxReg, nVal->maxReg);
   return true;
}

bool
GCRA::coalesce(ArrayList& insns)
{
   bool ret = doCoalesce(insns, JOIN_MASK_PHI);
   if (!ret)
      return false;
   switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
   case 0x50:
   case 0x80:
   case 0x90:
   case 0xa0:
      ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
      break;
   case 0xc0:
   case 0xd0:
   case 0xe0:
   case 0xf0:
   case 0x100:
   case 0x110:
   case 0x120:
   case 0x130:
   case 0x140:
   case 0x160:
   case 0x170:
   case 0x190:
      ret = doCoalesce(insns, JOIN_MASK_UNION);
      break;
   default:
      break;
   }
   if (!ret)
      return false;
   return doCoalesce(insns, JOIN_MASK_MOV);
}

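// Compute the 8-bit component mask for a value occupying `size` allocation
// units at offset `base` within a compound of `compSize` units, replicated
// for every aligned position the compound can take within an 8-unit span.
// E.g. makeCompMask(4, 2, 1) == 0x44: unit 2 of a 4-unit tuple, valid at
// either 4-unit-aligned position.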
static inline uint8_t makeCompMask(int compSize, int base, int size)
{
   uint8_t m = ((1 << size) - 1) << base;

   switch (compSize) {
   case 1:
      return 0xff;
   case 2:
      m |= (m << 2);
      return (m << 4) | m;
   case 3:
   case 4:
      return (m << 4) | m;
   default:
      assert(compSize <= 8);
      return m;
   }
}

void
GCRA::makeCompound(Instruction *insn, bool split)
{
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
               rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   assert(base == size);
}

bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c;

   for (unsigned int n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            merges.push_back(insn);
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            coalesceValues(insn->getDef(0), insn->getSrc(0), false);
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}

void
GCRA::RIG_Node::addInterference(RIG_Node *node)
{
   this->degree += relDegree[node->colors][colors];
   node->degree += relDegree[colors][node->colors];

   this->attach(node, Graph::Edge::CROSS);
}

void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
{
   prefRegs.push_back(node);
}

GCRA::GCRA(Function *fn, SpillCodeInserter& spill, MergedDefs& mergedDefs) :
   nodes(NULL),
   nodeCount(0),
   func(fn),
   regs(fn->getProgram()->getTarget()),
   spill(spill),
   mergedDefs(mergedDefs)
{
   prog = func->getProgram();
}

GCRA::~GCRA()
{
   if (nodes)
      delete[] nodes;
}

void
GCRA::checkList(std::list<RIG_Node *>& lst)
{
   GCRA::RIG_Node *prev = NULL;

   for (std::list<RIG_Node *>::iterator it = lst.begin();
        it != lst.end();
        ++it) {
      assert((*it)->getValue()->join == (*it)->getValue());
      if (prev)
         assert(prev->livei.begin() <= (*it)->livei.begin());
      prev = *it;
   }
}

void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
{
   if (node->livei.isEmpty())
      return;
   // only the intervals of joined values don't necessarily arrive in order
   std::list<RIG_Node *>::iterator prev, it;
   for (it = list.end(); it != list.begin(); it = prev) {
      prev = it;
      --prev;
      if ((*prev)->livei.begin() <= node->livei.begin())
         break;
   }
   list.insert(it, node);
}

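// Build the register interference graph with a linear sweep over the nodes
// in order of live interval start: nodes whose interval ended before the
// current one starts are dropped from the active list, and an interference
// edge is added between the current node and every still-active node of the
// same file whose interval overlaps.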
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   for (unsigned int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->reg.file <= LAST_REGISTER_FILE &&
             insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}

void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

      if (!val->noSpill) {
         int rc = 0;
         for (ValueDef *def : mergedDefs(val))
            rc += def->get()->refCount();

         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}

void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   bool move = b->degree >= b->degreeLimit;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   b->degree -= relDegree[a->colors][b->colors];

   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}

void
GCRA::simplifyNode(RIG_Node *node)
{
   for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   DLLIST_DEL(node);
   stack.push(node->getValue()->id);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
            node->getValue()->id,
            (node->degree < node->degreeLimit) ? "" : "(spill)");
}

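// Chaitin/Briggs style SIMPLIFY phase: repeatedly remove nodes from the RIG
// and push them onto the select stack. Trivially colourable small values
// (lo[0]) go first, then larger ones (lo[1]); when only constrained nodes
// remain (hi), the node with the lowest weight/degree score is pushed as an
// optimistic spill candidate.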
bool
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         unsigned bestMaxReg = best->maxReg;
         float bestScore = best->weight / (float)best->degree;
         // Spill candidate. First go through the ones with the highest max
         // register, then the ones with lower. That way the ones with the
         // lowest requirement will be allocated first, since it's a stack.
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore || it->maxReg > bestMaxReg) {
               best = it;
               bestScore = score;
               bestMaxReg = it->maxReg;
            }
         }
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            return false;
         }
         simplifyNode(best);
      } else {
         return true;
      }
   }
}

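// While colouring `node`, mark the units claimed by an already-coloured
// interfering neighbour as occupied. For compound values, only components
// whose sub-intervals actually overlap are blocked, via the 8-unit-aligned
// component masks.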
void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   if (intf->reg < 0)
      return;
   LValue *vA = node->getValue();
   LValue *vB = intf->getValue();

   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      for (const ValueDef *D : mergedDefs(vA)) {
         for (const ValueDef *d : mergedDefs(vB)) {
            const LValue *vD = D->get()->asLValue();
            const LValue *vd = d->get()->asLValue();

            if (!vD->livei.overlaps(vd->livei)) {
               INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                        vD->id, vd->id);
               continue;
            }

            uint8_t mask = vD->compound ? vD->compMask : ~0;
            if (vd->compound) {
               assert(vB->compound);
               mask &= vd->compMask & vB->compMask;
            } else {
               mask &= intfMask;
            }

            INFO_DBG(prog->dbgFlags, REG_ALLOC,
                     "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                     vD->id,
                     vD->compound ? vD->compMask : 0xff,
                     vd->id,
                     vd->compound ? vd->compMask : intfMask,
                     vB->compMask, intf->reg & ~7, mask);
            if (mask)
               regs.occupyMask(node->f, intf->reg & ~7, mask);
         }
      }
   } else {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}

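// SELECT phase: pop nodes off the simplify stack and give each the first
// free range in its file, preferring the register of a recorded preference
// node (e.g. src2 of a MAD) if it is still free. Uncolourable nodes are
// queued on mustSpill, which makes the pass fail so that spill code can be
// inserted and allocation retried.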
bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print(node->f);
      bool ret = regs.assign(node->reg, node->f, node->colors, node->maxReg);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         Symbol *slot = NULL;
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}

bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);

         if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL) {
            Instruction *insn = lval->getInsn();
            if (insn->op != OP_MAD && insn->op != OP_FMA && insn->op != OP_SAD)
               continue;
            // For both of the cases below, we only want to add the preference
            // if all arguments are in registers.
            if (insn->src(0).getFile() != FILE_GPR ||
                insn->src(1).getFile() != FILE_GPR ||
                insn->src(2).getFile() != FILE_GPR)
               continue;
            if (prog->getTarget()->getChipset() < 0xc0) {
               // Outputting a flag is not supported with short encodings nor
               // with immediate arguments.
               // See handleMADforNV50.
               if (insn->flagsDef >= 0)
                  continue;
            } else {
               // We can only fold immediate arguments if dst == src2. This
               // only matters if one of the first two arguments is an
               // immediate. This form is also only supported for floats.
               // See handleMADforNVC0.
               ImmediateValue imm;
               if (insn->dType != TYPE_F32)
                  continue;
               if (!insn->src(0).getImmediate(imm) &&
                   !insn->src(1).getImmediate(imm))
                  continue;
            }

            nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
         }
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   ret = simplify();
   if (!ret)
      goto out;

   ret = selectRegisters();
   if (!ret) {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      mergedDefs.merge();
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}

void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval = reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      if (lval->join == lval)
         continue;

      if (success)
         lval->reg.data.id = lval->join->reg.data.id;
      else
         lval->join = lval;
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
   hi.next = hi.prev = &hi;
   lo[0].next = lo[0].prev = &lo[0];
   lo[1].next = lo[1].prev = &lo[1];
}

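// Reserve a slot in thread-local memory for a spilled value; the address is
// the current stack top aligned up to the value's size (e.g. an 8-byte
// value with stackSize + tlsBase == 20 would be placed at byte 24).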
Symbol *
SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
{
   int32_t address = align(stackSize + func->tlsBase, size);

   Symbol *sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
   sym->setAddress(NULL, address);
   sym->reg.size = size;

   stackSize = address + size - func->tlsBase;

   return sym;
}

Value *
SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
{
   if (!lval->compound || (lval->compMask & 0x1))
      return base;
   Value *slot = cloneShallow(func, base);

   const unsigned int unit = func->getProgram()->getTarget()->getFileUnit(lval->reg.file);
   slot->reg.data.offset += (ffs(lval->compMask) - 1) << unit;
   assert((slot->reg.data.offset % lval->reg.size) == 0);
   slot->reg.size = lval->reg.size;

   return slot;
}

void
SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);

   Instruction *st;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      lval->noSpill = 1;
      if (ty != TYPE_B96) {
         st = new_Instruction(func, OP_STORE, ty);
         st->setSrc(0, slot);
         st->setSrc(1, lval);
      } else {
         st = new_Instruction(func, OP_SPLIT, ty);
         st->setSrc(0, lval);
         for (int d = 0; d < lval->reg.size / 4; ++d)
            st->setDef(d, new_LValue(func, FILE_GPR));

         for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
            Value *tmp = cloneShallow(func, slot);
            tmp->reg.size = 4;
            tmp->reg.data.offset += 4 * d;

            Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
            s->setSrc(0, tmp);
            s->setSrc(1, st->getDef(d));
            defi->bb->insertAfter(defi, s);
         }
      }
   } else {
      st = new_Instruction(func, OP_CVT, ty);
      st->setDef(0, slot);
      st->setSrc(0, lval);
      if (lval->reg.file == FILE_FLAGS)
         st->flagsSrc = 0;
   }
   defi->bb->insertAfter(defi, st);
}

LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);
   lval = cloneShallow(func, lval);

   Instruction *ld;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      lval->noSpill = 1;
      if (ty != TYPE_B96) {
         ld = new_Instruction(func, OP_LOAD, ty);
      } else {
         ld = new_Instruction(func, OP_MERGE, ty);
         for (int d = 0; d < lval->reg.size / 4; ++d) {
            Value *tmp = cloneShallow(func, slot);
            LValue *val;
            tmp->reg.size = 4;
            tmp->reg.data.offset += 4 * d;

            Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
            l->setDef(0, (val = new_LValue(func, FILE_GPR)));
            l->setSrc(0, tmp);
            usei->bb->insertBefore(usei, l);
            ld->setSrc(d, val);
            val->noSpill = 1;
         }
         ld->setDef(0, lval);
         usei->bb->insertBefore(usei, ld);
         return lval;
      }
   } else {
      ld = new_Instruction(func, OP_CVT, ty);
   }
   ld->setDef(0, lval);
   ld->setSrc(0, slot);
   if (lval->reg.file == FILE_FLAGS)
      ld->flagsDef = 0;

   usei->bb->insertBefore(usei, ld);
   return lval;
}

static bool
value_cmp(ValueRef *a, ValueRef *b) {
   Instruction *ai = a->getInsn(), *bi = b->getInsn();
   if (ai->bb != bi->bb)
      return ai->bb->getId() < bi->bb->getId();
   return ai->serial < bi->serial;
}

// For each value that is to be spilled, go through all its definitions.
// A value can have multiple definitions if it has been coalesced before.
// For each definition, first go through all its uses and insert an unspill
// instruction before it, then replace the use with the temporary register.
// Unspill can be either a load from memory or simply a move to another
// register file.
// For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
// if we have spilled to a memory location, or simply replace it with the
// new register. No load or conversion instruction should be needed.
bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
{
   for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
        ++it) {
      LValue *lval = it->first->asLValue();
      Symbol *mem = it->second ? it->second->asSym() : NULL;

      // Keep track of which instructions to delete later. Deleting them
      // inside the loop is unsafe since a single instruction may have
      // multiple destinations that all need to be spilled (like OP_SPLIT).
      std::unordered_set<Instruction *> to_del;

      std::list<ValueDef *> &defs = mergedDefs(lval);
      for (Value::DefIterator d = defs.begin(); d != defs.end();
           ++d) {
         Value *slot = mem ?
            static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
         Value *tmp = NULL;
         Instruction *last = NULL;

         LValue *dval = (*d)->get()->asLValue();
         Instruction *defi = (*d)->getInsn();

         // Sort all the uses by BB/instruction so that we don't unspill
         // multiple times in a row, and also remove a source of
         // non-determinism.
         std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
         std::sort(refs.begin(), refs.end(), value_cmp);

         // Unspill at each use *before* inserting spill instructions,
         // we don't want to have the spill instructions in the use list here.
         for (std::vector<ValueRef*>::const_iterator it = refs.begin();
              it != refs.end(); ++it) {
            ValueRef *u = *it;
            Instruction *usei = u->getInsn();
            assert(usei);
            if (usei->isPseudo()) {
               tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
               last = NULL;
            } else {
               if (!last || (usei != last->next && usei != last))
                  tmp = unspill(usei, dval, slot);
               last = usei;
            }
            u->set(tmp);
         }

         assert(defi);
         if (defi->isPseudo()) {
            d = defs.erase(d);
            --d;
            if (slot->reg.file == FILE_MEMORY_LOCAL)
               to_del.insert(defi);
            else
               defi->setDef(0, slot);
         } else {
            spill(defi, slot, dval);
         }
      }

      for (std::unordered_set<Instruction *>::const_iterator it = to_del.begin();
           it != to_del.end(); ++it) {
         mergedDefs.removeDefsOfInstruction(*it);
         delete_Instruction(func->getProgram(), *it);
      }
   }

   stackBase = stackSize;
   return true;
}

bool
RegAlloc::exec()
{
   for (IteratorRef it = prog->calls.iteratorDFS(false);
        !it->end(); it->next()) {
      func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));

      func->tlsBase = prog->tlsSize;
      if (!execFunc())
         return false;
      prog->tlsSize += func->tlsSize;
   }
   return true;
}

bool
RegAlloc::execFunc()
{
   MergedDefs mergedDefs;
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func, mergedDefs);

   GCRA gcra(func, insertSpills, mergedDefs);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}

// TODO: check if modifying Instruction::join here breaks anything
void
GCRA::resolveSplitsAndMerges()
{
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis) {
               phi->getSrc(phis)->join = v;
               phi->getSrc(phis)->reg.data.id = v->reg.data.id;
            }
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}

bool
RegAlloc::InsertConstraintsPass::exec(Function *ir)
{
   constrList.clear();

   bool ret = run(ir, true, true);
   if (ret)
      ret = insertConstraintMoves();
   return ret;
}

// TODO: make part of texture insn
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register overlapping the load's
// destination, which would produce random corruptions.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}
1898
// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
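// The wide value takes the place of the condensed defs on the instruction
// itself; an OP_SPLIT inserted right after it fans the components back out,
// so downstream users keep seeing the original narrow values.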
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n);
   condenseDefs(insn, 0, n - 1);
}

void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getDef(s)->reg.size;
   if (!size)
      return;

   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = a; d <= b; ++d) {
      split->setDef(d - a, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(a, lval);

   for (int k = a + 1, d = b + 1; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}

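// Mirror image of condenseDefs(): the source range [a, b] is replaced by one
// wide value, defined by an OP_MERGE inserted right before the instruction.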
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

bool
RegAlloc::InsertConstraintsPass::isScalarTexGM107(TexInstruction *tex)
{
   if (tex->tex.sIndirectSrc >= 0 ||
       tex->tex.rIndirectSrc >= 0 ||
       tex->tex.derivAll)
      return false;

   if (tex->tex.mask == 5 || tex->tex.mask == 6)
      return false;

   switch (tex->op) {
   case OP_TEX:
   case OP_TXF:
   case OP_TXG:
   case OP_TXL:
      break;
   default:
      return false;
   }

   // legal variants:
   // TEXS.1D.LZ
   // TEXS.2D
   // TEXS.2D.LZ
   // TEXS.2D.LL
   // TEXS.2D.DC
   // TEXS.2D.LL.DC
   // TEXS.2D.LZ.DC
   // TEXS.A2D
   // TEXS.A2D.LZ
   // TEXS.A2D.LZ.DC
   // TEXS.3D
   // TEXS.3D.LZ
   // TEXS.CUBE
   // TEXS.CUBE.LL

   // TLDS.1D.LZ
   // TLDS.1D.LL
   // TLDS.2D.LZ
   // TLDS.2D.LZ.AOFFI
   // TLDS.2D.LZ.MZ
   // TLDS.2D.LL
   // TLDS.2D.LL.AOFFI
   // TLDS.A2D.LZ
   // TLDS.3D.LZ

   // TLD4S: all 2D/RECT variants and only offset

   switch (tex->op) {
   case OP_TEX:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
      case TEX_TARGET_2D_ARRAY_SHADOW:
         return tex->tex.levelZero;
      case TEX_TARGET_CUBE:
         return !tex->tex.levelZero;
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_3D:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   case OP_TXL:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
      case TEX_TARGET_CUBE:
         return true;
      default:
         return false;
      }

   case OP_TXF:
      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
         return !tex->tex.useOffsets;
      case TEX_TARGET_2D:
      case TEX_TARGET_RECT:
         return true;
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_3D:
         return !tex->tex.useOffsets && tex->tex.levelZero;
      default:
         return false;
      }

   case OP_TXG:
      if (tex->tex.useOffsets > 1)
         return false;
      if (tex->tex.mask != 0x3 && tex->tex.mask != 0xf)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   default:
      return false;
   }
}

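// Rewrite an eligible tex into the scalar (TEXS/TLDS/TLD4S) register layout:
// defs and sources are condensed into 64-bit pairs, so at most two def and
// two source registers remain (asserted at the end).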
void
RegAlloc::InsertConstraintsPass::handleScalarTexGM107(TexInstruction *tex)
{
   int defCount = tex->defCount(0xff);
   int srcCount = tex->srcCount(0xff);

   tex->tex.scalar = true;

   // 1. handle defs
   if (defCount > 3)
      condenseDefs(tex, 2, 3);
   if (defCount > 1)
      condenseDefs(tex, 0, 1);

   // 2. handle srcs
   // special case for TXF.A2D
   if (tex->op == OP_TXF && tex->tex.target == TEX_TARGET_2D_ARRAY) {
      assert(srcCount >= 3);
      condenseSrcs(tex, 1, 2);
   } else {
      if (srcCount > 3)
         condenseSrcs(tex, 2, 3);
      // only if we have more than 2 sources
      if (srcCount > 2)
         condenseSrcs(tex, 0, 1);
   }

   assert(!tex->defExists(2) && !tex->srcExists(2));
}

void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

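   // Only pre-GV100 chips take the scalar (TEXS-style) path; on GV100 and
   // newer, texture defs are simply packed into pairs below.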
   if (targ->getChipset() < NVISA_GV100_CHIPSET) {
      if (isScalarTexGM107(tex)) {
         handleScalarTexGM107(tex);
         return;
      }

      assert(!tex->tex.scalar);
      condenseDefs(tex);
   } else {
      if (isTextureOp(tex->op)) {
         int defCount = tex->defCount(0xff);
         if (defCount > 3)
            condenseDefs(tex, 2, 3);
         if (defCount > 1)
            condenseDefs(tex, 0, 1);
      } else {
         condenseDefs(tex);
      }
   }

   if (isSurfaceOp(tex->op)) {
      int s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      int n = 0;

      switch (tex->op) {
      case OP_SUSTB:
      case OP_SUSTP:
         n = 4;
         break;
      case OP_SUREDB:
      case OP_SUREDP:
         if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
            n = 2;
         break;
      default:
         break;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1)
         condenseSrcs(tex, 1, n); // do not condense the tex handle
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff, true) - s;
         // TODO: Is this necessary? Perhaps the sources just have to be
         // aligned to the same boundary as the first arg, not necessarily
         // to 4. This requirement has not been rigorously verified, as it
         // has been on Kepler.
         if (n > 0 && n < 3) {
            if (tex->srcExists(n + s)) // move potential predicate out of the way
               tex->moveSources(n + s, 3 - n);
            while (n < 3)
               tex->setSrc(s + n++, new_LValue(func, FILE_GPR));
         }
      } else {
         s = tex->srcCount(0xff, true);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

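// On Kepler, texture arguments live in two blocks of consecutive registers;
// the second block is padded with dummy values where needed so both blocks
// can be condensed to a fixed shape.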
void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, 6);
   } else
   if (isTextureOp(tex->op)) {
      int n = tex->srcCount(0xff, true);
      int s = n > 4 ? 4 : n;
      if (n > 4 && n < 7) {
         if (tex->srcExists(n)) // move potential predicate out of the way
            tex->moveSources(n, 7 - n);

         while (n < 7)
            tex->setSrc(n++, new_LValue(func, FILE_GPR));
      }
      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 4)
         condenseSrcs(tex, 1, n - s);
   }
}

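// On Fermi, the s coordinate sources are condensed into one block and the n
// remaining arguments (e.g. the four data values of a surface store) into a
// second one.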
void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else if (isSurfaceOp(tex->op)) {
      s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
         n = 4;
      else
         n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

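// On NV50, a tex reads and writes one and the same block of consecutive
// registers, so source and def counts are equalized with dummy values
// before condensing.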
void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      else
         insertConstraintMove(tex, c);
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
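         // Dispatch on the GPU family encoded in the chipset id: 0x50..0xa0
         // Tesla, 0xc0/0xd0 Fermi, 0xe0..0x100 Kepler, 0x110 and newer
         // Maxwell and later.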
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
         case 0x140:
         case 0x160:
         case 0x170:
         case 0x190:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
         if (i->op == OP_LOAD && i->fixed && targ->getChipset() < 0xc0) {
            // Add a hazard to make sure we keep the op around. These are used
            // for membars.
            Instruction *nop = new_Instruction(func, OP_NOP, i->dType);
            nop->setSrc(0, i->getDef(0));
            i->bb->insertAfter(i, nop);
         }
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      } else
      if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_CAS &&
          targ->getChipset() < 0xc0) {
         // Like a hazard, but for a def.
         Instruction *nop = new_Instruction(func, OP_NOP, i->dType);
         nop->setSrc(0, i->getDef(0));
         i->bb->insertAfter(i, nop);
      }
   }
   return true;
}

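// Give a constrained source its own copy so conflicting constraints can be
// resolved: usually via an extra MOV, but a single-use immediate MOV or
// non-indirect constant-buffer load is instead moved or rematerialized right
// at the constrained instruction.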
void
RegAlloc::InsertConstraintsPass::insertConstraintMove(Instruction *cst, int s)
{
   const uint8_t size = cst->src(s).getSize();

   assert(cst->getSrc(s)->defs.size() == 1); // still SSA

   Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();

   bool imm = defi->op == OP_MOV &&
      defi->src(0).getFile() == FILE_IMMEDIATE;
   bool load = defi->op == OP_LOAD &&
      defi->src(0).getFile() == FILE_MEMORY_CONST &&
      !defi->src(0).isIndirect(0);
   // catch some cases where we don't really need MOVs
   if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs()
       && defi->op != OP_MERGE && defi->op != OP_SPLIT) {
      if (imm || load) {
         // Move the defi right before the cst. No point in expanding
         // the range.
         defi->bb->remove(defi);
         cst->bb->insertBefore(cst, defi);
      }
      return;
   }

   LValue *lval = new_LValue(func, cst->src(s).getFile());
   lval->reg.size = size;

   Instruction *mov = new_Instruction(func, OP_MOV, typeOfSize(size));
   mov->setDef(0, lval);
   mov->setSrc(0, cst->getSrc(s));

   if (load) {
      mov->op = OP_LOAD;
      mov->setSrc(0, defi->getSrc(0));
   } else if (imm) {
      mov->setSrc(0, defi->getSrc(0));
   }

   if (defi->getPredicate())
      mov->setPredicate(defi->cc, defi->getPredicate());

   cst->setSrc(s, mov->getDef(0));
   cst->bb->insertBefore(cst, mov);

   cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
}

// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

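      // Note: the OP_SPLIT branch below is intentionally disabled ("&& false");
      // split defs are left as separate values and get their register ids
      // fixed up in resolveSplitsAndMerges().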
      if (cst->op == OP_SPLIT && false) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }

            insertConstraintMove(cst, s);
         }
      }
   }

   return true;
}

} // anonymous namespace

bool Program::registerAllocation()
{
   RegAlloc ra(this);
   return ra.exec();
}

} // namespace nv50_ir