1 //===- subzero/src/IceCfg.cpp - Control flow graph implementation ---------===//
2 //
3 // The Subzero Code Generator
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// \brief Implements the Cfg class.
12 ///
13 //===----------------------------------------------------------------------===//
14
15 #include "IceCfg.h"
16
17 #include "IceAssembler.h"
18 #include "IceBitVector.h"
19 #include "IceCfgNode.h"
20 #include "IceClFlags.h"
21 #include "IceDefs.h"
22 #include "IceELFObjectWriter.h"
23 #include "IceGlobalInits.h"
24 #include "IceInst.h"
25 #include "IceInstVarIter.h"
26 #include "IceInstrumentation.h"
27 #include "IceLiveness.h"
28 #include "IceLoopAnalyzer.h"
29 #include "IceOperand.h"
30 #include "IceTargetLowering.h"
31
32 #include <memory>
33 #include <utility>
34
35 namespace Ice {
36
// Constructor. NOTE: Allocator is created (and installed as the thread-local
// allocator) in the initializer list, *before* any other member is
// constructed, because the remaining members may allocate from it.
Cfg::Cfg(GlobalContext *Ctx, uint32_t SequenceNumber)
    : Allocator(createAllocator()), Ctx(Ctx), SequenceNumber(SequenceNumber),
      VMask(getFlags().getVerbose()), FunctionName(),
      NextInstNumber(Inst::NumberInitial), Live(nullptr) {
  NodeStrings.reset(new StringPool);
  VarStrings.reset(new StringPool);
  // Make this Cfg's arena the active allocator while the target lowering,
  // variable metadata, and assembler are created, so their allocations land
  // in this function's arena.
  CfgLocalAllocatorScope _(this);
  Target = TargetLowering::createLowering(getFlags().getTargetArch(), this);
  VMetadata.reset(new VariablesMetadata(this));
  TargetAssembler = Target->createAssembler();
}
48
~Cfg()49 Cfg::~Cfg() {
50 assert(CfgAllocatorTraits::current() == nullptr);
51 if (getFlags().getDumpStrings()) {
52 OstreamLocker _(Ctx);
53 Ostream &Str = Ctx->getStrDump();
54 getNodeStrings()->dump(Str);
55 getVarStrings()->dump(Str);
56 }
57 }
58
59 // Called in the initalizer list of Cfg's constructor to create the Allocator
60 // and set it as TLS before any other member fields are constructed, since they
61 // may depend on it.
createAllocator()62 ArenaAllocator *Cfg::createAllocator() {
63 ArenaAllocator *Allocator = new ArenaAllocator();
64 CfgAllocatorTraits::set_current(Allocator);
65 return Allocator;
66 }
67
68 /// Create a string like "foo(i=123:b=9)" indicating the function name, number
69 /// of high-level instructions, and number of basic blocks. This string is only
70 /// used for dumping and other diagnostics, and the idea is that given a set of
71 /// functions to debug a problem on, it's easy to find the smallest or simplest
72 /// function to attack. Note that the counts may change somewhat depending on
73 /// what point it is called during the translation passes.
getFunctionNameAndSize() const74 std::string Cfg::getFunctionNameAndSize() const {
75 if (!BuildDefs::dump())
76 return getFunctionName().toString();
77 SizeT NodeCount = 0;
78 SizeT InstCount = 0;
79 for (CfgNode *Node : getNodes()) {
80 ++NodeCount;
81 // Note: deleted instructions are *not* ignored.
82 InstCount += Node->getPhis().size();
83 for (Inst &I : Node->getInsts()) {
84 if (!llvm::isa<InstTarget>(&I))
85 ++InstCount;
86 }
87 }
88 return getFunctionName() + "(i=" + std::to_string(InstCount) +
89 ":b=" + std::to_string(NodeCount) + ")";
90 }
91
setError(const std::string & Message)92 void Cfg::setError(const std::string &Message) {
93 HasError = true;
94 ErrorMessage = Message;
95 }
96
makeNode()97 CfgNode *Cfg::makeNode() {
98 SizeT LabelIndex = Nodes.size();
99 auto *Node = CfgNode::create(this, LabelIndex);
100 Nodes.push_back(Node);
101 return Node;
102 }
103
swapNodes(NodeList & NewNodes)104 void Cfg::swapNodes(NodeList &NewNodes) {
105 assert(Nodes.size() == NewNodes.size());
106 Nodes.swap(NewNodes);
107 for (SizeT I = 0, NumNodes = getNumNodes(); I < NumNodes; ++I)
108 Nodes[I]->resetIndex(I);
109 }
110
makeVariable(Type Ty)111 template <> Variable *Cfg::makeVariable<Variable>(Type Ty) {
112 SizeT Index = Variables.size();
113 Variable *Var;
114 if (Target->shouldSplitToVariableVecOn32(Ty)) {
115 Var = VariableVecOn32::create(this, Ty, Index);
116 } else if (Target->shouldSplitToVariable64On32(Ty)) {
117 Var = Variable64On32::create(this, Ty, Index);
118 } else {
119 Var = Variable::create(this, Ty, Index);
120 }
121 Variables.push_back(Var);
122 return Var;
123 }
124
addArg(Variable * Arg)125 void Cfg::addArg(Variable *Arg) {
126 Arg->setIsArg();
127 Args.push_back(Arg);
128 }
129
addImplicitArg(Variable * Arg)130 void Cfg::addImplicitArg(Variable *Arg) {
131 Arg->setIsImplicitArg();
132 ImplicitArgs.push_back(Arg);
133 }
134
135 // Returns whether the stack frame layout has been computed yet. This is used
136 // for dumping the stack frame location of Variables.
hasComputedFrame() const137 bool Cfg::hasComputedFrame() const { return getTarget()->hasComputedFrame(); }
138
namespace {
// Name prefixes for the two globals emitted per basic block by block
// profiling: one holding the block's name string, and one holding its
// counter/info record (see createNodeNameDeclaration and
// createBlockProfilingInfoDeclaration below).
constexpr char BlockNameGlobalPrefix[] = ".L$profiler$block_name$";
constexpr char BlockStatsGlobalPrefix[] = ".L$profiler$block_info$";
} // end of anonymous namespace
143
createNodeNameDeclaration(const std::string & NodeAsmName)144 void Cfg::createNodeNameDeclaration(const std::string &NodeAsmName) {
145 auto *Var = VariableDeclaration::create(GlobalInits.get());
146 Var->setName(Ctx, BlockNameGlobalPrefix + NodeAsmName);
147 Var->setIsConstant(true);
148 Var->addInitializer(VariableDeclaration::DataInitializer::create(
149 GlobalInits.get(), NodeAsmName.data(), NodeAsmName.size() + 1));
150 const SizeT Int64ByteSize = typeWidthInBytes(IceType_i64);
151 Var->setAlignment(Int64ByteSize); // Wasteful, 32-bit could use 4 bytes.
152 GlobalInits->push_back(Var);
153 }
154
createBlockProfilingInfoDeclaration(const std::string & NodeAsmName,VariableDeclaration * NodeNameDeclaration)155 void Cfg::createBlockProfilingInfoDeclaration(
156 const std::string &NodeAsmName, VariableDeclaration *NodeNameDeclaration) {
157 auto *Var = VariableDeclaration::create(GlobalInits.get());
158 Var->setName(Ctx, BlockStatsGlobalPrefix + NodeAsmName);
159 const SizeT Int64ByteSize = typeWidthInBytes(IceType_i64);
160 Var->addInitializer(VariableDeclaration::ZeroInitializer::create(
161 GlobalInits.get(), Int64ByteSize));
162
163 const RelocOffsetT NodeNameDeclarationOffset = 0;
164 Var->addInitializer(VariableDeclaration::RelocInitializer::create(
165 GlobalInits.get(), NodeNameDeclaration,
166 {RelocOffset::create(Ctx, NodeNameDeclarationOffset)}));
167 Var->setAlignment(Int64ByteSize);
168 GlobalInits->push_back(Var);
169 }
170
// Top-level driver for translating this function: records the effective
// optimization level, sets up timers/diagnostics, initializes split
// variables, optionally instruments the Cfg, and then hands control to the
// target's translation pass pipeline.
void Cfg::translate() {
  if (hasError())
    return;
  // Cache the possibly-overridden optimization level once translation begins.
  // It would be nicer to do this in the constructor, but we need to wait until
  // after setFunctionName() has a chance to be called.
  OptimizationLevel =
      getFlags().matchForceO2(getFunctionName(), getSequenceNumber())
          ? Opt_2
          : getFlags().getOptLevel();
  if (BuildDefs::timers()) {
    // If this function is the timing focus, reset the default timer stack so
    // the reported numbers cover only this function's translation.
    if (getFlags().matchTimingFocus(getFunctionName(), getSequenceNumber())) {
      setFocusedTiming();
      getContext()->resetTimer(GlobalContext::TSK_Default);
    }
  }
  if (BuildDefs::dump()) {
    if (isVerbose(IceV_Status) &&
        getFlags().matchTestStatus(getFunctionName(), getSequenceNumber())) {
      getContext()->getStrDump()
          << ">>>Translating " << getFunctionNameAndSize()
          << " seq=" << getSequenceNumber() << "\n";
    }
  }
  TimerMarker T_func(getContext(), getFunctionName().toStringOrEmpty());
  TimerMarker T(TimerStack::TT_translate, this);

  dump("Initial CFG");

  // Create the Hi and Lo variables where a split was needed
  for (Variable *Var : Variables) {
    if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Var)) {
      Var64On32->initHiLo(this);
    } else if (auto *VarVecOn32 = llvm::dyn_cast<VariableVecOn32>(Var)) {
      VarVecOn32->initVecElement(this);
    }
  }

  // Instrument the Cfg, e.g. with AddressSanitizer
  if (!BuildDefs::minimal() && getFlags().getSanitizeAddresses()) {
    getContext()->instrumentFunc(this);
    dump("Instrumented CFG");
  }

  // The set of translation passes and their order are determined by the
  // target.
  getTarget()->translate();

  dump("Final output");
  if (getFocusedTiming()) {
    getContext()->dumpLocalTimers(getFunctionName().toString());
  }
}
224
fixPhiNodes()225 void Cfg::fixPhiNodes() {
226 for (auto *Node : Nodes) {
227 // Fix all the phi edges since WASM can't tell how to make them correctly at
228 // the beginning.
229 assert(Node);
230 const auto &InEdges = Node->getInEdges();
231 for (auto &Instr : Node->getPhis()) {
232 auto *Phi = llvm::cast<InstPhi>(&Instr);
233 assert(Phi);
234 for (SizeT i = 0; i < InEdges.size(); ++i) {
235 Phi->setLabel(i, InEdges[i]);
236 }
237 }
238 }
239 }
240
// Computes successor edges for every node, prunes nodes unreachable from the
// entry node, compacts the node list in place (renumbering survivors), then
// computes predecessor edges and validates phi consistency.
void Cfg::computeInOutEdges() {
  // Compute the out-edges.
  for (CfgNode *Node : Nodes) {
    Node->computeSuccessors();
  }

  // Prune any unreachable nodes before computing in-edges.
  // Standard worklist reachability: Pending holds nodes discovered but not
  // yet expanded; Reachable accumulates every node ever visited.
  SizeT NumNodes = getNumNodes();
  BitVector Reachable(NumNodes);
  BitVector Pending(NumNodes);
  Pending.set(getEntryNode()->getIndex());
  while (true) {
    int Index = Pending.find_first();
    if (Index == -1)
      break;
    Pending.reset(Index);
    Reachable.set(Index);
    CfgNode *Node = Nodes[Index];
    assert(Node->getIndex() == (SizeT)Index);
    for (CfgNode *Succ : Node->getOutEdges()) {
      SizeT SuccIndex = Succ->getIndex();
      if (!Reachable.test(SuccIndex))
        Pending.set(SuccIndex);
    }
  }
  // Compact the node list: keep only reachable nodes, sliding them down to
  // the front while preserving relative order, and renumber each survivor.
  SizeT Dest = 0;
  for (SizeT Source = 0; Source < NumNodes; ++Source) {
    if (Reachable.test(Source)) {
      Nodes[Dest] = Nodes[Source];
      Nodes[Dest]->resetIndex(Dest);
      // Compute the in-edges.
      Nodes[Dest]->computePredecessors();
      ++Dest;
    }
  }
  Nodes.resize(Dest);

  // With edges finalized, check that each phi agrees with its node's
  // in-edge list.
  TimerMarker T(TimerStack::TT_phiValidation, this);
  for (CfgNode *Node : Nodes)
    Node->enforcePhiConsistency();
}
282
renumberInstructions()283 void Cfg::renumberInstructions() {
284 TimerMarker T(TimerStack::TT_renumberInstructions, this);
285 NextInstNumber = Inst::NumberInitial;
286 for (CfgNode *Node : Nodes)
287 Node->renumberInstructions();
288 // Make sure the entry node is the first node and therefore got the lowest
289 // instruction numbers, to facilitate live range computation of function
290 // arguments. We want to model function arguments as being live on entry to
291 // the function, otherwise an argument whose only use is in the first
292 // instruction will be assigned a trivial live range and the register
293 // allocator will not recognize its live range as overlapping another
294 // variable's live range.
295 assert(Nodes.empty() || (*Nodes.begin() == getEntryNode()));
296 }
297
298 // placePhiLoads() must be called before placePhiStores().
placePhiLoads()299 void Cfg::placePhiLoads() {
300 TimerMarker T(TimerStack::TT_placePhiLoads, this);
301 for (CfgNode *Node : Nodes)
302 Node->placePhiLoads();
303 }
304
305 // placePhiStores() must be called after placePhiLoads().
placePhiStores()306 void Cfg::placePhiStores() {
307 TimerMarker T(TimerStack::TT_placePhiStores, this);
308 for (CfgNode *Node : Nodes)
309 Node->placePhiStores();
310 }
311
deletePhis()312 void Cfg::deletePhis() {
313 TimerMarker T(TimerStack::TT_deletePhis, this);
314 for (CfgNode *Node : Nodes)
315 Node->deletePhis();
316 }
317
// Lowers phis by splitting edges (appending new nodes), then incrementally
// updates liveness/live ranges for just the new nodes before running the
// phi-specific register allocation pass.
void Cfg::advancedPhiLowering() {
  TimerMarker T(TimerStack::TT_advancedPhiLowering, this);
  // Clear all previously computed live ranges (but not live-in/live-out bit
  // vectors or last-use markers), because the follow-on register allocation is
  // only concerned with live ranges across the newly created blocks.
  for (Variable *Var : Variables) {
    Var->getLiveRange().reset();
  }
  // This splits edges and appends new nodes to the end of the node list. This
  // can invalidate iterators, so don't use an iterator.
  // NumNodes/NumVars snapshot the pre-split sizes; entries at or beyond these
  // indices afterwards are the newly created nodes/variables.
  SizeT NumNodes = getNumNodes();
  SizeT NumVars = getNumVariables();
  for (SizeT I = 0; I < NumNodes; ++I)
    Nodes[I]->advancedPhiLowering();

  TimerMarker TT(TimerStack::TT_lowerPhiAssignments, this);
  // The constant-true condition selects the fast incremental-update path; the
  // else branch is a brute-force recomputation kept as a debugging reference
  // (flip to false to enable it).
  if (true) {
    // The following code does an in-place update of liveness and live ranges
    // as a result of adding the new phi edge split nodes.
    getLiveness()->initPhiEdgeSplits(Nodes.begin() + NumNodes,
                                     Variables.begin() + NumVars);
    TimerMarker TTT(TimerStack::TT_liveness, this);
    // Iterate over the newly added nodes to add their liveness info.
    for (auto I = Nodes.begin() + NumNodes, E = Nodes.end(); I != E; ++I) {
      InstNumberT FirstInstNum = getNextInstNumber();
      (*I)->renumberInstructions();
      InstNumberT LastInstNum = getNextInstNumber() - 1;
      (*I)->liveness(getLiveness());
      (*I)->livenessAddIntervals(getLiveness(), FirstInstNum, LastInstNum);
    }
  } else {
    // The following code does a brute-force recalculation of live ranges as a
    // result of adding the new phi edge split nodes. The liveness calculation
    // is particularly expensive because the new nodes are not yet in a proper
    // topological order and so convergence is slower.
    //
    // This code is kept here for reference and can be temporarily enabled in
    // case the incremental code is under suspicion.
    renumberInstructions();
    liveness(Liveness_Intervals);
    getVMetadata()->init(VMK_All);
  }
  Target->regAlloc(RAK_Phi);
}
362
// Find a reasonable placement for nodes that have not yet been placed, while
// maintaining the same relative ordering among already placed nodes.
void Cfg::reorderNodes() {
  // TODO(ascull): it would be nice if the switch tests were always followed by
  // the default case to allow for fall through.
  using PlacedList = CfgList<CfgNode *>;
  PlacedList Placed;      // Nodes with relative placement locked down
  PlacedList Unreachable; // Unreachable nodes
  PlacedList::iterator NoPlace = Placed.end();
  // Keep track of where each node has been tentatively placed so that we can
  // manage insertions into the middle.
  CfgVector<PlacedList::iterator> PlaceIndex(Nodes.size(), NoPlace);
  for (CfgNode *Node : Nodes) {
    // The "do ... while(0);" construct is to factor out the --PlaceIndex and
    // assert() statements before moving to the next node.
    do {
      if (Node != getEntryNode() && Node->getInEdges().empty()) {
        // The node has essentially been deleted since it is not a successor of
        // any other node.
        Unreachable.push_back(Node);
        PlaceIndex[Node->getIndex()] = Unreachable.end();
        Node->setNeedsPlacement(false);
        continue;
      }
      if (!Node->needsPlacement()) {
        // Add to the end of the Placed list.
        Placed.push_back(Node);
        PlaceIndex[Node->getIndex()] = Placed.end();
        continue;
      }
      Node->setNeedsPlacement(false);
      // Assume for now that the unplaced node is from edge-splitting and
      // therefore has 1 in-edge and 1 out-edge (actually, possibly more than 1
      // in-edge if the predecessor node was contracted). If this changes in
      // the future, rethink the strategy.
      assert(Node->getInEdges().size() >= 1);
      assert(Node->hasSingleOutEdge());

      // If it's a (non-critical) edge where the successor has a single
      // in-edge, then place it before the successor.
      CfgNode *Succ = Node->getOutEdges().front();
      if (Succ->getInEdges().size() == 1 &&
          PlaceIndex[Succ->getIndex()] != NoPlace) {
        Placed.insert(PlaceIndex[Succ->getIndex()], Node);
        PlaceIndex[Node->getIndex()] = PlaceIndex[Succ->getIndex()];
        continue;
      }

      // Otherwise, place it after the (first) predecessor.
      CfgNode *Pred = Node->getInEdges().front();
      auto PredPosition = PlaceIndex[Pred->getIndex()];
      // It shouldn't be the case that PredPosition==NoPlace, but if that
      // somehow turns out to be true, we just insert Node before
      // PredPosition=NoPlace=Placed.end() .
      if (PredPosition != NoPlace)
        ++PredPosition;
      Placed.insert(PredPosition, Node);
      PlaceIndex[Node->getIndex()] = PredPosition;
    } while (0);

    // In every branch above, PlaceIndex[Node] was left pointing one past
    // Node's list position (list insert/push_back leave the "next" position);
    // decrement so it points at Node itself, and verify.
    --PlaceIndex[Node->getIndex()];
    assert(*PlaceIndex[Node->getIndex()] == Node);
  }

  // Reorder Nodes according to the built-up lists: placed nodes first, then
  // unreachable ones at the end.
  NodeList Reordered;
  Reordered.reserve(Placed.size() + Unreachable.size());
  for (CfgNode *Node : Placed)
    Reordered.push_back(Node);
  for (CfgNode *Node : Unreachable)
    Reordered.push_back(Node);
  assert(getNumNodes() == Reordered.size());
  swapNodes(Reordered);
}
437
// Basic-block local common-subexpression elimination over arithmetic
// instructions. \param AssumeSSA when true (the default pipeline position),
// every variable has a single definition, so no invalidation bookkeeping is
// needed.
void Cfg::localCSE(bool AssumeSSA) {
  // Performs basic-block local common-subexpression elimination
  // If we have
  // t1 = op b c
  // t2 = op b c
  // This pass will replace future references to t2 in a basic block by t1
  // Points to note:
  // 1. Assumes SSA by default. To change this, use -lcse=no-ssa
  //    This is needed if this pass is moved to a point later in the pipeline.
  //    If variables have a single definition (in the node), CSE can work just
  //    on the basis of an equality compare on instructions (sans Dest). When
  //    variables can be updated (hence, non-SSA) the result of a previous
  //    instruction which used that variable as an operand can not be reused.
  // 2. Leaves removal of instructions to DCE.
  // 3. Only enabled on arithmetic instructions. pnacl-clang (-O2) is expected
  //    to take care of cases not arising from GEP simplification.
  // 4. By default, a single pass is made over each basic block. Control this
  //    with -lcse-max-iters=N

  TimerMarker T(TimerStack::TT_localCse, this);
  // Hash a Variable pointer by its precomputed hash value.
  struct VariableHash {
    size_t operator()(const Variable *Var) const { return Var->hashValue(); }
  };

  // Hash an instruction by its kind combined with its source operands'
  // hashes (the destination is deliberately excluded).
  struct InstHash {
    size_t operator()(const Inst *Instr) const {
      auto Kind = Instr->getKind();
      auto Result =
          std::hash<typename std::underlying_type<Inst::InstKind>::type>()(
              Kind);
      for (SizeT i = 0; i < Instr->getSrcSize(); ++i) {
        Result ^= Instr->getSrc(i)->hashValue();
      }
      return Result;
    }
  };
  // Two instructions are "equal" if they have the same kind, same arithmetic
  // op (when applicable), and identical source operands (compared by
  // pointer, valid for Variables and pooled Constants).
  struct InstEq {
    bool srcEq(const Operand *A, const Operand *B) const {
      if (llvm::isa<Variable>(A) || llvm::isa<Constant>(A))
        return (A == B);
      return false;
    }
    bool operator()(const Inst *InstrA, const Inst *InstrB) const {
      if ((InstrA->getKind() != InstrB->getKind()) ||
          (InstrA->getSrcSize() != InstrB->getSrcSize()))
        return false;

      if (auto *A = llvm::dyn_cast<InstArithmetic>(InstrA)) {
        auto *B = llvm::cast<InstArithmetic>(InstrB);
        // A, B are guaranteed to be of the same 'kind' at this point
        // So, dyn_cast is not needed
        if (A->getOp() != B->getOp())
          return false;
      }
      // Does not enter loop if different kind or number of operands
      for (SizeT i = 0; i < InstrA->getSrcSize(); ++i) {
        if (!srcEq(InstrA->getSrc(i), InstrB->getSrc(i)))
          return false;
      }
      return true;
    }
  };

  for (CfgNode *Node : getNodes()) {
    CfgUnorderedSet<Inst *, InstHash, InstEq> Seen;
    // Stores currently available instructions.

    CfgUnorderedMap<Variable *, Variable *, VariableHash> Replacements;
    // Combining the above two into a single data structure might consume less
    // memory but will be slower i.e map of Instruction -> Set of Variables

    CfgUnorderedMap<Variable *, std::vector<Inst *>, VariableHash> Dependency;
    // Maps a variable to the Instructions that depend on it.
    // a = op1 b c
    // x = op2 c d
    // Will result in the map : b -> {a}, c -> {a, x}, d -> {x}
    // Not necessary for SSA as dependencies will never be invalidated, and the
    // container will use minimal memory when left unused.

    auto IterCount = getFlags().getLocalCseMaxIterations();

    // NOTE(review): the inner loops below redeclare `i`, shadowing this
    // iteration counter; legal but worth cleaning up.
    for (uint32_t i = 0; i < IterCount; ++i) {
      // TODO(manasijm): Stats on IterCount -> performance
      for (Inst &Instr : Node->getInsts()) {
        if (Instr.isDeleted() || !llvm::isa<InstArithmetic>(&Instr))
          continue;
        if (!AssumeSSA) {
          // Invalidate replacements
          auto Iter = Replacements.find(Instr.getDest());
          if (Iter != Replacements.end()) {
            Replacements.erase(Iter);
          }

          // Invalidate 'seen' instructions whose operands were just updated.
          auto DepIter = Dependency.find(Instr.getDest());
          if (DepIter != Dependency.end()) {
            for (auto *DepInst : DepIter->second) {
              Seen.erase(DepInst);
            }
          }
        }

        // Replace - doing this before checking for repetitions might enable
        // more optimizations
        // NOTE(review): on a second iteration, replacing a source of an
        // instruction that is already stored in Seen changes its hash while
        // it is a set key — presumably benign in practice, but verify.
        for (SizeT i = 0; i < Instr.getSrcSize(); ++i) {
          auto *Opnd = Instr.getSrc(i);
          if (auto *Var = llvm::dyn_cast<Variable>(Opnd)) {
            if (Replacements.find(Var) != Replacements.end()) {
              Instr.replaceSource(i, Replacements[Var]);
            }
          }
        }

        // Check for repetitions
        auto SeenIter = Seen.find(&Instr);
        if (SeenIter != Seen.end()) { // seen before
          const Inst *Found = *SeenIter;
          // Record that this instruction's dest can be replaced by the
          // earlier equivalent instruction's dest; DCE removes the dup.
          Replacements[Instr.getDest()] = Found->getDest();
        } else { // new
          Seen.insert(&Instr);

          if (!AssumeSSA) {
            // Update dependencies
            for (SizeT i = 0; i < Instr.getSrcSize(); ++i) {
              auto *Opnd = Instr.getSrc(i);
              if (auto *Var = llvm::dyn_cast<Variable>(Opnd)) {
                Dependency[Var].push_back(&Instr);
              }
            }
          }
        }
      }
    }
  }
}
573
loopInvariantCodeMotion()574 void Cfg::loopInvariantCodeMotion() {
575 TimerMarker T(TimerStack::TT_loopInvariantCodeMotion, this);
576 // Does not introduce new nodes as of now.
577 for (auto &Loop : LoopInfo) {
578 CfgNode *Header = Loop.Header;
579 assert(Header);
580 if (Header->getLoopNestDepth() < 1)
581 return;
582 CfgNode *PreHeader = Loop.PreHeader;
583 if (PreHeader == nullptr || PreHeader->getInsts().size() == 0) {
584 return; // try next loop
585 }
586
587 auto &Insts = PreHeader->getInsts();
588 auto &LastInst = Insts.back();
589 Insts.pop_back();
590
591 for (auto *Inst : findLoopInvariantInstructions(Loop.Body)) {
592 PreHeader->appendInst(Inst);
593 }
594 PreHeader->appendInst(&LastInst);
595 }
596 }
597
// Computes, to a fixed point, the set of instructions in the loop body (given
// as a set of node indices) whose operands are all loop-invariant, removes
// them from their nodes, and returns them sorted by original instruction
// number so relative order is preserved when they are re-appended.
CfgVector<Inst *>
Cfg::findLoopInvariantInstructions(const CfgUnorderedSet<SizeT> &Body) {
  CfgUnorderedSet<Inst *> InvariantInsts;
  CfgUnorderedSet<Variable *> InvariantVars;
  // Function arguments are invariant by definition.
  for (auto *Var : getArgs()) {
    InvariantVars.insert(Var);
  }
  bool Changed = false;
  do {
    Changed = false;
    for (auto NodeIndex : Body) {
      auto *Node = Nodes[NodeIndex];
      // Snapshot the instruction list, since invariant instructions are
      // removed from the node while we iterate.
      CfgVector<std::reference_wrapper<Inst>> Insts(Node->getInsts().begin(),
                                                    Node->getInsts().end());

      for (auto &InstRef : Insts) {
        auto &Inst = InstRef.get();
        if (Inst.isDeleted() ||
            InvariantInsts.find(&Inst) != InvariantInsts.end())
          continue;
        // Instructions with side effects, control flow, or memory access are
        // never hoisted.
        switch (Inst.getKind()) {
        case Inst::InstKind::Alloca:
        case Inst::InstKind::Br:
        case Inst::InstKind::Ret:
        case Inst::InstKind::Phi:
        case Inst::InstKind::Call:
        case Inst::InstKind::Intrinsic:
        case Inst::InstKind::Load:
        case Inst::InstKind::Store:
        case Inst::InstKind::Switch:
          continue;
        default:
          break;
        }

        // Invariant iff every Variable operand is already known invariant
        // (non-Variable operands, i.e. constants, are inherently invariant).
        bool IsInvariant = true;
        for (SizeT i = 0; i < Inst.getSrcSize(); ++i) {
          if (auto *Var = llvm::dyn_cast<Variable>(Inst.getSrc(i))) {
            if (InvariantVars.find(Var) == InvariantVars.end()) {
              IsInvariant = false;
            }
          }
        }
        if (IsInvariant) {
          Changed = true;
          InvariantInsts.insert(&Inst);
          Node->getInsts().remove(Inst);
          // The dest becomes invariant, possibly enabling more hoisting on
          // the next fixed-point pass.
          if (Inst.getDest() != nullptr) {
            InvariantVars.insert(Inst.getDest());
          }
        }
      }
    }
  } while (Changed);

  // Restore deterministic program order before returning.
  CfgVector<Inst *> InstVector(InvariantInsts.begin(), InvariantInsts.end());
  std::sort(InstVector.begin(), InstVector.end(),
            [](Inst *A, Inst *B) { return A->getNumber() < B->getNumber(); });
  return InstVector;
}
658
// Splits nodes to enable short-circuit evaluation of AND/OR branch
// conditions, then rebuilds the node list so each split-off node follows its
// originating node.
void Cfg::shortCircuitJumps() {
  // Split Nodes whenever an early jump is possible.
  // __N :
  //   a = <something>
  //   Instruction 1 without side effect
  //   ... b = <something> ...
  //   Instruction N without side effect
  //   t1 = or a b
  //   br t1 __X __Y
  //
  // is transformed into:
  // __N :
  //   a = <something>
  //   br a __X __N_ext
  //
  // __N_ext :
  //   Instruction 1 without side effect
  //   ... b = <something> ...
  //   Instruction N without side effect
  //   br b __X __Y
  // (Similar logic for AND, jump to false instead of true target.)

  TimerMarker T(TimerStack::TT_shortCircuit, this);
  getVMetadata()->init(VMK_Uses);
  // Worklist of nodes still to examine. When a node splits, both the new node
  // and the original are pushed back, since either may split again.
  auto NodeStack = this->getNodes();
  // Records, per original node index, the nodes split off from it, in order.
  CfgUnorderedMap<SizeT, CfgVector<CfgNode *>> Splits;
  while (!NodeStack.empty()) {
    auto *Node = NodeStack.back();
    NodeStack.pop_back();
    auto NewNode = Node->shortCircuit();
    if (NewNode) {
      NodeStack.push_back(NewNode);
      NodeStack.push_back(Node);
      Splits[Node->getIndex()].push_back(NewNode);
    }
  }

  // Insert nodes in the right place: walk the original order, and after each
  // node emit (depth-first) all nodes split from it.
  NodeList NewList;
  NewList.reserve(Nodes.size());
  CfgUnorderedSet<SizeT> Inserted;
  for (auto *Node : Nodes) {
    if (Inserted.find(Node->getIndex()) != Inserted.end())
      continue; // already inserted
    NodeList Stack{Node};
    while (!Stack.empty()) {
      auto *Current = Stack.back();
      Stack.pop_back();
      Inserted.insert(Current->getIndex());
      NewList.push_back(Current);
      for (auto *Next : Splits[Current->getIndex()]) {
        Stack.push_back(Next);
      }
    }
  }

  // Renumber everything to match the new order and install the new list.
  SizeT NodeIndex = 0;
  for (auto *Node : NewList) {
    Node->resetIndex(NodeIndex++);
  }
  Nodes = NewList;
}
721
// Per-node CSE of floating-point constant materializations: heavily-used f32
// and f64 constants are loaded into a variable once per call-free region and
// the uses redirected to that variable.
void Cfg::floatConstantCSE() {
  // Load multiple uses of a floating point constant (between two call
  // instructions or block start/end) into a variable before its first use.
  //   t1 = b + 1.0
  //   t2 = c + 1.0
  // Gets transformed to:
  //   t0 = 1.0
  //   t0_1 = t0
  //   t1 = b + t0_1
  //   t2 = c + t0_1
  // Call instructions reset the procedure, but use the same variable, just in
  // case it got a register. We are assuming floating point registers are not
  // callee saved in general. Example, continuing from before:
  //   result = call <some function>
  //   t3 = d + 1.0
  // Gets transformed to:
  //   result = call <some function>
  //   t0_2 = t0
  //   t3 = d + t0_2
  // TODO(manasijm, stichnot): Figure out how to 'link' t0 to the stack slot of
  // 1.0. When t0 does not get a register, introducing an extra assignment
  // statement does not make sense. The relevant portion is marked below.

  TimerMarker _(TimerStack::TT_floatConstantCse, this);
  for (CfgNode *Node : getNodes()) {

    // Maps each constant to the "t0" variable holding it, shared across all
    // call-free regions of this node.
    CfgUnorderedMap<Constant *, Variable *> ConstCache;
    auto Current = Node->getInsts().begin();
    auto End = Node->getInsts().end();
    while (Current != End) {
      // Collect, per float constant, the instructions in the current
      // call-free region that use it.
      CfgUnorderedMap<Constant *, CfgVector<InstList::iterator>> FloatUses;
      if (llvm::isa<InstCall>(iteratorToInst(Current))) {
        ++Current;
        assert(Current != End);
        // Block should not end with a call
      }
      while (Current != End && !llvm::isa<InstCall>(iteratorToInst(Current))) {
        if (!Current->isDeleted()) {
          for (SizeT i = 0; i < Current->getSrcSize(); ++i) {
            if (auto *Const = llvm::dyn_cast<Constant>(Current->getSrc(i))) {
              if (Const->getType() == IceType_f32 ||
                  Const->getType() == IceType_f64) {
                FloatUses[Const].push_back(Current);
              }
            }
          }
        }
        ++Current;
      }
      for (auto &Pair : FloatUses) {
        static constexpr SizeT MinUseThreshold = 3;
        if (Pair.second.size() < MinUseThreshold)
          continue;
        // Only consider constants with at least `MinUseThreshold` uses
        auto &Insts = Node->getInsts();

        if (ConstCache.find(Pair.first) == ConstCache.end()) {
          // Saw a constant (which is used at least twice) for the first time
          // NOTE(review): "at least twice" predates MinUseThreshold = 3 above;
          // the threshold is what is actually enforced.
          auto *NewVar = makeVariable(Pair.first->getType());
          // NewVar->setLinkedTo(Pair.first);
          // TODO(manasijm): Plumbing for linking to an Operand.
          auto *Assign = InstAssign::create(Node->getCfg(), NewVar, Pair.first);
          Insts.insert(Pair.second[0], Assign);
          ConstCache[Pair.first] = NewVar;
        }

        // Per-region copy ("t0_N = t0") inserted before the first use, and
        // every use in the region rewritten to reference it.
        auto *NewVar = makeVariable(Pair.first->getType());
        NewVar->setLinkedTo(ConstCache[Pair.first]);
        auto *Assign =
            InstAssign::create(Node->getCfg(), NewVar, ConstCache[Pair.first]);

        Insts.insert(Pair.second[0], Assign);
        for (auto InstUse : Pair.second) {
          for (SizeT i = 0; i < InstUse->getSrcSize(); ++i) {
            if (auto *Const = llvm::dyn_cast<Constant>(InstUse->getSrc(i))) {
              if (Const == Pair.first) {
                InstUse->replaceSource(i, NewVar);
              }
            }
          }
        }
      }
    }
  }
}
807
doArgLowering()808 void Cfg::doArgLowering() {
809 TimerMarker T(TimerStack::TT_doArgLowering, this);
810 getTarget()->lowerArguments();
811 }
812
sortAndCombineAllocas(CfgVector<InstAlloca * > & Allocas,uint32_t CombinedAlignment,InstList & Insts,AllocaBaseVariableType BaseVariableType)813 void Cfg::sortAndCombineAllocas(CfgVector<InstAlloca *> &Allocas,
814 uint32_t CombinedAlignment, InstList &Insts,
815 AllocaBaseVariableType BaseVariableType) {
816 if (Allocas.empty())
817 return;
818 // Sort by decreasing alignment.
819 std::sort(Allocas.begin(), Allocas.end(), [](InstAlloca *A1, InstAlloca *A2) {
820 uint32_t Align1 = A1->getAlignInBytes();
821 uint32_t Align2 = A2->getAlignInBytes();
822 if (Align1 == Align2)
823 return A1->getNumber() < A2->getNumber();
824 else
825 return Align1 > Align2;
826 });
827 // Process the allocas in order of decreasing stack alignment. This allows
828 // us to pack less-aligned pieces after more-aligned ones, resulting in less
829 // stack growth. It also allows there to be at most one stack alignment "and"
830 // instruction for a whole list of allocas.
831 uint32_t CurrentOffset = 0;
832 CfgVector<int32_t> Offsets;
833 for (Inst *Instr : Allocas) {
834 auto *Alloca = llvm::cast<InstAlloca>(Instr);
835 // Adjust the size of the allocation up to the next multiple of the
836 // object's alignment.
837 uint32_t Alignment = std::max(Alloca->getAlignInBytes(), 1u);
838 auto *ConstSize =
839 llvm::dyn_cast<ConstantInteger32>(Alloca->getSizeInBytes());
840 const uint32_t Size =
841 Utils::applyAlignment(ConstSize->getValue(), Alignment);
842
843 // Ensure that the Size does not exceed StackSizeLimit which can lead to
844 // undefined behavior below.
845 if (Size > StackSizeLimit) {
846 llvm::report_fatal_error("Local variable exceeds stack size limit");
847 return; // NOTREACHED
848 }
849
850 if (BaseVariableType == BVT_FramePointer) {
851 // Addressing is relative to the frame pointer. Subtract the offset after
852 // adding the size of the alloca, because it grows downwards from the
853 // frame pointer.
854 Offsets.push_back(Target->getFramePointerOffset(CurrentOffset, Size));
855 } else {
856 // Addressing is relative to the stack pointer or to a user pointer. Add
857 // the offset before adding the size of the object, because it grows
858 // upwards from the stack pointer. In addition, if the addressing is
859 // relative to the stack pointer, we need to add the pre-computed max out
860 // args size bytes.
861 const uint32_t OutArgsOffsetOrZero =
862 (BaseVariableType == BVT_StackPointer)
863 ? getTarget()->maxOutArgsSizeBytes()
864 : 0;
865 Offsets.push_back(CurrentOffset + OutArgsOffsetOrZero);
866 }
867
868 // Ensure that the addition below does not overflow or exceed
869 // StackSizeLimit as this leads to undefined behavior.
870 if (CurrentOffset + Size > StackSizeLimit) {
871 llvm::report_fatal_error("Local variable exceeds stack size limit");
872 return; // NOTREACHED
873 }
874
875 // Update the running offset of the fused alloca region.
876 CurrentOffset += Size;
877 }
878 // Round the offset up to the alignment granularity to use as the size.
879 uint32_t TotalSize = Utils::applyAlignment(CurrentOffset, CombinedAlignment);
880 // Ensure every alloca was assigned an offset.
881 assert(Allocas.size() == Offsets.size());
882
883 switch (BaseVariableType) {
884 case BVT_UserPointer: {
885 Variable *BaseVariable = makeVariable(IceType_i32);
886 for (SizeT i = 0; i < Allocas.size(); ++i) {
887 auto *Alloca = llvm::cast<InstAlloca>(Allocas[i]);
888 // Emit a new addition operation to replace the alloca.
889 Operand *AllocaOffset = Ctx->getConstantInt32(Offsets[i]);
890 InstArithmetic *Add =
891 InstArithmetic::create(this, InstArithmetic::Add, Alloca->getDest(),
892 BaseVariable, AllocaOffset);
893 Insts.push_front(Add);
894 Alloca->setDeleted();
895 }
896 Operand *AllocaSize = Ctx->getConstantInt32(TotalSize);
897 InstAlloca *CombinedAlloca =
898 InstAlloca::create(this, BaseVariable, AllocaSize, CombinedAlignment);
899 CombinedAlloca->setKnownFrameOffset();
900 Insts.push_front(CombinedAlloca);
901 } break;
902 case BVT_StackPointer:
903 case BVT_FramePointer: {
904 for (SizeT i = 0; i < Allocas.size(); ++i) {
905 auto *Alloca = llvm::cast<InstAlloca>(Allocas[i]);
906 // Emit a fake definition of the rematerializable variable.
907 Variable *Dest = Alloca->getDest();
908 auto *Def = InstFakeDef::create(this, Dest);
909 if (BaseVariableType == BVT_StackPointer)
910 Dest->setRematerializable(getTarget()->getStackReg(), Offsets[i]);
911 else
912 Dest->setRematerializable(getTarget()->getFrameReg(), Offsets[i]);
913 Insts.push_front(Def);
914 Alloca->setDeleted();
915 }
916 // Allocate the fixed area in the function prolog.
917 getTarget()->reserveFixedAllocaArea(TotalSize, CombinedAlignment);
918 } break;
919 }
920 }
921
// Lays out the entry-block allocas. When SortAndCombine is false, only marks
// constant-size entry-block allocas as having a known frame offset and
// decides whether a frame pointer is needed. When true, additionally
// partitions the fixed-size allocas into two groups and hands each group to
// sortAndCombineAllocas() to be fused into a single region, then seeds
// rematerialization via findRematerializable().
void Cfg::processAllocas(bool SortAndCombine) {
  TimerMarker _(TimerStack::TT_alloca, this);
  const uint32_t StackAlignment = getTarget()->getStackAlignment();
  CfgNode *EntryNode = getEntryNode();
  assert(EntryNode);
  // LLVM enforces power of 2 alignment.
  assert(llvm::isPowerOf2_32(StackAlignment));
  // If the ABI's stack alignment is smaller than the vector size (16 bytes),
  // conservatively use a frame pointer to allow for explicit alignment of the
  // stack pointer. This needs to happen before register allocation so the frame
  // pointer can be reserved.
  if (getTarget()->needsStackPointerAlignment()) {
    getTarget()->setHasFramePointer();
  }
  // Determine if there are large alignment allocations in the entry block or
  // dynamic allocations (variable size in the entry block).
  bool HasLargeAlignment = false;
  bool HasDynamicAllocation = false;
  for (Inst &Instr : EntryNode->getInsts()) {
    if (Instr.isDeleted())
      continue;
    if (auto *Alloca = llvm::dyn_cast<InstAlloca>(&Instr)) {
      uint32_t AlignmentParam = Alloca->getAlignInBytes();
      if (AlignmentParam > StackAlignment)
        HasLargeAlignment = true;
      // A constant-size alloca in the entry block can be given a fixed slot.
      if (llvm::isa<Constant>(Alloca->getSizeInBytes()))
        Alloca->setKnownFrameOffset();
      else {
        HasDynamicAllocation = true;
        // If Allocas are not sorted, the first dynamic allocation causes
        // later Allocas to be at unknown offsets relative to the stack/frame.
        if (!SortAndCombine)
          break;
      }
    }
  }
  // Don't do the heavyweight sorting and layout for low optimization levels.
  if (!SortAndCombine)
    return;
  // Any alloca outside the entry block is a dynamic allocation.
  for (CfgNode *Node : Nodes) {
    if (Node == EntryNode)
      continue;
    for (Inst &Instr : Node->getInsts()) {
      if (Instr.isDeleted())
        continue;
      if (llvm::isa<InstAlloca>(&Instr)) {
        // Allocations outside the entry block require a frame pointer.
        HasDynamicAllocation = true;
        break;
      }
    }
    // One dynamic alloca anywhere is enough; stop scanning further nodes.
    if (HasDynamicAllocation)
      break;
  }
  // Mark the target as requiring a frame pointer.
  if (HasLargeAlignment || HasDynamicAllocation)
    getTarget()->setHasFramePointer();
  // Collect the Allocas into the two vectors.
  // Allocas in the entry block that have constant size and alignment less
  // than or equal to the function's stack alignment.
  CfgVector<InstAlloca *> FixedAllocas;
  // Allocas in the entry block that have constant size and alignment greater
  // than the function's stack alignment.
  CfgVector<InstAlloca *> AlignedAllocas;
  // Maximum alignment used by any alloca.
  uint32_t MaxAlignment = StackAlignment;
  for (Inst &Instr : EntryNode->getInsts()) {
    if (Instr.isDeleted())
      continue;
    if (auto *Alloca = llvm::dyn_cast<InstAlloca>(&Instr)) {
      // Variable-size allocas are never fused; skip them here.
      if (!llvm::isa<Constant>(Alloca->getSizeInBytes()))
        continue;
      uint32_t AlignmentParam = Alloca->getAlignInBytes();
      // For default align=0, set it to the real value 1, to avoid any
      // bit-manipulation problems below.
      AlignmentParam = std::max(AlignmentParam, 1u);
      assert(llvm::isPowerOf2_32(AlignmentParam));
      if (HasDynamicAllocation && AlignmentParam > StackAlignment) {
        // If we have both dynamic allocations and large stack alignments,
        // high-alignment allocations are pulled out with their own base.
        AlignedAllocas.push_back(Alloca);
      } else {
        FixedAllocas.push_back(Alloca);
      }
      MaxAlignment = std::max(AlignmentParam, MaxAlignment);
    }
  }
  // Add instructions to the head of the entry block in reverse order.
  InstList &Insts = getEntryNode()->getInsts();
  if (HasDynamicAllocation && HasLargeAlignment) {
    // We are using a frame pointer, but fixed large-alignment alloca addresses
    // do not have a known offset from either the stack or frame pointer.
    // They grow up from a user pointer from an alloca.
    sortAndCombineAllocas(AlignedAllocas, MaxAlignment, Insts, BVT_UserPointer);
    // Fixed size allocas are addressed relative to the frame pointer.
    sortAndCombineAllocas(FixedAllocas, StackAlignment, Insts,
                          BVT_FramePointer);
  } else {
    // Otherwise, fixed size allocas are addressed relative to the stack unless
    // there are dynamic allocas.
    const AllocaBaseVariableType BasePointerType =
        (HasDynamicAllocation ? BVT_FramePointer : BVT_StackPointer);
    sortAndCombineAllocas(FixedAllocas, MaxAlignment, Insts, BasePointerType);
  }
  if (!FixedAllocas.empty() || !AlignedAllocas.empty())
    // No use calling findRematerializable() unless there is some
    // rematerializable alloca instruction to seed it.
    findRematerializable();
}
1032
1033 namespace {
1034
1035 // Helpers for findRematerializable(). For each of them, if a suitable
1036 // rematerialization is found, the instruction's Dest variable is set to be
1037 // rematerializable and it returns true, otherwise it returns false.
1038
rematerializeArithmetic(const Inst * Instr)1039 bool rematerializeArithmetic(const Inst *Instr) {
1040 // Check that it's an Arithmetic instruction with an Add operation.
1041 auto *Arith = llvm::dyn_cast<InstArithmetic>(Instr);
1042 if (Arith == nullptr || Arith->getOp() != InstArithmetic::Add)
1043 return false;
1044 // Check that Src(0) is rematerializable.
1045 auto *Src0Var = llvm::dyn_cast<Variable>(Arith->getSrc(0));
1046 if (Src0Var == nullptr || !Src0Var->isRematerializable())
1047 return false;
1048 // Check that Src(1) is an immediate.
1049 auto *Src1Imm = llvm::dyn_cast<ConstantInteger32>(Arith->getSrc(1));
1050 if (Src1Imm == nullptr)
1051 return false;
1052 Arith->getDest()->setRematerializable(
1053 Src0Var->getRegNum(), Src0Var->getStackOffset() + Src1Imm->getValue());
1054 return true;
1055 }
1056
rematerializeAssign(const Inst * Instr)1057 bool rematerializeAssign(const Inst *Instr) {
1058 // An InstAssign only originates from an inttoptr or ptrtoint instruction,
1059 // which never occurs in a MINIMAL build.
1060 if (BuildDefs::minimal())
1061 return false;
1062 // Check that it's an Assign instruction.
1063 if (!llvm::isa<InstAssign>(Instr))
1064 return false;
1065 // Check that Src(0) is rematerializable.
1066 auto *Src0Var = llvm::dyn_cast<Variable>(Instr->getSrc(0));
1067 if (Src0Var == nullptr || !Src0Var->isRematerializable())
1068 return false;
1069 Instr->getDest()->setRematerializable(Src0Var->getRegNum(),
1070 Src0Var->getStackOffset());
1071 return true;
1072 }
1073
rematerializeCast(const Inst * Instr)1074 bool rematerializeCast(const Inst *Instr) {
1075 // An pointer-type bitcast never occurs in a MINIMAL build.
1076 if (BuildDefs::minimal())
1077 return false;
1078 // Check that it's a Cast instruction with a Bitcast operation.
1079 auto *Cast = llvm::dyn_cast<InstCast>(Instr);
1080 if (Cast == nullptr || Cast->getCastKind() != InstCast::Bitcast)
1081 return false;
1082 // Check that Src(0) is rematerializable.
1083 auto *Src0Var = llvm::dyn_cast<Variable>(Cast->getSrc(0));
1084 if (Src0Var == nullptr || !Src0Var->isRematerializable())
1085 return false;
1086 // Check that Dest and Src(0) have the same type.
1087 Variable *Dest = Cast->getDest();
1088 if (Dest->getType() != Src0Var->getType())
1089 return false;
1090 Dest->setRematerializable(Src0Var->getRegNum(), Src0Var->getStackOffset());
1091 return true;
1092 }
1093
1094 } // end of anonymous namespace
1095
1096 /// Scan the function to find additional rematerializable variables. This is
1097 /// possible when the source operand of an InstAssignment is a rematerializable
1098 /// variable, or the same for a pointer-type InstCast::Bitcast, or when an
1099 /// InstArithmetic is an add of a rematerializable variable and an immediate.
1100 /// Note that InstAssignment instructions and pointer-type InstCast::Bitcast
1101 /// instructions generally only come about from the IceConverter's treatment of
1102 /// inttoptr, ptrtoint, and bitcast instructions. TODO(stichnot): Consider
1103 /// other possibilities, however unlikely, such as InstArithmetic::Sub, or
1104 /// commutativity.
findRematerializable()1105 void Cfg::findRematerializable() {
1106 // Scan the instructions in order, and repeat until no new opportunities are
1107 // found. It may take more than one iteration because a variable's defining
1108 // block may happen to come after a block where it is used, depending on the
1109 // CfgNode linearization order.
1110 bool FoundNewAssignment;
1111 do {
1112 FoundNewAssignment = false;
1113 for (CfgNode *Node : getNodes()) {
1114 // No need to process Phi instructions.
1115 for (Inst &Instr : Node->getInsts()) {
1116 if (Instr.isDeleted())
1117 continue;
1118 Variable *Dest = Instr.getDest();
1119 if (Dest == nullptr || Dest->isRematerializable())
1120 continue;
1121 if (rematerializeArithmetic(&Instr) || rematerializeAssign(&Instr) ||
1122 rematerializeCast(&Instr)) {
1123 FoundNewAssignment = true;
1124 }
1125 }
1126 }
1127 } while (FoundNewAssignment);
1128 }
1129
doAddressOpt()1130 void Cfg::doAddressOpt() {
1131 TimerMarker T(TimerStack::TT_doAddressOpt, this);
1132 for (CfgNode *Node : Nodes)
1133 Node->doAddressOpt();
1134 }
1135
1136 namespace {
1137 // ShuffleVectorUtils implements helper functions for rematerializing
1138 // shufflevector instructions from a sequence of extractelement/insertelement
1139 // instructions. It looks for the following pattern:
1140 //
1141 // %t0 = extractelement A, %n0
1142 // %t1 = extractelement B, %n1
1143 // %t2 = extractelement C, %n2
1144 // ...
1145 // %tN = extractelement N, %nN
1146 // %d0 = insertelement undef, %t0, 0
1147 // %d1 = insertelement %d0, %t1, 1
1148 // %d2 = insertelement %d1, %t2, 2
1149 // ...
1150 // %dest = insertelement %d_N-1, %tN, N
1151 //
1152 // where N is num_element(typeof(%dest)), and A, B, C, ... N are at most two
1153 // distinct variables.
1154 namespace ShuffleVectorUtils {
1155 // findAllInserts is used when searching for all the insertelements that are
1156 // used in a shufflevector operation. This function works recursively, when
1157 // invoked with I = i, the function assumes Insts[i] is the last found
// insertelement in the chain. The next insertelement instruction is saved in
1159 // Insts[i+1].
// Recursively walks the insertelement chain backwards from the final insert
// (stored in (*Insts)[0]). Each step validates the current link and records
// its predecessor in (*Insts)[I+1]. Returns true only when the full chain is
// found: the first insert's Src(0) is undef and the insertion indexes, read
// from the back of Insts forwards, are exactly 0, 1, 2, ...
bool findAllInserts(Cfg *Func, GlobalContext *Ctx, VariablesMetadata *VM,
                    CfgVector<const Inst *> *Insts, SizeT I = 0) {
  const bool Verbose = BuildDefs::dump() && Func->isVerbose(IceV_ShufMat);

  // Defensive bound on the recursion depth relative to the vector size.
  if (I > Insts->size()) {
    if (Verbose) {
      Ctx->getStrDump() << "\tToo many inserts.\n";
    }
    return false;
  }

  const auto *LastInsert = Insts->at(I);
  assert(llvm::isa<InstInsertElement>(LastInsert));

  // Base case: the whole chain has been collected; validate it.
  if (I == Insts->size() - 1) {
    // Matching against undef is not really needed because the value in Src(0)
    // will be totally overwritten. We still enforce it anyways because the
    // PNaCl toolchain generates the bitcode with it.
    if (!llvm::isa<ConstantUndef>(LastInsert->getSrc(0))) {
      if (Verbose) {
        Ctx->getStrDump() << "\tSrc0 is not undef: " << I << " "
                          << Insts->size();
        LastInsert->dump(Func);
        Ctx->getStrDump() << "\n";
      }
      return false;
    }

    // The following loop ensures that the insertelements are sorted. In theory,
    // we could relax this restriction and allow any order. As long as each
    // index appears exactly once, this chain is still a candidate for becoming
    // a shufflevector. The Insts vector is traversed backwards because the
    // instructions are "enqueued" in reverse order.
    int32_t ExpectedElement = 0;
    for (const auto *I : reverse_range(*Insts)) {
      if (llvm::cast<ConstantInteger32>(I->getSrc(2))->getValue() !=
          ExpectedElement) {
        return false;
      }
      ++ExpectedElement;
    }
    return true;
  }

  const auto *Src0V = llvm::cast<Variable>(LastInsert->getSrc(0));
  const auto *Def = VM->getSingleDefinition(Src0V);

  // Only optimize if the first operand in
  //
  //   Dest = insertelement A, B, 10
  //
  // is singly-def'ed.
  if (Def == nullptr) {
    if (Verbose) {
      Ctx->getStrDump() << "\tmulti-def: ";
      (*Insts)[I]->dump(Func);
      Ctx->getStrDump() << "\n";
    }
    return false;
  }

  // We also require the (single) definition to come from an insertelement
  // instruction.
  if (!llvm::isa<InstInsertElement>(Def)) {
    if (Verbose) {
      Ctx->getStrDump() << "\tnot insert element: ";
      Def->dump(Func);
      Ctx->getStrDump() << "\n";
    }
    return false;
  }

  // Everything seems fine, so we save Def in Insts, and delegate the decision
  // to findAllInserts.
  (*Insts)[I + 1] = Def;

  return findAllInserts(Func, Ctx, VM, Insts, I + 1);
}
1238
1239 // insertsLastElement returns true if Insert is inserting an element in the last
1240 // position of a vector.
insertsLastElement(const Inst & Insert)1241 bool insertsLastElement(const Inst &Insert) {
1242 const Type DestTy = Insert.getDest()->getType();
1243 assert(isVectorType(DestTy));
1244 const SizeT Elem =
1245 llvm::cast<ConstantInteger32>(Insert.getSrc(2))->getValue();
1246 return Elem == typeNumElements(DestTy) - 1;
1247 }
1248
1249 // findAllExtracts goes over all the insertelement instructions that are
1250 // candidates to be replaced by a shufflevector, and searches for all the
1251 // definitions of the elements being inserted. If all of the elements are the
1252 // result of an extractelement instruction, and all of the extractelements
// operate on at most two different sources, then the instructions can be
1254 // replaced by a shufflevector.
// For each candidate insertelement in Insts, finds the single defining
// extractelement of its inserted value and records it in (*Extracts)[I].
// Collects the (at most two) distinct extraction sources into *Src0/*Src1,
// returning false if any inserted value is not a singly-def'ed
// extractelement result or if more than two sources are involved.
bool findAllExtracts(Cfg *Func, GlobalContext *Ctx, VariablesMetadata *VM,
                     const CfgVector<const Inst *> &Insts, Variable **Src0,
                     Variable **Src1, CfgVector<const Inst *> *Extracts) {
  const bool Verbose = BuildDefs::dump() && Func->isVerbose(IceV_ShufMat);

  *Src0 = nullptr;
  *Src1 = nullptr;
  assert(Insts.size() > 0);
  for (SizeT I = 0; I < Insts.size(); ++I) {
    const auto *Insert = Insts.at(I);
    // The inserted value, Src(1), must be a variable so its definition can be
    // traced.
    const auto *Src1V = llvm::dyn_cast<Variable>(Insert->getSrc(1));
    if (Src1V == nullptr) {
      if (Verbose) {
        Ctx->getStrDump() << "src(1) is not a variable: ";
        Insert->dump(Func);
        Ctx->getStrDump() << "\n";
      }
      return false;
    }

    // The inserted value must have exactly one definition.
    const auto *Def = VM->getSingleDefinition(Src1V);
    if (Def == nullptr) {
      if (Verbose) {
        Ctx->getStrDump() << "multi-def src(1): ";
        Insert->dump(Func);
        Ctx->getStrDump() << "\n";
      }
      return false;
    }

    // That definition must be an extractelement instruction.
    if (!llvm::isa<InstExtractElement>(Def)) {
      if (Verbose) {
        Ctx->getStrDump() << "not extractelement: ";
        Def->dump(Func);
        Ctx->getStrDump() << "\n";
      }
      return false;
    }

    auto *Src = llvm::cast<Variable>(Def->getSrc(0));
    if (*Src0 == nullptr) {
      // No sources yet. Save Src to Src0.
      *Src0 = Src;
    } else if (*Src1 == nullptr) {
      // We already have a source, so we might save Src in Src1 -- but only if
      // Src0 is not Src.
      if (*Src0 != Src) {
        *Src1 = Src;
      }
    } else if (Src != *Src0 && Src != *Src1) {
      // More than two sources, so we can't rematerialize the shufflevector
      // instruction.
      if (Verbose) {
        Ctx->getStrDump() << "Can't shuffle more than two sources.\n";
      }
      return false;
    }

    (*Extracts)[I] = Def;
  }

  // We should have seen at least one source operand.
  assert(*Src0 != nullptr);

  // If a second source was not seen, then we just make Src1 = Src0 to simplify
  // things down stream. This should not matter, as all of the indexes in the
  // shufflevector instruction will point to Src0.
  if (*Src1 == nullptr) {
    *Src1 = *Src0;
  }

  return true;
}
1328
1329 } // end of namespace ShuffleVectorUtils
1330 } // end of anonymous namespace
1331
// Scans every node for extractelement/insertelement chains (see the
// ShuffleVectorUtils comment above) and replaces each matched chain's final
// insertelement with a single shufflevector instruction built from the (at
// most two) extraction sources.
void Cfg::materializeVectorShuffles() {
  const bool Verbose = BuildDefs::dump() && isVerbose(IceV_ShufMat);

  // Hold the dump stream locked for the duration of the pass when verbose.
  std::unique_ptr<OstreamLocker> L;
  if (Verbose) {
    L.reset(new OstreamLocker(getContext()));
    getContext()->getStrDump() << "\nShuffle materialization:\n";
  }

  // MaxVectorElements is the maximum number of elements in the vector types
  // handled by Subzero. We use it to create the Inserts and Extracts vectors
  // with the appropriate size, thus avoiding resize() calls.
  const SizeT MaxVectorElements = typeNumElements(IceType_v16i8);
  CfgVector<const Inst *> Inserts(MaxVectorElements);
  CfgVector<const Inst *> Extracts(MaxVectorElements);

  TimerMarker T(TimerStack::TT_materializeVectorShuffles, this);
  for (CfgNode *Node : Nodes) {
    for (auto &Instr : Node->getInsts()) {
      if (!llvm::isa<InstInsertElement>(Instr)) {
        continue;
      }
      if (!ShuffleVectorUtils::insertsLastElement(Instr)) {
        // To avoid wasting time, we only start the pattern match at the last
        // insertelement instruction -- and go backwards from there.
        continue;
      }
      if (Verbose) {
        getContext()->getStrDump() << "\tCandidate: ";
        Instr.dump(this);
        getContext()->getStrDump() << "\n";
      }
      // Slot 0 holds the chain's final insert; findAllInserts fills the rest.
      Inserts.resize(typeNumElements(Instr.getDest()->getType()));
      Inserts[0] = &Instr;
      if (!ShuffleVectorUtils::findAllInserts(this, getContext(),
                                              VMetadata.get(), &Inserts)) {
        // If we fail to find a sequence of insertelements, we stop the
        // optimization.
        if (Verbose) {
          getContext()->getStrDump() << "\tFalse alarm.\n";
        }
        continue;
      }
      if (Verbose) {
        getContext()->getStrDump() << "\tFound the following insertelement: \n";
        for (auto *I : reverse_range(Inserts)) {
          getContext()->getStrDump() << "\t\t";
          I->dump(this);
          getContext()->getStrDump() << "\n";
        }
      }
      Extracts.resize(Inserts.size());
      Variable *Src0;
      Variable *Src1;
      if (!ShuffleVectorUtils::findAllExtracts(this, getContext(),
                                               VMetadata.get(), Inserts, &Src0,
                                               &Src1, &Extracts)) {
        // If we fail to match the definitions of the insertelements' sources
        // with extractelement instructions -- or if those instructions operate
        // on more than two different variables -- we stop the optimization.
        if (Verbose) {
          getContext()->getStrDump() << "\tFailed to match extractelements.\n";
        }
        continue;
      }
      if (Verbose) {
        getContext()->getStrDump()
            << "\tFound the following insert/extract element pairs: \n";
        for (SizeT I = 0; I < Inserts.size(); ++I) {
          const SizeT Pos = Inserts.size() - I - 1;
          getContext()->getStrDump() << "\t\tInsert : ";
          Inserts[Pos]->dump(this);
          getContext()->getStrDump() << "\n\t\tExtract: ";
          Extracts[Pos]->dump(this);
          getContext()->getStrDump() << "\n";
        }
      }

      assert(Src0 != nullptr);
      assert(Src1 != nullptr);

      auto *ShuffleVector =
          InstShuffleVector::create(this, Instr.getDest(), Src0, Src1);
      assert(ShuffleVector->getSrc(0) == Src0);
      assert(ShuffleVector->getSrc(1) == Src1);
      // Build the index list in chain order (Extracts was filled backwards).
      // Indexes into Src1 are biased by the vector length, matching the
      // shufflevector convention of indexing the two sources as one sequence.
      for (SizeT I = 0; I < Extracts.size(); ++I) {
        const SizeT Pos = Extracts.size() - I - 1;
        auto *Index = llvm::cast<ConstantInteger32>(Extracts[Pos]->getSrc(1));
        if (Src0 == Extracts[Pos]->getSrc(0)) {
          ShuffleVector->addIndex(Index);
        } else {
          ShuffleVector->addIndex(llvm::cast<ConstantInteger32>(
              Ctx->getConstantInt32(Index->getValue() + Extracts.size())));
        }
      }

      if (Verbose) {
        getContext()->getStrDump() << "Created: ";
        ShuffleVector->dump(this);
        getContext()->getStrDump() << "\n";
      }

      // Replace the final insertelement with the new shufflevector in place.
      Instr.setDeleted();
      auto &LoweringContext = getTarget()->getContext();
      LoweringContext.setInsertPoint(instToIterator(&Instr));
      LoweringContext.insert(ShuffleVector);
    }
  }
}
1441
genCode()1442 void Cfg::genCode() {
1443 TimerMarker T(TimerStack::TT_genCode, this);
1444 for (CfgNode *Node : Nodes)
1445 Node->genCode();
1446 }
1447
1448 // Compute the stack frame layout.
genFrame()1449 void Cfg::genFrame() {
1450 TimerMarker T(TimerStack::TT_genFrame, this);
1451 getTarget()->addProlog(Entry);
1452 for (CfgNode *Node : Nodes)
1453 if (Node->getHasReturn())
1454 getTarget()->addEpilog(Node);
1455 }
1456
generateLoopInfo()1457 void Cfg::generateLoopInfo() {
1458 TimerMarker T(TimerStack::TT_computeLoopNestDepth, this);
1459 LoopInfo = ComputeLoopInfo(this);
1460 }
1461
1462 // This is a lightweight version of live-range-end calculation. Marks the last
// use of only those variables whose definition and uses are completely within
1464 // single block. It is a quick single pass and doesn't need to iterate until
1465 // convergence.
livenessLightweight()1466 void Cfg::livenessLightweight() {
1467 TimerMarker T(TimerStack::TT_livenessLightweight, this);
1468 getVMetadata()->init(VMK_Uses);
1469 for (CfgNode *Node : Nodes)
1470 Node->livenessLightweight();
1471 }
1472
// Computes liveness for the whole function via iterative backward dataflow
// over the nodes, then (for Liveness_Intervals mode) deletes dead
// instructions and builds per-variable live-range intervals.
void Cfg::liveness(LivenessMode Mode) {
  TimerMarker T(TimerStack::TT_liveness, this);
  // Destroying the previous (if any) Liveness information clears the Liveness
  // allocator TLS pointer.
  Live = nullptr;
  Live = Liveness::create(this, Mode);

  getVMetadata()->init(VMK_Uses);
  Live->init();

  // Initialize with all nodes needing to be processed.
  BitVector NeedToProcess(Nodes.size(), true);
  while (NeedToProcess.any()) {
    // Iterate in reverse topological order to speed up convergence.
    for (CfgNode *Node : reverse_range(Nodes)) {
      if (NeedToProcess[Node->getIndex()]) {
        NeedToProcess[Node->getIndex()] = false;
        bool Changed = Node->liveness(getLiveness());
        if (Changed) {
          // If the beginning-of-block liveness changed since the last
          // iteration, mark all in-edges as needing to be processed.
          for (CfgNode *Pred : Node->getInEdges())
            NeedToProcess[Pred->getIndex()] = true;
        }
      }
    }
  }
  if (Mode == Liveness_Intervals) {
    // Reset each variable's live range.
    for (Variable *Var : Variables)
      Var->resetLiveRange();
  }
  // Make a final pass over each node to delete dead instructions, collect the
  // first and last instruction numbers, and add live range segments for that
  // node.
  for (CfgNode *Node : Nodes) {
    InstNumberT FirstInstNum = Inst::NumberSentinel;
    InstNumberT LastInstNum = Inst::NumberSentinel;
    // Phi instructions are numbered before the regular instructions, so they
    // are scanned first.
    for (Inst &I : Node->getPhis()) {
      I.deleteIfDead();
      if (Mode == Liveness_Intervals && !I.isDeleted()) {
        if (FirstInstNum == Inst::NumberSentinel)
          FirstInstNum = I.getNumber();
        assert(I.getNumber() > LastInstNum);
        LastInstNum = I.getNumber();
      }
    }
    for (Inst &I : Node->getInsts()) {
      I.deleteIfDead();
      if (Mode == Liveness_Intervals && !I.isDeleted()) {
        if (FirstInstNum == Inst::NumberSentinel)
          FirstInstNum = I.getNumber();
        assert(I.getNumber() > LastInstNum);
        LastInstNum = I.getNumber();
      }
    }
    if (Mode == Liveness_Intervals) {
      // Special treatment for live in-args. Their liveness needs to extend
      // beyond the beginning of the function, otherwise an arg whose only use
      // is in the first instruction will end up having the trivial live range
      // [2,2) and will *not* interfere with other arguments. So if the first
      // instruction of the method is "r=arg1+arg2", both args may be assigned
      // the same register. This is accomplished by extending the entry block's
      // instruction range from [2,n) to [1,n) which will transform the
      // problematic [2,2) live ranges into [1,2). This extension works because
      // the entry node is guaranteed to have the lowest instruction numbers.
      if (Node == getEntryNode()) {
        FirstInstNum = Inst::NumberExtended;
        // Just in case the entry node somehow contains no instructions...
        if (LastInstNum == Inst::NumberSentinel)
          LastInstNum = FirstInstNum;
      }
      // If this node somehow contains no instructions, don't bother trying to
      // add liveness intervals for it, because variables that are live-in and
      // live-out will have a bogus interval added.
      if (FirstInstNum != Inst::NumberSentinel)
        Node->livenessAddIntervals(getLiveness(), FirstInstNum, LastInstNum);
    }
  }
}
1553
1554 // Traverse every Variable of every Inst and verify that it appears within the
1555 // Variable's computed live range.
// Validates the computed live ranges: every destination must begin its live
// range at its defining instruction, and every source variable must be live
// at its use. Dumps a diagnostic for each violation and returns false if any
// was found.
bool Cfg::validateLiveness() const {
  TimerMarker T(TimerStack::TT_validateLiveness, this);
  bool Valid = true;
  OstreamLocker L(Ctx);
  Ostream &Str = Ctx->getStrDump();
  for (CfgNode *Node : Nodes) {
    Inst *FirstInst = nullptr;
    for (Inst &Instr : Node->getInsts()) {
      if (Instr.isDeleted())
        continue;
      // Remember the first live instruction for the first-instruction
      // exception described below.
      if (FirstInst == nullptr)
        FirstInst = &Instr;
      InstNumberT InstNumber = Instr.getNumber();
      if (Variable *Dest = Instr.getDest()) {
        if (!Dest->getIgnoreLiveness()) {
          bool Invalid = false;
          constexpr bool IsDest = true;
          // The defining instruction must lie within Dest's live range.
          if (!Dest->getLiveRange().containsValue(InstNumber, IsDest))
            Invalid = true;
          // Check that this instruction actually *begins* Dest's live range,
          // by checking that Dest is not live in the previous instruction. As
          // a special exception, we don't check this for the first instruction
          // of the block, because a Phi temporary may be live at the end of
          // the previous block, and if it is also assigned in the first
          // instruction of this block, the adjacent live ranges get merged.
          if (&Instr != FirstInst && !Instr.isDestRedefined() &&
              Dest->getLiveRange().containsValue(InstNumber - 1, IsDest))
            Invalid = true;
          if (Invalid) {
            Valid = false;
            Str << "Liveness error: inst " << Instr.getNumber() << " dest ";
            Dest->dump(this);
            Str << " live range " << Dest->getLiveRange() << "\n";
          }
        }
      }
      // Every source variable must be live at its point of use.
      FOREACH_VAR_IN_INST(Var, Instr) {
        static constexpr bool IsDest = false;
        if (!Var->getIgnoreLiveness() &&
            !Var->getLiveRange().containsValue(InstNumber, IsDest)) {
          Valid = false;
          Str << "Liveness error: inst " << Instr.getNumber() << " var ";
          Var->dump(this);
          Str << " live range " << Var->getLiveRange() << "\n";
        }
      }
    }
  }
  return Valid;
}
1606
contractEmptyNodes()1607 void Cfg::contractEmptyNodes() {
1608 // If we're decorating the asm output with register liveness info, this
1609 // information may become corrupted or incorrect after contracting nodes that
1610 // contain only redundant assignments. As such, we disable this pass when
1611 // DecorateAsm is specified. This may make the resulting code look more
1612 // branchy, but it should have no effect on the register assignments.
1613 if (getFlags().getDecorateAsm())
1614 return;
1615 for (CfgNode *Node : Nodes) {
1616 Node->contractIfEmpty();
1617 }
1618 }
1619
doBranchOpt()1620 void Cfg::doBranchOpt() {
1621 TimerMarker T(TimerStack::TT_doBranchOpt, this);
1622 for (auto I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
1623 auto NextNode = I + 1;
1624 (*I)->doBranchOpt(NextNode == E ? nullptr : *NextNode);
1625 }
1626 }
1627
1628 // ======================== Dump routines ======================== //
1629
1630 // emitTextHeader() is not target-specific (apart from what is abstracted by
1631 // the Assembler), so it is defined here rather than in the target lowering
1632 // class.
emitTextHeader(GlobalString Name,GlobalContext * Ctx,const Assembler * Asm)1633 void Cfg::emitTextHeader(GlobalString Name, GlobalContext *Ctx,
1634 const Assembler *Asm) {
1635 if (!BuildDefs::dump())
1636 return;
1637 Ostream &Str = Ctx->getStrEmit();
1638 Str << "\t.text\n";
1639 if (getFlags().getFunctionSections())
1640 Str << "\t.section\t.text." << Name << ",\"ax\",%progbits\n";
1641 if (!Asm->getInternal() || getFlags().getDisableInternal()) {
1642 Str << "\t.globl\t" << Name << "\n";
1643 Str << "\t.type\t" << Name << ",%function\n";
1644 }
1645 Str << "\t" << Asm->getAlignDirective() << " "
1646 << Asm->getBundleAlignLog2Bytes() << ",0x";
1647 for (uint8_t I : Asm->getNonExecBundlePadding())
1648 Str.write_hex(I);
1649 Str << "\n";
1650 Str << Name << ":\n";
1651 }
1652
emitJumpTables()1653 void Cfg::emitJumpTables() {
1654 switch (getFlags().getOutFileType()) {
1655 case FT_Elf:
1656 case FT_Iasm: {
1657 // The emission needs to be delayed until the after the text section so
1658 // save the offsets in the global context.
1659 for (const InstJumpTable *JumpTable : JumpTables) {
1660 Ctx->addJumpTableData(JumpTable->toJumpTableData(getAssembler()));
1661 }
1662 } break;
1663 case FT_Asm: {
1664 // Emit the assembly directly so we don't need to hang on to all the names
1665 for (const InstJumpTable *JumpTable : JumpTables)
1666 getTarget()->emitJumpTable(this, JumpTable);
1667 } break;
1668 }
1669 }
1670
emit()1671 void Cfg::emit() {
1672 if (!BuildDefs::dump())
1673 return;
1674 TimerMarker T(TimerStack::TT_emitAsm, this);
1675 if (getFlags().getDecorateAsm()) {
1676 renumberInstructions();
1677 getVMetadata()->init(VMK_Uses);
1678 liveness(Liveness_Basic);
1679 dump("After recomputing liveness for -decorate-asm");
1680 }
1681 OstreamLocker L(Ctx);
1682 Ostream &Str = Ctx->getStrEmit();
1683 const Assembler *Asm = getAssembler<>();
1684
1685 emitTextHeader(FunctionName, Ctx, Asm);
1686 if (getFlags().getDecorateAsm()) {
1687 for (Variable *Var : getVariables()) {
1688 if (Var->hasKnownStackOffset() && !Var->isRematerializable()) {
1689 Str << "\t" << Var->getSymbolicStackOffset() << " = "
1690 << Var->getStackOffset() << "\n";
1691 }
1692 }
1693 }
1694 for (CfgNode *Node : Nodes) {
1695 Node->emit(this);
1696 }
1697 emitJumpTables();
1698 Str << "\n";
1699 }
1700
emitIAS()1701 void Cfg::emitIAS() {
1702 TimerMarker T(TimerStack::TT_emitAsm, this);
1703 // The emitIAS() routines emit into the internal assembler buffer, so there's
1704 // no need to lock the streams.
1705 for (CfgNode *Node : Nodes) {
1706 Node->emitIAS(this);
1707 }
1708 emitJumpTables();
1709 }
1710
getTotalMemoryMB() const1711 size_t Cfg::getTotalMemoryMB() const {
1712 constexpr size_t _1MB = 1024 * 1024;
1713 assert(Allocator != nullptr);
1714 assert(CfgAllocatorTraits::current() == Allocator.get());
1715 return Allocator->getTotalMemory() / _1MB;
1716 }
1717
getLivenessMemoryMB() const1718 size_t Cfg::getLivenessMemoryMB() const {
1719 constexpr size_t _1MB = 1024 * 1024;
1720 if (Live == nullptr) {
1721 return 0;
1722 }
1723 return Live->getAllocator()->getTotalMemory() / _1MB;
1724 }
1725
// Dumps the IR with an optional introductory message. Output is gated on
// BuildDefs::dump() and the Cfg's verbosity mask; each section below is
// further controlled by an individual IceV_* verbosity flag. The exact
// output format is relied on by lit tests — see the note about
// getFunctionNameAndSize() below.
void Cfg::dump(const char *Message) {
  if (!BuildDefs::dump())
    return;
  if (!isVerbose())
    return;
  OstreamLocker L(Ctx);
  Ostream &Str = Ctx->getStrDump();
  // An empty Message suppresses the banner line.
  if (Message[0])
    Str << "================ " << Message << " ================\n";
  if (isVerbose(IceV_Mem)) {
    Str << "Memory size = " << getTotalMemoryMB() << " MB\n";
  }
  setCurrentNode(getEntryNode());
  // Print function name+args
  if (isVerbose(IceV_Instructions)) {
    Str << "define ";
    if (getInternal() && !getFlags().getDisableInternal())
      Str << "internal ";
    Str << ReturnType << " @" << getFunctionName() << "(";
    for (SizeT i = 0; i < Args.size(); ++i) {
      if (i > 0)
        Str << ", ";
      Str << Args[i]->getType() << " ";
      Args[i]->dump(this);
    }
    // Append an extra copy of the function name here, in order to print its
    // size stats but not mess up lit tests.
    Str << ") { # " << getFunctionNameAndSize() << "\n";
  }
  resetCurrentNode();
  if (isVerbose(IceV_Liveness)) {
    // Print summary info about variables
    for (Variable *Var : Variables) {
      Str << "// multiblock=";
      // Untracked variables get "?" since multi-block status is unknown.
      if (getVMetadata()->isTracked(Var))
        Str << getVMetadata()->isMultiBlock(Var);
      else
        Str << "?";
      Str << " defs=";
      bool FirstPrint = true;
      // The first definition is only recorded when the metadata kind tracks
      // definitions (i.e. anything other than VMK_Uses).
      if (VMetadata->getKind() != VMK_Uses) {
        if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) {
          Str << FirstDef->getNumber();
          FirstPrint = false;
        }
      }
      // Subsequent (non-first) definitions are only tracked under VMK_All.
      // FirstPrint controls the comma separator in the defs list.
      if (VMetadata->getKind() == VMK_All) {
        for (const Inst *Instr : VMetadata->getLatterDefinitions(Var)) {
          if (!FirstPrint)
            Str << ",";
          Str << Instr->getNumber();
          FirstPrint = false;
        }
      }
      Str << " weight=" << Var->getWeight(this) << " ";
      Var->dump(this);
      Str << " LIVE=" << Var->getLiveRange() << "\n";
    }
  }
  // Print each basic block
  for (CfgNode *Node : Nodes)
    Node->dump(this);
  if (isVerbose(IceV_Instructions))
    Str << "}\n";
}
1792
1793 } // end of namespace Ice
1794