//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
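//
// For example (illustrative invocation; any tool that drives the codegen
// pipeline accepts the flag):
//
//   llc -verify-machineinstrs input.ll -o /dev/null
//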
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  unsigned verify(const MachineFunction &MF);

  Pass *const PASS;
  const char *Banner;
  const MachineFunction *MF;
  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;
  const RegisterBankInfo *RBI;

  unsigned foundErrors;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected;
  bool isFunctionSelected;
  bool isFunctionTracksDebugUserValues;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI;
  const MachineInstr *FirstTerminator;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
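  // (e.g., for an x86 target, adding $eax would also add $ax, $ah, and $al;
  // illustrative register names).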
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that are only used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;
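    // Illustrative example: if %0 is defined in bb.0 and used in bb.2 along
    // the path bb.0 -> bb.1 -> bb.2, then %0 appears in bb.0's regsLiveOut,
    // in bb.1's vregsPassed and vregsRequired, and in bb.2's vregsLiveIn.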

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars;
  LiveIntervals *LiveInts;
  LiveStacks *LiveStks;
  SlotIndexes *Indexes;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);
  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacks>();
    AU.addUsedIfAvailable<LiveVariables>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
    return false;
  }
};

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}
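
// Example use (illustrative): pipelines typically insert the verifier with a
// banner naming the pass that just ran, e.g.
//   PM.add(createMachineVerifierPass("After Register Allocation"));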

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);
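
      // Illustrative well-formed bundle in MIR (opcode names hypothetical):
      //   BUNDLE implicit-def $r0 {
      //     $r0 = SOME_OP $r1
      //   }
      // The header carries no BundledPred flag; every instruction inside the
      // braces is bundled with its predecessor, and the final one must not
      // set BundledSucc.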

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin()) {
        report("MBB has allocatable live-in, but isn't entry or landing-pad.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions
  // to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
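// Illustrative operand layout, matching the checks below:
//   operand 0:  asm string (external symbol)
//   operand 1:  immediate extra-info flags
//   operand 2+: operand groups, each headed by an immediate flag word that
//               encodes how many register operands follow; then an optional
//               metadata node and trailing implicit register operands.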
verifyInlineAsm(const MachineInstr * MI)844 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
845 // The first two operands on INLINEASM are the asm string and global flags.
846 if (MI->getNumOperands() < 2) {
847 report("Too few operands on inline asm", MI);
848 return;
849 }
850 if (!MI->getOperand(0).isSymbol())
851 report("Asm string must be an external symbol", MI);
852 if (!MI->getOperand(1).isImm())
853 report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
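/// For example (illustrative): <2 x s64> and <2 x s32> agree in element
/// count, while <2 x s64> vs. <4 x s32>, or a vector vs. a scalar, do not.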
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar against: the whole vector or just one of its lanes. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, avoid
    // the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
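    // Illustrative: %v:_(s32) = G_LOAD %p(p0) :: (load (s32)) is fine, while
    // an MMO wider than the result, e.g. (load (s64)) here, is rejected.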
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
          if (!MO.isReg())
            return true;
          LLT Ty = MRI->getType(MO.getReg());
          if (!Ty.isValid() || (Ty != DstTy))
            return false;
          return true;
        }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
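    // Illustrative: %d:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)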
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1367 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1368
1369 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1370 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1371 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1372
1373 break;
1374 }
1375 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1376 // Source types must be scalars, dest type a vector. Scalar types must be
1377 // larger than the dest vector elt type, as this is a truncating operation.
1378 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1379 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1380 if (!DstTy.isVector() || SrcEltTy.isVector())
1381 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1382 MI);
1383 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1384 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1385 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1386 MI);
1387 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1388 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1389 "dest elt type",
1390 MI);
1391 break;
1392 }
1393 case TargetOpcode::G_CONCAT_VECTORS: {
1394 // Source types should be vectors, and total size should match the dest
1395 // vector size.
1396 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1397 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1398 if (!DstTy.isVector() || !SrcTy.isVector())
1399 report("G_CONCAT_VECTOR requires vector source and destination operands",
1400 MI);
1401
1402 if (MI->getNumOperands() < 3)
1403 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1404
1405 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1406 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1407 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1408 if (DstTy.getNumElements() !=
1409 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1410 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1411 break;
1412 }
1413 case TargetOpcode::G_ICMP:
1414 case TargetOpcode::G_FCMP: {
1415 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1416 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1417
1418 if ((DstTy.isVector() != SrcTy.isVector()) ||
1419 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1420 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1421
1422 break;
1423 }
1424 case TargetOpcode::G_EXTRACT: {
1425 const MachineOperand &SrcOp = MI->getOperand(1);
1426 if (!SrcOp.isReg()) {
1427 report("extract source must be a register", MI);
1428 break;
1429 }
1430
1431 const MachineOperand &OffsetOp = MI->getOperand(2);
1432 if (!OffsetOp.isImm()) {
1433 report("extract offset must be a constant", MI);
1434 break;
1435 }
1436
1437 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1438 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1439 if (SrcSize == DstSize)
1440 report("extract source must be larger than result", MI);
1441
1442 if (DstSize + OffsetOp.getImm() > SrcSize)
1443 report("extract reads past end of register", MI);
1444 break;
1445 }
1446 case TargetOpcode::G_INSERT: {
1447 const MachineOperand &SrcOp = MI->getOperand(2);
1448 if (!SrcOp.isReg()) {
1449 report("insert source must be a register", MI);
1450 break;
1451 }
1452
1453 const MachineOperand &OffsetOp = MI->getOperand(3);
1454 if (!OffsetOp.isImm()) {
1455 report("insert offset must be a constant", MI);
1456 break;
1457 }
1458
1459 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1460 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1461
1462 if (DstSize <= SrcSize)
1463 report("inserted size must be smaller than total register", MI);
1464
1465 if (SrcSize + OffsetOp.getImm() > DstSize)
1466 report("insert writes past end of register", MI);
1467
1468 break;
1469 }
1470 case TargetOpcode::G_JUMP_TABLE: {
1471 if (!MI->getOperand(1).isJTI())
1472 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1473 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1474 if (!DstTy.isPointer())
1475 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1476 break;
1477 }
1478 case TargetOpcode::G_BRJT: {
1479 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1480 report("G_BRJT src operand 0 must be a pointer type", MI);
1481
1482 if (!MI->getOperand(1).isJTI())
1483 report("G_BRJT src operand 1 must be a jump table index", MI);
1484
1485 const auto &IdxOp = MI->getOperand(2);
1486 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1487 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1488 break;
1489 }
1490 case TargetOpcode::G_INTRINSIC:
1491 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1492 // TODO: Should verify number of def and use operands, but the current
1493 // interface requires passing in IR types for mangling.
1494 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1495 if (!IntrIDOp.isIntrinsicID()) {
1496 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1497 break;
1498 }
1499
1500 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1501 unsigned IntrID = IntrIDOp.getIntrinsicID();
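// Cross-check the declared memory effects, but only for IDs that map to a
// known intrinsic declaration.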
1502 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1503 AttributeList Attrs = Intrinsic::getAttributes(
1504 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1505 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1506 if (NoSideEffects && DeclHasSideEffects) {
1507 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1508 break;
1509 }
1510 if (!NoSideEffects && !DeclHasSideEffects) {
1511 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1512 break;
1513 }
1514 }
1515
1516 break;
1517 }
1518 case TargetOpcode::G_SEXT_INREG: {
1519 if (!MI->getOperand(2).isImm()) {
1520 report("G_SEXT_INREG expects an immediate operand #2", MI);
1521 break;
1522 }
1523
1524 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1525 int64_t Imm = MI->getOperand(2).getImm();
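// The immediate is the width of the field being sign-extended; it must fit
// strictly inside the scalar source type.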
1526 if (Imm <= 0)
1527 report("G_SEXT_INREG size must be >= 1", MI);
1528 if (Imm >= SrcTy.getScalarSizeInBits())
1529 report("G_SEXT_INREG size must be less than source bit width", MI);
1530 break;
1531 }
1532 case TargetOpcode::G_SHUFFLE_VECTOR: {
1533 const MachineOperand &MaskOp = MI->getOperand(3);
1534 if (!MaskOp.isShuffleMask()) {
1535 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1536 break;
1537 }
1538
1539 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1540 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1541 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1542
1543 if (Src0Ty != Src1Ty)
1544 report("Source operands must be the same type", MI);
1545
1546 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1547 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1548
1549 // Don't check that all operands are vectors because scalars are used in
1550 // place of 1-element vectors.
1551 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1552 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1553
1554 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1555
1556 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1557 report("Wrong result type for shufflemask", MI);
1558
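// Negative mask elements denote undef lanes; non-negative ones index into the
// concatenation of the two source vectors, hence the 2 * SrcNumElts bound.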
1559 for (int Idx : MaskIdxes) {
1560 if (Idx < 0)
1561 continue;
1562
1563 if (Idx >= 2 * SrcNumElts)
1564 report("Out of bounds shuffle index", MI);
1565 }
1566
1567 break;
1568 }
1569 case TargetOpcode::G_DYN_STACKALLOC: {
1570 const MachineOperand &DstOp = MI->getOperand(0);
1571 const MachineOperand &AllocOp = MI->getOperand(1);
1572 const MachineOperand &AlignOp = MI->getOperand(2);
1573
1574 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1575 report("dst operand 0 must be a pointer type", MI);
1576 break;
1577 }
1578
1579 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1580 report("src operand 1 must be a scalar reg type", MI);
1581 break;
1582 }
1583
1584 if (!AlignOp.isImm()) {
1585 report("src operand 2 must be an immediate type", MI);
1586 break;
1587 }
1588 break;
1589 }
1590 case TargetOpcode::G_MEMCPY_INLINE:
1591 case TargetOpcode::G_MEMCPY:
1592 case TargetOpcode::G_MEMMOVE: {
1593 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1594 if (MMOs.size() != 2) {
1595 report("memcpy/memmove must have 2 memory operands", MI);
1596 break;
1597 }
1598
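// The first memory operand must be the store to the destination and the
// second the load from the source, which is exactly what this checks.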
1599 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1600 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1601 report("wrong memory operand types", MI);
1602 break;
1603 }
1604
1605 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1606 report("inconsistent memory operand sizes", MI);
1607
1608 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1609 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1610
1611 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1612 report("memory instruction operand must be a pointer", MI);
1613 break;
1614 }
1615
1616 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1617 report("inconsistent store address space", MI);
1618 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1619 report("inconsistent load address space", MI);
1620
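// G_MEMCPY_INLINE carries no 'tail' flag operand, so the check below applies
// only to G_MEMCPY and G_MEMMOVE.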
1621 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1622 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1623 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1624
1625 break;
1626 }
1627 case TargetOpcode::G_BZERO:
1628 case TargetOpcode::G_MEMSET: {
1629 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1630 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1631 if (MMOs.size() != 1) {
1632 report(Twine(Name, " must have 1 memory operand"), MI);
1633 break;
1634 }
1635
1636 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1637 report(Twine(Name, " memory operand must be a store"), MI);
1638 break;
1639 }
1640
1641 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1642 if (!DstPtrTy.isPointer()) {
1643 report(Twine(Name, " operand must be a pointer"), MI);
1644 break;
1645 }
1646
1647 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1648 report("inconsistent " + Twine(Name, " address space"), MI);
1649
1650 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1651 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1652 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1653
1654 break;
1655 }
1656 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1657 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1658 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1659 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1660 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1661 if (!DstTy.isScalar())
1662 report("Vector reduction requires a scalar destination type", MI);
1663 if (!Src1Ty.isScalar())
1664 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1665 if (!Src2Ty.isVector())
1666 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1667 break;
1668 }
1669 case TargetOpcode::G_VECREDUCE_FADD:
1670 case TargetOpcode::G_VECREDUCE_FMUL:
1671 case TargetOpcode::G_VECREDUCE_FMAX:
1672 case TargetOpcode::G_VECREDUCE_FMIN:
1673 case TargetOpcode::G_VECREDUCE_ADD:
1674 case TargetOpcode::G_VECREDUCE_MUL:
1675 case TargetOpcode::G_VECREDUCE_AND:
1676 case TargetOpcode::G_VECREDUCE_OR:
1677 case TargetOpcode::G_VECREDUCE_XOR:
1678 case TargetOpcode::G_VECREDUCE_SMAX:
1679 case TargetOpcode::G_VECREDUCE_SMIN:
1680 case TargetOpcode::G_VECREDUCE_UMAX:
1681 case TargetOpcode::G_VECREDUCE_UMIN: {
1682 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1683 if (!DstTy.isScalar())
1684 report("Vector reduction requires a scalar destination type", MI);
1685 break;
1686 }
1687
1688 case TargetOpcode::G_SBFX:
1689 case TargetOpcode::G_UBFX: {
1690 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1691 if (DstTy.isVector()) {
1692 report("Bitfield extraction is not supported on vectors", MI);
1693 break;
1694 }
1695 break;
1696 }
1697 case TargetOpcode::G_SHL:
1698 case TargetOpcode::G_LSHR:
1699 case TargetOpcode::G_ASHR:
1700 case TargetOpcode::G_ROTR:
1701 case TargetOpcode::G_ROTL: {
1702 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1703 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1704 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1705 report("Shifts and rotates require operands to be either all scalars or "
1706 "all vectors",
1707 MI);
1708 break;
1709 }
1710 break;
1711 }
1712 case TargetOpcode::G_LLROUND:
1713 case TargetOpcode::G_LROUND: {
1714 verifyAllRegOpsScalar(*MI, *MRI);
1715 break;
1716 }
1717 case TargetOpcode::G_IS_FPCLASS: {
1718 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1719 LLT DestEltTy = DestTy.getScalarType();
1720 if (!DestEltTy.isScalar()) {
1721 report("Destination must be a scalar or vector of scalars", MI);
1722 break;
1723 }
1724 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1725 LLT SrcEltTy = SrcTy.getScalarType();
1726 if (!SrcEltTy.isScalar()) {
1727 report("Source must be a scalar or vector of scalars", MI);
1728 break;
1729 }
1730 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1731 break;
1732 const MachineOperand &TestMO = MI->getOperand(2);
1733 if (!TestMO.isImm()) {
1734 report("floating-point class set (operand 2) must be an immediate", MI);
1735 break;
1736 }
1737 int64_t Test = TestMO.getImm();
1738 if (Test < 0 || Test > fcAllFlags) {
1739 report("Incorrect floating-point class set (operand 2)", MI);
1740 break;
1741 }
1742 break;
1743 }
1744 case TargetOpcode::G_ASSERT_ALIGN: {
1745 if (MI->getOperand(2).getImm() < 1)
1746 report("alignment immediate must be >= 1", MI);
1747 break;
1748 }
1749 default:
1750 break;
1751 }
1752 }
1753
1754 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1755 const MCInstrDesc &MCID = MI->getDesc();
1756 if (MI->getNumOperands() < MCID.getNumOperands()) {
1757 report("Too few operands", MI);
1758 errs() << MCID.getNumOperands() << " operands expected, but "
1759 << MI->getNumOperands() << " given.\n";
1760 }
1761
1762 if (MI->isPHI()) {
1763 if (MF->getProperties().hasProperty(
1764 MachineFunctionProperties::Property::NoPHIs))
1765 report("Found PHI instruction with NoPHIs property set", MI);
1766
1767 if (FirstNonPHI)
1768 report("Found PHI instruction after non-PHI", MI);
1769 } else if (FirstNonPHI == nullptr)
1770 FirstNonPHI = MI;
1771
1772 // Check the tied operands.
1773 if (MI->isInlineAsm())
1774 verifyInlineAsm(MI);
1775
1776 // Check that unspillable terminators define a reg and have at most one use.
1777 if (TII->isUnspillableTerminator(MI)) {
1778 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1779 report("Unspillable Terminator does not define a reg", MI);
1780 Register Def = MI->getOperand(0).getReg();
1781 if (Def.isVirtual() &&
1782 !MF->getProperties().hasProperty(
1783 MachineFunctionProperties::Property::NoPHIs) &&
1784 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1785 report("Unspillable Terminator expected to have at most one use!", MI);
1786 }
1787
1788 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1789 // DBG_VALUEs: these are convenient to use in tests, but should never get
1790 // generated.
1791 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1792 if (!MI->getDebugLoc())
1793 report("Missing DebugLoc for debug instruction", MI);
1794
1795 // Meta instructions should never be the subject of debug value tracking,
1796 // they don't create a value in the output program at all.
1797 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1798 report("Metadata instruction should not have a value tracking number", MI);
1799
1800 // Check the MachineMemOperands for basic consistency.
1801 for (MachineMemOperand *Op : MI->memoperands()) {
1802 if (Op->isLoad() && !MI->mayLoad())
1803 report("Missing mayLoad flag", MI);
1804 if (Op->isStore() && !MI->mayStore())
1805 report("Missing mayStore flag", MI);
1806 }
1807
1808 // Debug values must not have a slot index.
1809 // Other instructions must have one, unless they are inside a bundle.
1810 if (LiveInts) {
1811 bool mapped = !LiveInts->isNotInMIMap(*MI);
1812 if (MI->isDebugOrPseudoInstr()) {
1813 if (mapped)
1814 report("Debug instruction has a slot index", MI);
1815 } else if (MI->isInsideBundle()) {
1816 if (mapped)
1817 report("Instruction inside bundle has a slot index", MI);
1818 } else {
1819 if (!mapped)
1820 report("Missing slot index", MI);
1821 }
1822 }
1823
1824 unsigned Opc = MCID.getOpcode();
1825 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1826 verifyPreISelGenericInstruction(MI);
1827 return;
1828 }
1829
1830 StringRef ErrorInfo;
1831 if (!TII->verifyInstruction(*MI, ErrorInfo))
1832 report(ErrorInfo.data(), MI);
1833
1834 // Verify properties of various specific instruction types
1835 switch (MI->getOpcode()) {
1836 case TargetOpcode::COPY: {
1837 const MachineOperand &DstOp = MI->getOperand(0);
1838 const MachineOperand &SrcOp = MI->getOperand(1);
1839 const Register SrcReg = SrcOp.getReg();
1840 const Register DstReg = DstOp.getReg();
1841
1842 LLT DstTy = MRI->getType(DstReg);
1843 LLT SrcTy = MRI->getType(SrcReg);
1844 if (SrcTy.isValid() && DstTy.isValid()) {
1845 // If both types are valid, check that the types are the same.
1846 if (SrcTy != DstTy) {
1847 report("Copy Instruction is illegal with mismatching types", MI);
1848 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1849 }
1850
1851 break;
1852 }
1853
1854 if (!SrcTy.isValid() && !DstTy.isValid())
1855 break;
1856
1857 // If we have only one valid type, this is likely a copy between a virtual
1858 // and physical register.
1859 unsigned SrcSize = 0;
1860 unsigned DstSize = 0;
1861 if (SrcReg.isPhysical() && DstTy.isValid()) {
1862 const TargetRegisterClass *SrcRC =
1863 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1864 if (SrcRC)
1865 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1866 }
1867
1868 if (SrcSize == 0)
1869 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1870
1871 if (DstReg.isPhysical() && SrcTy.isValid()) {
1872 const TargetRegisterClass *DstRC =
1873 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1874 if (DstRC)
1875 DstSize = TRI->getRegSizeInBits(*DstRC);
1876 }
1877
1878 if (DstSize == 0)
1879 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1880
1881 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1882 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1883 report("Copy Instruction is illegal with mismatching sizes", MI);
1884 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1885 << "\n";
1886 }
1887 }
1888 break;
1889 }
1890 case TargetOpcode::STATEPOINT: {
1891 StatepointOpers SO(MI);
1892 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1893 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1894 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1895 report("meta operands to STATEPOINT not constant!", MI);
1896 break;
1897 }
1898
1899 auto VerifyStackMapConstant = [&](unsigned Offset) {
1900 if (Offset >= MI->getNumOperands()) {
1901 report("stack map constant to STATEPOINT is out of range!", MI);
1902 return;
1903 }
1904 if (!MI->getOperand(Offset - 1).isImm() ||
1905 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1906 !MI->getOperand(Offset).isImm())
1907 report("stack map constant to STATEPOINT not well formed!", MI);
1908 };
1909 VerifyStackMapConstant(SO.getCCIdx());
1910 VerifyStackMapConstant(SO.getFlagsIdx());
1911 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1912 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1913 VerifyStackMapConstant(SO.getNumAllocaIdx());
1914 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1915
1916 // Verify that all explicit statepoint defs are tied to gc operands as
1917 // they are expected to be relocations of gc operands.
1918 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1919 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
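// The GC pointer operands end just before the ConstantOp marker that precedes
// the NumAllocas field, hence the -2.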
1920 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1921 unsigned UseOpIdx;
1922 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1923 report("STATEPOINT defs expected to be tied", MI);
1924 break;
1925 }
1926 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1927 report("STATEPOINT def tied to non-gc operand", MI);
1928 break;
1929 }
1930 }
1931
1932 // TODO: verify we have properly encoded deopt arguments
1933 } break;
1934 case TargetOpcode::INSERT_SUBREG: {
1935 unsigned InsertedSize;
1936 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1937 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1938 else
1939 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1940 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1941 if (SubRegSize < InsertedSize) {
1942 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1943 "size than the subreg it was inserted into", MI);
1944 break;
1945 }
1946 } break;
1947 case TargetOpcode::REG_SEQUENCE: {
1948 unsigned NumOps = MI->getNumOperands();
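// REG_SEQUENCE is a single def followed by (register, subreg index) pairs, so
// a well-formed instance always has an odd operand count.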
1949 if (!(NumOps & 1)) {
1950 report("Invalid number of operands for REG_SEQUENCE", MI);
1951 break;
1952 }
1953
1954 for (unsigned I = 1; I != NumOps; I += 2) {
1955 const MachineOperand &RegOp = MI->getOperand(I);
1956 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
1957
1958 if (!RegOp.isReg())
1959 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
1960
1961 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
1962 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
1963 report("Invalid subregister index operand for REG_SEQUENCE",
1964 &SubRegOp, I + 1);
1965 }
1966 }
1967
1968 Register DstReg = MI->getOperand(0).getReg();
1969 if (DstReg.isPhysical())
1970 report("REG_SEQUENCE does not support physical register results", MI);
1971
1972 if (MI->getOperand(0).getSubReg())
1973 report("Invalid subreg result for REG_SEQUENCE", MI);
1974
1975 break;
1976 }
1977 }
1978 }
1979
1980 void
1981 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1982 const MachineInstr *MI = MO->getParent();
1983 const MCInstrDesc &MCID = MI->getDesc();
1984 unsigned NumDefs = MCID.getNumDefs();
1985 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1986 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
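// PATCHPOINT's result is optional: only operand 0 counts as a def, and only
// when it is actually a register.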
1987
1988 // The first MCID.NumDefs operands must be explicit register defines
1989 if (MONum < NumDefs) {
1990 const MCOperandInfo &MCOI = MCID.operands()[MONum];
1991 if (!MO->isReg())
1992 report("Explicit definition must be a register", MO, MONum);
1993 else if (!MO->isDef() && !MCOI.isOptionalDef())
1994 report("Explicit definition marked as use", MO, MONum);
1995 else if (MO->isImplicit())
1996 report("Explicit definition marked as implicit", MO, MONum);
1997 } else if (MONum < MCID.getNumOperands()) {
1998 const MCOperandInfo &MCOI = MCID.operands()[MONum];
1999 // Don't check if it's the last operand in a variadic instruction. See,
2000 // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2001 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2002 if (!IsOptional) {
2003 if (MO->isReg()) {
2004 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2005 report("Explicit operand marked as def", MO, MONum);
2006 if (MO->isImplicit())
2007 report("Explicit operand marked as implicit", MO, MONum);
2008 }
2009
2010 // Check that an instruction has register operands only as expected.
2011 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2012 !MO->isReg() && !MO->isFI())
2013 report("Expected a register operand.", MO, MONum);
2014 if (MO->isReg()) {
2015 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2016 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2017 !TII->isPCRelRegisterOperandLegal(*MO)))
2018 report("Expected a non-register operand.", MO, MONum);
2019 }
2020 }
2021
2022 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2023 if (TiedTo != -1) {
2024 if (!MO->isReg())
2025 report("Tied use must be a register", MO, MONum);
2026 else if (!MO->isTied())
2027 report("Operand should be tied", MO, MONum);
2028 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2029 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2030 else if (MO->getReg().isPhysical()) {
2031 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2032 if (!MOTied.isReg())
2033 report("Tied counterpart must be a register", &MOTied, TiedTo);
2034 else if (MOTied.getReg().isPhysical() &&
2035 MO->getReg() != MOTied.getReg())
2036 report("Tied physical registers must match.", &MOTied, TiedTo);
2037 }
2038 } else if (MO->isReg() && MO->isTied())
2039 report("Explicit operand should not be tied", MO, MONum);
2040 } else {
2041 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2042 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2043 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2044 }
2045
2046 switch (MO->getType()) {
2047 case MachineOperand::MO_Register: {
2048 // Verify debug flag on debug instructions. Check this first because reg0
2049 // indicates an undefined debug value.
2050 if (MI->isDebugInstr() && MO->isUse()) {
2051 if (!MO->isDebug())
2052 report("Register operand must be marked debug", MO, MONum);
2053 } else if (MO->isDebug()) {
2054 report("Register operand must not be marked debug", MO, MONum);
2055 }
2056
2057 const Register Reg = MO->getReg();
2058 if (!Reg)
2059 return;
2060 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2061 checkLiveness(MO, MONum);
2062
2063 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2064 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2065 report("Undef virtual register def operands require a subregister", MO, MONum);
2066
2067 // Verify the consistency of tied operands.
2068 if (MO->isTied()) {
2069 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2070 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2071 if (!OtherMO.isReg())
2072 report("Must be tied to a register", MO, MONum);
2073 if (!OtherMO.isTied())
2074 report("Missing tie flags on tied operand", MO, MONum);
2075 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2076 report("Inconsistent tie links", MO, MONum);
2077 if (MONum < MCID.getNumDefs()) {
2078 if (OtherIdx < MCID.getNumOperands()) {
2079 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2080 report("Explicit def tied to explicit use without tie constraint",
2081 MO, MONum);
2082 } else {
2083 if (!OtherMO.isImplicit())
2084 report("Explicit def should be tied to implicit use", MO, MONum);
2085 }
2086 }
2087 }
2088
2089 // Verify two-address constraints after the twoaddressinstruction pass.
2090 // Both the twoaddressinstruction pass and the phi-node-elimination pass
2091 // call MRI->leaveSSA() to mark MF as NoSSA, but this verification is only
2092 // valid after the former pass, not the latter. So we shouldn't use NoSSA
2093 // as the condition; instead we key on the TiedOpsRewritten property to
2094 // decide when to verify two-address constraints, since that property is
2095 // set by the twoaddressinstruction pass itself.
2096 unsigned DefIdx;
2097 if (MF->getProperties().hasProperty(
2098 MachineFunctionProperties::Property::TiedOpsRewritten) &&
2099 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2100 Reg != MI->getOperand(DefIdx).getReg())
2101 report("Two-address instruction operands must be identical", MO, MONum);
2102
2103 // Check register classes.
2104 unsigned SubIdx = MO->getSubReg();
2105
2106 if (Reg.isPhysical()) {
2107 if (SubIdx) {
2108 report("Illegal subregister index for physical register", MO, MONum);
2109 return;
2110 }
2111 if (MONum < MCID.getNumOperands()) {
2112 if (const TargetRegisterClass *DRC =
2113 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2114 if (!DRC->contains(Reg)) {
2115 report("Illegal physical register for instruction", MO, MONum);
2116 errs() << printReg(Reg, TRI) << " is not a "
2117 << TRI->getRegClassName(DRC) << " register.\n";
2118 }
2119 }
2120 }
2121 if (MO->isRenamable()) {
2122 if (MRI->isReserved(Reg)) {
2123 report("isRenamable set on reserved register", MO, MONum);
2124 return;
2125 }
2126 }
2127 } else {
2128 // Virtual register.
2129 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2130 if (!RC) {
2131 // This is a generic virtual register.
2132
2133 // Do not allow undef uses for generic virtual registers. This ensures
2134 // getVRegDef can never fail and return null on a generic register.
2135 //
2136 // FIXME: This restriction should probably be broadened to all SSA
2137 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2138 // run on the SSA function just before phi elimination.
2139 if (MO->isUndef())
2140 report("Generic virtual register use cannot be undef", MO, MONum);
2141
2142 // A debug value instruction is permitted to use undefined vregs.
2143 // This is a performance measure to skip the overhead of immediately
2144 // pruning unused debug operands. The final undef substitution occurs
2145 // when debug values are allocated in LDVImpl::handleDebugValue, so
2146 // these verifications always apply after this pass.
2147 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2148 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2149 // If we're post-Select, we can't have gvregs anymore.
2150 if (isFunctionSelected) {
2151 report("Generic virtual register invalid in a Selected function",
2152 MO, MONum);
2153 return;
2154 }
2155
2156 // The gvreg must have a type and it must not have a SubIdx.
2157 LLT Ty = MRI->getType(Reg);
2158 if (!Ty.isValid()) {
2159 report("Generic virtual register must have a valid type", MO,
2160 MONum);
2161 return;
2162 }
2163
2164 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2165
2166 // If we're post-RegBankSelect, the gvreg must have a bank.
2167 if (!RegBank && isFunctionRegBankSelected) {
2168 report("Generic virtual register must have a bank in a "
2169 "RegBankSelected function",
2170 MO, MONum);
2171 return;
2172 }
2173
2174 // Make sure the register fits into its register bank if any.
2175 if (RegBank && Ty.isValid() &&
2176 RegBank->getSize() < Ty.getSizeInBits()) {
2177 report("Register bank is too small for virtual register", MO,
2178 MONum);
2179 errs() << "Register bank " << RegBank->getName() << " too small("
2180 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2181 << "-bits\n";
2182 return;
2183 }
2184 }
2185
2186 if (SubIdx) {
2187 report("Generic virtual register does not allow subregister index", MO,
2188 MONum);
2189 return;
2190 }
2191
2192 // If this is a target specific instruction and this operand
2193 // has register class constraint, the virtual register must
2194 // comply to it.
2195 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2196 MONum < MCID.getNumOperands() &&
2197 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2198 report("Virtual register does not match instruction constraint", MO,
2199 MONum);
2200 errs() << "Expect register class "
2201 << TRI->getRegClassName(
2202 TII->getRegClass(MCID, MONum, TRI, *MF))
2203 << " but got nothing\n";
2204 return;
2205 }
2206
2207 break;
2208 }
2209 if (SubIdx) {
2210 const TargetRegisterClass *SRC =
2211 TRI->getSubClassWithSubReg(RC, SubIdx);
2212 if (!SRC) {
2213 report("Invalid subregister index for virtual register", MO, MONum);
2214 errs() << "Register class " << TRI->getRegClassName(RC)
2215 << " does not support subreg index " << SubIdx << "\n";
2216 return;
2217 }
2218 if (RC != SRC) {
2219 report("Invalid register class for subregister index", MO, MONum);
2220 errs() << "Register class " << TRI->getRegClassName(RC)
2221 << " does not fully support subreg index " << SubIdx << "\n";
2222 return;
2223 }
2224 }
2225 if (MONum < MCID.getNumOperands()) {
2226 if (const TargetRegisterClass *DRC =
2227 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2228 if (SubIdx) {
2229 const TargetRegisterClass *SuperRC =
2230 TRI->getLargestLegalSuperClass(RC, *MF);
2231 if (!SuperRC) {
2232 report("No largest legal super class exists.", MO, MONum);
2233 return;
2234 }
2235 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2236 if (!DRC) {
2237 report("No matching super-reg register class.", MO, MONum);
2238 return;
2239 }
2240 }
2241 if (!RC->hasSuperClassEq(DRC)) {
2242 report("Illegal virtual register for instruction", MO, MONum);
2243 errs() << "Expected a " << TRI->getRegClassName(DRC)
2244 << " register, but got a " << TRI->getRegClassName(RC)
2245 << " register\n";
2246 }
2247 }
2248 }
2249 }
2250 break;
2251 }
2252
2253 case MachineOperand::MO_RegisterMask:
2254 regMasks.push_back(MO->getRegMask());
2255 break;
2256
2257 case MachineOperand::MO_MachineBasicBlock:
2258 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2259 report("PHI operand is not in the CFG", MO, MONum);
2260 break;
2261
2262 case MachineOperand::MO_FrameIndex:
2263 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2264 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2265 int FI = MO->getIndex();
2266 LiveInterval &LI = LiveStks->getInterval(FI);
2267 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2268
2269 bool stores = MI->mayStore();
2270 bool loads = MI->mayLoad();
2271 // For a memory-to-memory move, we need to check if the frame
2272 // index is used for storing or loading, by inspecting the
2273 // memory operands.
2274 if (stores && loads) {
2275 for (auto *MMO : MI->memoperands()) {
2276 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2277 if (PSV == nullptr) continue;
2278 const FixedStackPseudoSourceValue *Value =
2279 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2280 if (Value == nullptr) continue;
2281 if (Value->getFrameIndex() != FI) continue;
2282
2283 if (MMO->isStore())
2284 loads = false;
2285 else
2286 stores = false;
2287 break;
2288 }
2289 if (loads == stores)
2290 report("Missing fixed stack memoperand.", MI);
2291 }
2292 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2293 report("Instruction loads from dead spill slot", MO, MONum);
2294 errs() << "Live stack: " << LI << '\n';
2295 }
2296 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2297 report("Instruction stores to dead spill slot", MO, MONum);
2298 errs() << "Live stack: " << LI << '\n';
2299 }
2300 }
2301 break;
2302
2303 case MachineOperand::MO_CFIIndex:
2304 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2305 report("CFI instruction has invalid index", MO, MONum);
2306 break;
2307
2308 default:
2309 break;
2310 }
2311 }
2312
2313 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2314 unsigned MONum, SlotIndex UseIdx,
2315 const LiveRange &LR,
2316 Register VRegOrUnit,
2317 LaneBitmask LaneMask) {
2318 LiveQueryResult LRQ = LR.Query(UseIdx);
2319 // Check if we have a segment at the use. Note, however, that we only need
2320 // one live subregister range; the others may be dead.
2321 if (!LRQ.valueIn() && LaneMask.none()) {
2322 report("No live segment at use", MO, MONum);
2323 report_context_liverange(LR);
2324 report_context_vreg_regunit(VRegOrUnit);
2325 report_context(UseIdx);
2326 }
2327 if (MO->isKill() && !LRQ.isKill()) {
2328 report("Live range continues after kill flag", MO, MONum);
2329 report_context_liverange(LR);
2330 report_context_vreg_regunit(VRegOrUnit);
2331 if (LaneMask.any())
2332 report_context_lanemask(LaneMask);
2333 report_context(UseIdx);
2334 }
2335 }
2336
2337 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2338 unsigned MONum, SlotIndex DefIdx,
2339 const LiveRange &LR,
2340 Register VRegOrUnit,
2341 bool SubRangeCheck,
2342 LaneBitmask LaneMask) {
2343 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2344 // The LR can correspond to the whole register, in which case its def slot
2345 // need not match this operand's def slot. E.g. we may be checking a
2346 // "normal" subreg MO here while another early-clobber subreg MO in the
2347 // same instruction gives the whole register an early-clobber def slot
2348 // that differs from the def slot of the MO being checked. For example:
2349 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2350 // The check that an early-clobber def of the same superregister exists
2351 // somewhere is performed in visitMachineFunctionAfter().
2352 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2353 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2354 (VNI->def != DefIdx &&
2355 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2356 report("Inconsistent valno->def", MO, MONum);
2357 report_context_liverange(LR);
2358 report_context_vreg_regunit(VRegOrUnit);
2359 if (LaneMask.any())
2360 report_context_lanemask(LaneMask);
2361 report_context(*VNI);
2362 report_context(DefIdx);
2363 }
2364 } else {
2365 report("No live segment at def", MO, MONum);
2366 report_context_liverange(LR);
2367 report_context_vreg_regunit(VRegOrUnit);
2368 if (LaneMask.any())
2369 report_context_lanemask(LaneMask);
2370 report_context(DefIdx);
2371 }
2372 // Check that, if the dead def flag is present, LiveInts agree.
2373 if (MO->isDead()) {
2374 LiveQueryResult LRQ = LR.Query(DefIdx);
2375 if (!LRQ.isDeadDef()) {
2376 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2377 // A dead subreg def only tells us that the specific subreg is dead. There
2378 // could be other non-dead defs of other subregs, or we could have other
2379 // parts of the register being live through the instruction. So unless we
2380 // are checking liveness for a subrange it is ok for the live range to
2381 // continue, given that we have a dead def of a subregister.
2382 if (SubRangeCheck || MO->getSubReg() == 0) {
2383 report("Live range continues after dead def flag", MO, MONum);
2384 report_context_liverange(LR);
2385 report_context_vreg_regunit(VRegOrUnit);
2386 if (LaneMask.any())
2387 report_context_lanemask(LaneMask);
2388 }
2389 }
2390 }
2391 }
2392
2393 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2394 const MachineInstr *MI = MO->getParent();
2395 const Register Reg = MO->getReg();
2396 const unsigned SubRegIdx = MO->getSubReg();
2397
2398 const LiveInterval *LI = nullptr;
2399 if (LiveInts && Reg.isVirtual()) {
2400 if (LiveInts->hasInterval(Reg)) {
2401 LI = &LiveInts->getInterval(Reg);
2402 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2403 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2404 report("Live interval for subreg operand has no subranges", MO, MONum);
2405 } else {
2406 report("Virtual register has no live interval", MO, MONum);
2407 }
2408 }
2409
2410 // Both use and def operands can read a register.
2411 if (MO->readsReg()) {
2412 if (MO->isKill())
2413 addRegWithSubRegs(regsKilled, Reg);
2414
2415 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2416 // which case we have already checked that LiveVars knows any kills on the
2417 // bundle header instead).
2418 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2419 !MI->isBundledWithPred()) {
2420 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2421 if (!is_contained(VI.Kills, MI))
2422 report("Kill missing from LiveVariables", MO, MONum);
2423 }
2424
2425 // Check LiveInts liveness and kill.
2426 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2427 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2428 // Check the cached regunit intervals.
2429 if (Reg.isPhysical() && !isReserved(Reg)) {
2430 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2431 ++Units) {
2432 if (MRI->isReservedRegUnit(*Units))
2433 continue;
2434 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2435 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2436 }
2437 }
2438
2439 if (Reg.isVirtual()) {
2440 // This is a virtual register interval.
2441 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2442
2443 if (LI->hasSubRanges() && !MO->isDef()) {
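// Map the operand onto the lanes it reads: a subreg index contributes its
// lane mask; otherwise the operand reads the whole register.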
2444 LaneBitmask MOMask = SubRegIdx != 0
2445 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2446 : MRI->getMaxLaneMaskForVReg(Reg);
2447 LaneBitmask LiveInMask;
2448 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2449 if ((MOMask & SR.LaneMask).none())
2450 continue;
2451 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2452 LiveQueryResult LRQ = SR.Query(UseIdx);
2453 if (LRQ.valueIn())
2454 LiveInMask |= SR.LaneMask;
2455 }
2456 // At least part of the register has to be live at the use.
2457 if ((LiveInMask & MOMask).none()) {
2458 report("No live subrange at use", MO, MONum);
2459 report_context(*LI);
2460 report_context(UseIdx);
2461 }
2462 }
2463 }
2464 }
2465
2466 // Use of a dead register.
2467 if (!regsLive.count(Reg)) {
2468 if (Reg.isPhysical()) {
2469 // Reserved registers may be used even when 'dead'.
2470 bool Bad = !isReserved(Reg);
2471 // We are fine if just any subregister has a defined value.
2472 if (Bad) {
2473
2474 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2475 if (regsLive.count(SubReg)) {
2476 Bad = false;
2477 break;
2478 }
2479 }
2480 }
2481 // If there is an additional implicit-use of a super register we stop
2482 // here. By definition we are fine if the super register is not
2483 // (completely) dead, if the complete super register is dead we will
2484 // get a report for its operand.
2485 if (Bad) {
2486 for (const MachineOperand &MOP : MI->uses()) {
2487 if (!MOP.isReg() || !MOP.isImplicit())
2488 continue;
2489
2490 if (!MOP.getReg().isPhysical())
2491 continue;
2492
2493 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2494 Bad = false;
2495 }
2496 }
2497 if (Bad)
2498 report("Using an undefined physical register", MO, MONum);
2499 } else if (MRI->def_empty(Reg)) {
2500 report("Reading virtual register without a def", MO, MONum);
2501 } else {
2502 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2503 // We don't know which virtual registers are live in, so only complain
2504 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2505 // must be live in. PHI instructions are handled separately.
2506 if (MInfo.regsKilled.count(Reg))
2507 report("Using a killed virtual register", MO, MONum);
2508 else if (!MI->isPHI())
2509 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2510 }
2511 }
2512 }
2513
2514 if (MO->isDef()) {
2515 // Register defined.
2516 // TODO: verify that earlyclobber ops are not used.
2517 if (MO->isDead())
2518 addRegWithSubRegs(regsDead, Reg);
2519 else
2520 addRegWithSubRegs(regsDefined, Reg);
2521
2522 // Verify SSA form.
2523 if (MRI->isSSA() && Reg.isVirtual() &&
2524 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2525 report("Multiple virtual register defs in SSA form", MO, MONum);
2526
2527 // Check LiveInts for a live segment, but only for virtual registers.
2528 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2529 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2530 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
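// An early-clobber def becomes live at the early-clobber slot, which precedes
// the register slot where normal defs (and use kills) take effect.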
2531
2532 if (Reg.isVirtual()) {
2533 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2534
2535 if (LI->hasSubRanges()) {
2536 LaneBitmask MOMask = SubRegIdx != 0
2537 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2538 : MRI->getMaxLaneMaskForVReg(Reg);
2539 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2540 if ((SR.LaneMask & MOMask).none())
2541 continue;
2542 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2543 }
2544 }
2545 }
2546 }
2547 }
2548 }
2549
2550 // This function gets called after visiting all instructions in a bundle. The
2551 // argument points to the bundle header.
2552 // Normal stand-alone instructions are also considered 'bundles', and this
2553 // function is called for all of them.
2554 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2555 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2556 set_union(MInfo.regsKilled, regsKilled);
2557 set_subtract(regsLive, regsKilled); regsKilled.clear();
2558 // Kill any masked registers.
2559 while (!regMasks.empty()) {
2560 const uint32_t *Mask = regMasks.pop_back_val();
2561 for (Register Reg : regsLive)
2562 if (Reg.isPhysical() &&
2563 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2564 regsDead.push_back(Reg);
2565 }
2566 set_subtract(regsLive, regsDead); regsDead.clear();
2567 set_union(regsLive, regsDefined); regsDefined.clear();
2568 }
2569
2570 void
2571 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2572 MBBInfoMap[MBB].regsLiveOut = regsLive;
2573 regsLive.clear();
2574
2575 if (Indexes) {
2576 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2577 if (!(stop > lastIndex)) {
2578 report("Block ends before last instruction index", MBB);
2579 errs() << "Block ends at " << stop
2580 << " last instruction was at " << lastIndex << '\n';
2581 }
2582 lastIndex = stop;
2583 }
2584 }
2585
2586 namespace {
2587 // This implements a set of registers that serves as a filter: can filter other
2588 // sets by passing through elements not in the filter and blocking those that
2589 // are. Any filter implicitly includes the full set of physical registers upon
2590 // creation, thus filtering them all out. The filter itself as a set only grows,
2591 // and needs to be as efficient as possible.
2592 struct VRegFilter {
2593 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2594 // no duplicates. Both virtual and physical registers are fine.
2595 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2596 SmallVector<Register, 0> VRegsBuffer;
2597 filterAndAdd(FromRegSet, VRegsBuffer);
2598 }
2599 // Filter \p FromRegSet through the filter and append passed elements into \p
2600 // ToVRegs. All elements appended are then added to the filter itself.
2601 // \returns true if anything changed.
2602 template <typename RegSetT>
2603 bool filterAndAdd(const RegSetT &FromRegSet,
2604 SmallVectorImpl<Register> &ToVRegs) {
2605 unsigned SparseUniverse = Sparse.size();
2606 unsigned NewSparseUniverse = SparseUniverse;
2607 unsigned NewDenseSize = Dense.size();
2608 size_t Begin = ToVRegs.size();
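// Remember where the newly appended registers start; everything from Begin
// onward is committed to the filter below.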
2609 for (Register Reg : FromRegSet) {
2610 if (!Reg.isVirtual())
2611 continue;
2612 unsigned Index = Register::virtReg2Index(Reg);
2613 if (Index < SparseUniverseMax) {
2614 if (Index < SparseUniverse && Sparse.test(Index))
2615 continue;
2616 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2617 } else {
2618 if (Dense.count(Reg))
2619 continue;
2620 ++NewDenseSize;
2621 }
2622 ToVRegs.push_back(Reg);
2623 }
2624 size_t End = ToVRegs.size();
2625 if (Begin == End)
2626 return false;
2627 // Reserving space in sets once performs better than doing so continuously
2628 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2629 // tuned all the way down) and double iteration (the second one is over a
2630 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2631 Sparse.resize(NewSparseUniverse);
2632 Dense.reserve(NewDenseSize);
2633 for (unsigned I = Begin; I < End; ++I) {
2634 Register Reg = ToVRegs[I];
2635 unsigned Index = Register::virtReg2Index(Reg);
2636 if (Index < SparseUniverseMax)
2637 Sparse.set(Index);
2638 else
2639 Dense.insert(Reg);
2640 }
2641 return true;
2642 }
2643
2644 private:
2645 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2646 // VRegs indexed within SparseUniverseMax are tracked by Sparse; those beyond
2647 // are tracked by Dense. The only purpose of the threshold and the Dense set
2648 // is to keep memory usage growing reasonably in pathological cases (a large
2649 // number of very sparse VRegFilter instances live at the same time). In
2650 // practice, even in the worst-by-execution-time cases, having all elements
2651 // tracked by Sparse (a very large SparseUniverseMax scenario) tends to be
2652 // more space efficient than tracking them in Dense. The threshold is set to
2653 // keep the worst-case memory usage within 2x of figures determined
2654 // empirically for the "all Dense" scenario in such cases.
2655 BitVector Sparse;
2656 DenseSet<unsigned> Dense;
2657 };
2658
2659 // Implements both a transfer function and a (binary, in-place) join operator
2660 // for a dataflow over register sets with set union join and filtering transfer
2661 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2662 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2663 // time, where n is the size of the set (as opposed to O(U) where U is the
2664 // universe). filter_b implicitly contains all physical registers at all times.
2665 class FilteringVRegSet {
2666 VRegFilter Filter;
2667 SmallVector<Register, 0> VRegs;
2668
2669 public:
2670 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2671 // Both virtual and physical registers are fine.
2672 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2673 Filter.add(RS);
2674 }
2675 // Passes \p RS through the filter_b (transfer function) and adds what's left
2676 // to itself (out_b).
2677 template <typename RegSetT> bool add(const RegSetT &RS) {
2678 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2679 // a set union) just add everything being added here to the Filter as well.
2680 return Filter.filterAndAdd(RS, VRegs);
2681 }
2682 using const_iterator = decltype(VRegs)::const_iterator;
2683 const_iterator begin() const { return VRegs.begin(); }
2684 const_iterator end() const { return VRegs.end(); }
2685 size_t size() const { return VRegs.size(); }
2686 };
2687 } // namespace
2688
2689 // Calculate the largest possible vregsPassed sets. These are the registers that
2690 // can pass through an MBB live, but may not be live every time. It is assumed
2691 // that all vregsPassed sets are empty before the call.
2692 void MachineVerifier::calcRegsPassed() {
2693 if (MF->empty())
2694 // ReversePostOrderTraversal doesn't handle empty functions.
2695 return;
2696
2697 for (const MachineBasicBlock *MB :
2698 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2699 FilteringVRegSet VRegs;
2700 BBInfo &Info = MBBInfoMap[MB];
2701 assert(Info.reachable);
2702
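// Registers killed or live-out (i.e. defined) in this block do not "pass
// through" it, so seed the filter with those sets.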
2703 VRegs.addToFilter(Info.regsKilled);
2704 VRegs.addToFilter(Info.regsLiveOut);
2705 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2706 const BBInfo &PredInfo = MBBInfoMap[Pred];
2707 if (!PredInfo.reachable)
2708 continue;
2709
2710 VRegs.add(PredInfo.regsLiveOut);
2711 VRegs.add(PredInfo.vregsPassed);
2712 }
2713 Info.vregsPassed.reserve(VRegs.size());
2714 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2715 }
2716 }
2717
2718 // Calculate the set of virtual registers that must be passed through each basic
2719 // block in order to satisfy the requirements of successor blocks. This is very
2720 // similar to calcRegsPassed, only backwards.
2721 void MachineVerifier::calcRegsRequired() {
2722 // First push live-in regs to predecessors' vregsRequired.
2723 SmallPtrSet<const MachineBasicBlock*, 8> todo;
2724 for (const auto &MBB : *MF) {
2725 BBInfo &MInfo = MBBInfoMap[&MBB];
2726 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2727 BBInfo &PInfo = MBBInfoMap[Pred];
2728 if (PInfo.addRequired(MInfo.vregsLiveIn))
2729 todo.insert(Pred);
2730 }
2731
2732 // Handle the PHI node.
2733 for (const MachineInstr &MI : MBB.phis()) {
2734 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2735 // Skip those Operands which are undef regs or not regs.
2736 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2737 continue;
2738
2739 // Get register and predecessor for one PHI edge.
2740 Register Reg = MI.getOperand(i).getReg();
2741 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2742
2743 BBInfo &PInfo = MBBInfoMap[Pred];
2744 if (PInfo.addRequired(Reg))
2745 todo.insert(Pred);
2746 }
2747 }
2748 }
2749
2750 // Iteratively push vregsRequired to predecessors. This will converge to the
2751 // same final state regardless of DenseSet iteration order.
2752 while (!todo.empty()) {
2753 const MachineBasicBlock *MBB = *todo.begin();
2754 todo.erase(MBB);
2755 BBInfo &MInfo = MBBInfoMap[MBB];
2756 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2757 if (Pred == MBB)
2758 continue;
2759 BBInfo &SInfo = MBBInfoMap[Pred];
2760 if (SInfo.addRequired(MInfo.vregsRequired))
2761 todo.insert(Pred);
2762 }
2763 }
2764 }
2765
2766 // Check PHI instructions at the beginning of MBB. It is assumed that
2767 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2768 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2769 BBInfo &MInfo = MBBInfoMap[&MBB];
2770
2771 SmallPtrSet<const MachineBasicBlock*, 8> seen;
2772 for (const MachineInstr &Phi : MBB) {
2773 if (!Phi.isPHI())
2774 break;
2775 seen.clear();
2776
2777 const MachineOperand &MODef = Phi.getOperand(0);
2778 if (!MODef.isReg() || !MODef.isDef()) {
2779 report("Expected first PHI operand to be a register def", &MODef, 0);
2780 continue;
2781 }
2782 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2783 MODef.isEarlyClobber() || MODef.isDebug())
2784 report("Unexpected flag on PHI operand", &MODef, 0);
2785 Register DefReg = MODef.getReg();
2786 if (!DefReg.isVirtual())
2787 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2788
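// After the def, PHI operands come in (value, predecessor block) pairs.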
2789 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2790 const MachineOperand &MO0 = Phi.getOperand(I);
2791 if (!MO0.isReg()) {
2792 report("Expected PHI operand to be a register", &MO0, I);
2793 continue;
2794 }
2795 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2796 MO0.isDebug() || MO0.isTied())
2797 report("Unexpected flag on PHI operand", &MO0, I);
2798
2799 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2800 if (!MO1.isMBB()) {
2801 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2802 continue;
2803 }
2804
2805 const MachineBasicBlock &Pre = *MO1.getMBB();
2806 if (!Pre.isSuccessor(&MBB)) {
2807 report("PHI input is not a predecessor block", &MO1, I + 1);
2808 continue;
2809 }
2810
2811 if (MInfo.reachable) {
2812 seen.insert(&Pre);
2813 BBInfo &PrInfo = MBBInfoMap[&Pre];
2814 if (!MO0.isUndef() && PrInfo.reachable &&
2815 !PrInfo.isLiveOut(MO0.getReg()))
2816 report("PHI operand is not live-out from predecessor", &MO0, I);
2817 }
2818 }
2819
2820 // Did we see all predecessors?
2821 if (MInfo.reachable) {
2822 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2823 if (!seen.count(Pred)) {
2824 report("Missing PHI operand", &Phi);
2825 errs() << printMBBReference(*Pred)
2826 << " is a predecessor according to the CFG.\n";
2827 }
2828 }
2829 }
2830 }
2831 }
2832
2833 void MachineVerifier::visitMachineFunctionAfter() {
2834 calcRegsPassed();
2835
2836 for (const MachineBasicBlock &MBB : *MF)
2837 checkPHIOps(MBB);
2838
2839 // Now check liveness info if available
2840 calcRegsRequired();
2841
2842 // Check for killed virtual registers that should be live out.
2843 for (const auto &MBB : *MF) {
2844 BBInfo &MInfo = MBBInfoMap[&MBB];
2845 for (Register VReg : MInfo.vregsRequired)
2846 if (MInfo.regsKilled.count(VReg)) {
2847 report("Virtual register killed in block, but needed live out.", &MBB);
2848 errs() << "Virtual register " << printReg(VReg)
2849 << " is used after the block.\n";
2850 }
2851 }
2852
2853 if (!MF->empty()) {
2854 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2855 for (Register VReg : MInfo.vregsRequired) {
2856 report("Virtual register defs don't dominate all uses.", MF);
2857 report_context_vreg(VReg);
2858 }
2859 }
2860
2861 if (LiveVars)
2862 verifyLiveVariables();
2863 if (LiveInts)
2864 verifyLiveIntervals();
2865
2866 // Check live-in list of each MBB. If a register is live into MBB, check
2867 // that the register is in regsLiveOut of each predecessor block. Since
2868 // this must come from a definition in the predecessor or its live-in
2869 // list, this will catch a live-through case where the predecessor does not
2870 // have the register in its live-in list. This currently only checks
2871 // registers that have no aliases, are not allocatable and are not
2872 // reserved, which could mean a condition code register for instance.
2873 if (MRI->tracksLiveness())
2874 for (const auto &MBB : *MF)
2875 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2876 MCPhysReg LiveInReg = P.PhysReg;
2877 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2878 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2879 continue;
2880 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2881 BBInfo &PInfo = MBBInfoMap[Pred];
2882 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2883 report("Live in register not found to be live out from predecessor.",
2884 &MBB);
2885 errs() << TRI->getName(LiveInReg)
2886 << " not found to be live out from "
2887 << printMBBReference(*Pred) << "\n";
2888 }
2889 }
2890 }
2891
2892 for (auto CSInfo : MF->getCallSitesInfo())
2893 if (!CSInfo.first->isCall())
2894 report("Call site info referencing instruction that is not call", MF);
2895
2896 // If there's debug-info, check that we don't have any duplicate value
2897 // tracking numbers.
2898 if (MF->getFunction().getSubprogram()) {
2899 DenseSet<unsigned> SeenNumbers;
2900 for (const auto &MBB : *MF) {
2901 for (const auto &MI : MBB) {
2902 if (auto Num = MI.peekDebugInstrNum()) {
2903 auto Result = SeenNumbers.insert((unsigned)Num);
2904 if (!Result.second)
2905 report("Instruction has a duplicated value tracking number", &MI);
2906 }
2907 }
2908 }
2909 }
2910 }
2911
2912 void MachineVerifier::verifyLiveVariables() {
2913 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2914 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2915 Register Reg = Register::index2VirtReg(I);
2916 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2917 for (const auto &MBB : *MF) {
2918 BBInfo &MInfo = MBBInfoMap[&MBB];
2919
2920 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2921 if (MInfo.vregsRequired.count(Reg)) {
2922 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2923 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2924 errs() << "Virtual register " << printReg(Reg)
2925 << " must be live through the block.\n";
2926 }
2927 } else {
2928 if (VI.AliveBlocks.test(MBB.getNumber())) {
2929 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2930 errs() << "Virtual register " << printReg(Reg)
2931 << " is not needed live through the block.\n";
2932 }
2933 }
2934 }
2935 }
2936 }
2937
2938 void MachineVerifier::verifyLiveIntervals() {
2939 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2940 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2941 Register Reg = Register::index2VirtReg(I);
2942
2943 // Spilling and splitting may leave unused registers around. Skip them.
2944 if (MRI->reg_nodbg_empty(Reg))
2945 continue;
2946
2947 if (!LiveInts->hasInterval(Reg)) {
2948 report("Missing live interval for virtual register", MF);
2949 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2950 continue;
2951 }
2952
2953 const LiveInterval &LI = LiveInts->getInterval(Reg);
2954 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2955 verifyLiveInterval(LI);
2956 }
2957
2958 // Verify all the cached regunit intervals.
2959 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2960 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2961 verifyLiveRange(*LR, i);
2962 }
2963
2964 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2965 const VNInfo *VNI, Register Reg,
2966 LaneBitmask LaneMask) {
2967 if (VNI->isUnused())
2968 return;
2969
2970 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2971
2972 if (!DefVNI) {
2973 report("Value not live at VNInfo def and not marked unused", MF);
2974 report_context(LR, Reg, LaneMask);
2975 report_context(*VNI);
2976 return;
2977 }
2978
2979 if (DefVNI != VNI) {
2980 report("Live segment at def has different VNInfo", MF);
2981 report_context(LR, Reg, LaneMask);
2982 report_context(*VNI);
2983 return;
2984 }
2985
2986 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2987 if (!MBB) {
2988 report("Invalid VNInfo definition index", MF);
2989 report_context(LR, Reg, LaneMask);
2990 report_context(*VNI);
2991 return;
2992 }
2993
2994 if (VNI->isPHIDef()) {
2995 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2996 report("PHIDef VNInfo is not defined at MBB start", MBB);
2997 report_context(LR, Reg, LaneMask);
2998 report_context(*VNI);
2999 }
3000 return;
3001 }
3002
3003 // Non-PHI def.
3004 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3005 if (!MI) {
3006 report("No instruction at VNInfo def index", MBB);
3007 report_context(LR, Reg, LaneMask);
3008 report_context(*VNI);
3009 return;
3010 }
3011
3012 if (Reg != 0) {
3013 bool hasDef = false;
3014 bool isEarlyClobber = false;
3015 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3016 if (!MOI->isReg() || !MOI->isDef())
3017 continue;
3018 if (Reg.isVirtual()) {
3019 if (MOI->getReg() != Reg)
3020 continue;
3021 } else {
3022 if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3023 continue;
3024 }
3025 if (LaneMask.any() &&
3026 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3027 continue;
3028 hasDef = true;
3029 if (MOI->isEarlyClobber())
3030 isEarlyClobber = true;
3031 }
3032
3033 if (!hasDef) {
3034 report("Defining instruction does not modify register", MI);
3035 report_context(LR, Reg, LaneMask);
3036 report_context(*VNI);
3037 }
3038
3039 // Early clobber defs begin at USE slots, but other defs must begin at
3040 // DEF slots.
3041 if (isEarlyClobber) {
3042 if (!VNI->def.isEarlyClobber()) {
3043 report("Early clobber def must be at an early-clobber slot", MBB);
3044 report_context(LR, Reg, LaneMask);
3045 report_context(*VNI);
3046 }
3047 } else if (!VNI->def.isRegister()) {
3048 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3049 report_context(LR, Reg, LaneMask);
3050 report_context(*VNI);
3051 }
3052 }
3053 }
3054
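// Check a single live segment: its valno must belong to this range, the
// segment must start at a block boundary or at its value's def, and it must
// end at a block boundary or at an instruction that reads, redefines, or
// kills the register.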
void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // No more checks for live-out segments.
  if (S.end == LiveInts->getMBBEndIdx(EndMBB))
    return;

  // RegUnit intervals are allowed to contain dead PHI defs.
  if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
      S.end == VNI->def.getDeadSlot())
    return;

  // The live segment is ending inside EndMBB.
  const MachineInstr *MI =
      LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
  if (!MI) {
    report("Live segment doesn't end at a valid instruction", EndMBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

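  // Each instruction index expands to four slots, in order: a block boundary
  // slot (B), an early-clobber slot (e), the normal register slot (r), and a
  // dead slot (d). The checks below reject segment ends that name a slot kind
  // inconsistent with how the segment is terminated.
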
  // The block slot must refer to a basic block boundary.
  if (S.end.isBlock()) {
    report("Live segment ends at B slot of an instruction", EndMBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  if (S.end.isDead()) {
    // Segment ends on the dead slot.
    // That means there must be a dead def.
    if (!SlotIndex::isSameInstr(S.start, S.end)) {
      report("Live segment ending at dead slot spans instructions", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }
  }

  // After tied operands are rewritten, a live segment can only end at an
  // early-clobber slot if it is being redefined by an early-clobber def.
  // TODO: Before tied operands are rewritten, a live segment can only end at
  // an early-clobber slot if the last use is tied to an early-clobber def.
  if (MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::TiedOpsRewritten) &&
      S.end.isEarlyClobber()) {
    if (I + 1 == LR.end() || (I + 1)->start != S.end) {
      report("Live segment ending at early clobber slot must be "
             "redefined by an EC def in the same instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }
  }

  // The following checks only apply to virtual registers. Physreg liveness
  // is too weird to check.
  if (Reg.isVirtual()) {
    // A live segment can end with either a redefinition, a kill flag on a
    // use, or a dead flag on a def.
    bool hasRead = false;
    bool hasSubRegDef = false;
    bool hasDeadDef = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || MOI->getReg() != Reg)
        continue;
      unsigned Sub = MOI->getSubReg();
      LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
                                 : LaneBitmask::getAll();
      if (MOI->isDef()) {
        if (Sub != 0) {
          hasSubRegDef = true;
          // An operand %0:sub0 reads %0:sub1..n. Invert the lane
          // mask for subregister defs. Read-undef defs will be handled by
          // readsReg below.
          SLM = ~SLM;
        }
        if (MOI->isDead())
          hasDeadDef = true;
      }
      if (LaneMask.any() && (LaneMask & SLM).none())
        continue;
      if (MOI->readsReg())
        hasRead = true;
    }
    if (S.end.isDead()) {
      // Make sure that the corresponding machine operand for a "dead" live
      // range has the dead flag. We cannot perform this check for subregister
      // liveranges as partially dead values are allowed.
      if (LaneMask.none() && !hasDeadDef) {
        report("Instruction ending live segment on dead slot has no dead flag",
               MI);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    } else {
      if (!hasRead) {
        // When tracking subregister liveness, the main range must start new
        // values on partial register writes, even if there is no read.
        if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
            !hasSubRegDef) {
          report("Instruction ending live segment doesn't read the register",
                 MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

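  // Collect the positions at which this subrange's lanes are explicitly
  // undef. A predecessor may legitimately lack a live-out value when it is
  // jointly dominated by such undef points; that is what the
  // isJointlyDominated query below checks.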
  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // A landing pad is entered from the middle of its predecessor, so the
      // value must be live out at the predecessor's last call rather than at
      // the block end.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
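      // The value this range carries out of Pred at PEnd, if any.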
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a phi
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

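// Top-level per-range check: validate each value number, then each segment.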
void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

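// Beyond the main-range checks, a virtual register's subranges must have
// disjoint lane masks within the register's maximal mask, must be non-empty,
// and must be covered by the main range; the interval as a whole must also
// form a single connected component.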
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

  LaneBitmask Mask;
  LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((Mask & SR.LaneMask).any()) {
      report("Lane masks of sub ranges overlap in live interval", MF);
      report_context(LI);
    }
    if ((SR.LaneMask & ~MaxMask).any()) {
      report("Subrange lanemask is invalid", MF);
      report_context(LI);
    }
    if (SR.empty()) {
      report("Subrange must not be empty", MF);
      report_context(SR, LI.reg(), SR.LaneMask);
    }
    Mask |= SR.LaneMask;
    verifyLiveRange(SR, LI.reg(), SR.LaneMask);
    if (!LI.covers(SR)) {
      report("A Subrange is not covered by the main range", MF);
      report_context(LI);
    }
  }

  // Check the LI only has one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

// FrameSetup and FrameDestroy can both carry a zero adjustment, so a single
// integer cannot represent the stack state: when the value is zero, it would
// not say whether a frame setup is in progress. We therefore track a bool
// alongside the integer.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Can be negative, which means we are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on
/// all CFG edges to a merge point, and that the frame is destroyed at the end
/// of a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

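  // SPState records the stack state at the entry and exit of each block,
  // indexed by block number; Reachable collects the blocks the DFS visits, so
  // unreachable blocks are excluded from the consistency checks below.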
  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock *> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
           DFI = df_ext_begin(MF, Reachable),
           DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue
                                             : BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
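
    // For example, a FrameSetup of 16 bytes takes ExitValue from 0 to -16 and
    // sets ExitIsSetup; the matching FrameDestroy <16> restores ExitValue to
    // 0 and clears the flag, which is what the return-block checks at the end
    // of this loop expect.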
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}