1 //===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Correlated Value Propagation pass.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
14 #include "llvm/ADT/DepthFirstIterator.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/DomTreeUpdater.h"
18 #include "llvm/Analysis/GlobalsModRef.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/LazyValueInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/Attributes.h"
23 #include "llvm/IR/BasicBlock.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/Constant.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/InstrTypes.h"
32 #include "llvm/IR/Instruction.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/Operator.h"
36 #include "llvm/IR/PassManager.h"
37 #include "llvm/IR/Type.h"
38 #include "llvm/IR/Value.h"
39 #include "llvm/InitializePasses.h"
40 #include "llvm/Pass.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Transforms/Scalar.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 #include <optional>
47 #include <utility>
48
49 using namespace llvm;
50
51 #define DEBUG_TYPE "correlated-value-propagation"
52
53 static cl::opt<bool> CanonicalizeICmpPredicatesToUnsigned(
54 "canonicalize-icmp-predicates-to-unsigned", cl::init(true), cl::Hidden,
55 cl::desc("Enables canonicalization of signed relational predicates to "
56 "unsigned (e.g. sgt => ugt)"));
57
58 STATISTIC(NumPhis, "Number of phis propagated");
59 STATISTIC(NumPhiCommon, "Number of phis deleted via common incoming value");
60 STATISTIC(NumSelects, "Number of selects propagated");
61 STATISTIC(NumMemAccess, "Number of memory access targets propagated");
62 STATISTIC(NumCmps, "Number of comparisons propagated");
63 STATISTIC(NumReturns, "Number of return values propagated");
64 STATISTIC(NumDeadCases, "Number of switch cases removed");
65 STATISTIC(NumSDivSRemsNarrowed,
66 "Number of sdivs/srems whose width was decreased");
67 STATISTIC(NumSDivs, "Number of sdiv converted to udiv");
68 STATISTIC(NumUDivURemsNarrowed,
69 "Number of udivs/urems whose width was decreased");
70 STATISTIC(NumAShrsConverted, "Number of ashr converted to lshr");
71 STATISTIC(NumAShrsRemoved, "Number of ashr removed");
72 STATISTIC(NumSRems, "Number of srem converted to urem");
73 STATISTIC(NumSExt, "Number of sext converted to zext");
74 STATISTIC(NumSICmps, "Number of signed icmp preds simplified to unsigned");
75 STATISTIC(NumAnd, "Number of ands removed");
76 STATISTIC(NumNW, "Number of no-wrap deductions");
77 STATISTIC(NumNSW, "Number of no-signed-wrap deductions");
78 STATISTIC(NumNUW, "Number of no-unsigned-wrap deductions");
79 STATISTIC(NumAddNW, "Number of no-wrap deductions for add");
80 STATISTIC(NumAddNSW, "Number of no-signed-wrap deductions for add");
81 STATISTIC(NumAddNUW, "Number of no-unsigned-wrap deductions for add");
82 STATISTIC(NumSubNW, "Number of no-wrap deductions for sub");
83 STATISTIC(NumSubNSW, "Number of no-signed-wrap deductions for sub");
84 STATISTIC(NumSubNUW, "Number of no-unsigned-wrap deductions for sub");
85 STATISTIC(NumMulNW, "Number of no-wrap deductions for mul");
86 STATISTIC(NumMulNSW, "Number of no-signed-wrap deductions for mul");
87 STATISTIC(NumMulNUW, "Number of no-unsigned-wrap deductions for mul");
88 STATISTIC(NumShlNW, "Number of no-wrap deductions for shl");
89 STATISTIC(NumShlNSW, "Number of no-signed-wrap deductions for shl");
90 STATISTIC(NumShlNUW, "Number of no-unsigned-wrap deductions for shl");
91 STATISTIC(NumAbs, "Number of llvm.abs intrinsics removed");
92 STATISTIC(NumOverflows, "Number of overflow checks removed");
STATISTIC(NumSaturating,
          "Number of saturating arithmetic converted to normal arithmetic");
95 STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
96 STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
97 STATISTIC(NumUDivURemsNarrowedExpanded,
98 "Number of bound udiv's/urem's expanded");
99
100 namespace {
101
102 class CorrelatedValuePropagation : public FunctionPass {
103 public:
104 static char ID;
105
  CorrelatedValuePropagation() : FunctionPass(ID) {
107 initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry());
108 }
109
110 bool runOnFunction(Function &F) override;
111
  void getAnalysisUsage(AnalysisUsage &AU) const override {
113 AU.addRequired<DominatorTreeWrapperPass>();
114 AU.addRequired<LazyValueInfoWrapperPass>();
115 AU.addPreserved<GlobalsAAWrapperPass>();
116 AU.addPreserved<DominatorTreeWrapperPass>();
117 AU.addPreserved<LazyValueInfoWrapperPass>();
118 }
119 };
120
121 } // end anonymous namespace
122
123 char CorrelatedValuePropagation::ID = 0;
124
125 INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
126 "Value Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
128 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
129 INITIALIZE_PASS_END(CorrelatedValuePropagation, "correlated-propagation",
130 "Value Propagation", false, false)
131
132 // Public interface to the Value Propagation pass
133 Pass *llvm::createCorrelatedValuePropagationPass() {
134 return new CorrelatedValuePropagation();
135 }
136
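// If LVI can prove that a select's scalar condition is a constant, replace
// the select with the corresponding arm.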
static bool processSelect(SelectInst *S, LazyValueInfo *LVI) {
138 if (S->getType()->isVectorTy()) return false;
139 if (isa<Constant>(S->getCondition())) return false;
140
141 Constant *C = LVI->getConstant(S->getCondition(), S);
142 if (!C) return false;
143
144 ConstantInt *CI = dyn_cast<ConstantInt>(C);
145 if (!CI) return false;
146
147 Value *ReplaceWith = CI->isOne() ? S->getTrueValue() : S->getFalseValue();
148 S->replaceAllUsesWith(ReplaceWith);
149 S->eraseFromParent();
150
151 ++NumSelects;
152
153 return true;
154 }
155
156 /// Try to simplify a phi with constant incoming values that match the edge
157 /// values of a non-constant value on all other edges:
158 /// bb0:
159 /// %isnull = icmp eq i8* %x, null
160 /// br i1 %isnull, label %bb2, label %bb1
161 /// bb1:
162 /// br label %bb2
163 /// bb2:
164 /// %r = phi i8* [ %x, %bb1 ], [ null, %bb0 ]
165 /// -->
166 /// %r = %x
static bool simplifyCommonValuePhi(PHINode *P, LazyValueInfo *LVI,
                                   DominatorTree *DT) {
169 // Collect incoming constants and initialize possible common value.
170 SmallVector<std::pair<Constant *, unsigned>, 4> IncomingConstants;
171 Value *CommonValue = nullptr;
172 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
173 Value *Incoming = P->getIncomingValue(i);
174 if (auto *IncomingConstant = dyn_cast<Constant>(Incoming)) {
175 IncomingConstants.push_back(std::make_pair(IncomingConstant, i));
176 } else if (!CommonValue) {
177 // The potential common value is initialized to the first non-constant.
178 CommonValue = Incoming;
179 } else if (Incoming != CommonValue) {
180 // There can be only one non-constant common value.
181 return false;
182 }
183 }
184
185 if (!CommonValue || IncomingConstants.empty())
186 return false;
187
188 // The common value must be valid in all incoming blocks.
189 BasicBlock *ToBB = P->getParent();
190 if (auto *CommonInst = dyn_cast<Instruction>(CommonValue))
191 if (!DT->dominates(CommonInst, ToBB))
192 return false;
193
194 // We have a phi with exactly 1 variable incoming value and 1 or more constant
195 // incoming values. See if all constant incoming values can be mapped back to
196 // the same incoming variable value.
197 for (auto &IncomingConstant : IncomingConstants) {
198 Constant *C = IncomingConstant.first;
199 BasicBlock *IncomingBB = P->getIncomingBlock(IncomingConstant.second);
200 if (C != LVI->getConstantOnEdge(CommonValue, IncomingBB, ToBB, P))
201 return false;
202 }
203
204 // LVI only guarantees that the value matches a certain constant if the value
205 // is not poison. Make sure we don't replace a well-defined value with poison.
206 // This is usually satisfied due to a prior branch on the value.
207 if (!isGuaranteedNotToBePoison(CommonValue, nullptr, P, DT))
208 return false;
209
210 // All constant incoming values map to the same variable along the incoming
211 // edges of the phi. The phi is unnecessary.
212 P->replaceAllUsesWith(CommonValue);
213 P->eraseFromParent();
214 ++NumPhiCommon;
215 return true;
216 }
217
static Value *getValueOnEdge(LazyValueInfo *LVI, Value *Incoming,
                             BasicBlock *From, BasicBlock *To,
                             Instruction *CxtI) {
221 if (Constant *C = LVI->getConstantOnEdge(Incoming, From, To, CxtI))
222 return C;
223
  // Check whether the incoming value is a select with a scalar condition for
  // which LVI can tell us the value. In that case replace the incoming value
  // with the appropriate value of the select. This often allows us to remove
  // the select later.
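  // For example, if the incoming value is "select i1 %c, i32 7, i32 42" and
  // LVI proves %c is true on this edge, the phi can take the value 7
  // directly.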
228 auto *SI = dyn_cast<SelectInst>(Incoming);
229 if (!SI)
230 return nullptr;
231
232 // Once LVI learns to handle vector types, we could also add support
233 // for vector type constants that are not all zeroes or all ones.
234 Value *Condition = SI->getCondition();
235 if (!Condition->getType()->isVectorTy()) {
236 if (Constant *C = LVI->getConstantOnEdge(Condition, From, To, CxtI)) {
237 if (C->isOneValue())
238 return SI->getTrueValue();
239 if (C->isZeroValue())
240 return SI->getFalseValue();
241 }
242 }
243
  // Check whether the select has a constant arm, but LVI tells us that the
  // incoming value can never be that constant. In that case replace the
  // incoming value with the other value of the select. This often allows us
  // to remove the select later.
248
249 // The "false" case
250 if (auto *C = dyn_cast<Constant>(SI->getFalseValue()))
251 if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C, From, To, CxtI) ==
252 LazyValueInfo::False)
253 return SI->getTrueValue();
254
255 // The "true" case,
256 // similar to the select "false" case, but try the select "true" value
257 if (auto *C = dyn_cast<Constant>(SI->getTrueValue()))
258 if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C, From, To, CxtI) ==
259 LazyValueInfo::False)
260 return SI->getFalseValue();
261
262 return nullptr;
263 }
264
static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
                       const SimplifyQuery &SQ) {
267 bool Changed = false;
268
269 BasicBlock *BB = P->getParent();
270 for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
271 Value *Incoming = P->getIncomingValue(i);
272 if (isa<Constant>(Incoming)) continue;
273
274 Value *V = getValueOnEdge(LVI, Incoming, P->getIncomingBlock(i), BB, P);
275 if (V) {
276 P->setIncomingValue(i, V);
277 Changed = true;
278 }
279 }
280
281 if (Value *V = simplifyInstruction(P, SQ)) {
282 P->replaceAllUsesWith(V);
283 P->eraseFromParent();
284 Changed = true;
285 }
286
287 if (!Changed)
288 Changed = simplifyCommonValuePhi(P, LVI, DT);
289
290 if (Changed)
291 ++NumPhis;
292
293 return Changed;
294 }
295
static bool processMemAccess(Instruction *I, LazyValueInfo *LVI) {
297 Value *Pointer = nullptr;
298 if (LoadInst *L = dyn_cast<LoadInst>(I))
299 Pointer = L->getPointerOperand();
300 else
301 Pointer = cast<StoreInst>(I)->getPointerOperand();
302
303 if (isa<Constant>(Pointer)) return false;
304
305 Constant *C = LVI->getConstant(Pointer, I);
306 if (!C) return false;
307
308 ++NumMemAccess;
309 I->replaceUsesOfWith(Pointer, C);
310 return true;
311 }
312
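// Canonicalize a signed relational predicate to its unsigned form when the
// operand ranges allow it. For example, if LVI shows both operands are
// non-negative, 'icmp sgt' orders the operands the same way as 'icmp ugt'.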
static bool processICmp(ICmpInst *Cmp, LazyValueInfo *LVI) {
314 if (!CanonicalizeICmpPredicatesToUnsigned)
315 return false;
316
317 // Only for signed relational comparisons of scalar integers.
318 if (Cmp->getType()->isVectorTy() ||
319 !Cmp->getOperand(0)->getType()->isIntegerTy())
320 return false;
321
322 if (!Cmp->isSigned())
323 return false;
324
325 ICmpInst::Predicate UnsignedPred =
326 ConstantRange::getEquivalentPredWithFlippedSignedness(
327 Cmp->getPredicate(), LVI->getConstantRange(Cmp->getOperand(0), Cmp),
328 LVI->getConstantRange(Cmp->getOperand(1), Cmp));
329
330 if (UnsignedPred == ICmpInst::Predicate::BAD_ICMP_PREDICATE)
331 return false;
332
333 ++NumSICmps;
334 Cmp->setPredicate(UnsignedPred);
335
336 return true;
337 }
338
339 /// See if LazyValueInfo's ability to exploit edge conditions or range
340 /// information is sufficient to prove this comparison. Even for local
341 /// conditions, this can sometimes prove conditions instcombine can't by
342 /// exploiting range information.
static bool constantFoldCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
344 Value *Op0 = Cmp->getOperand(0);
345 Value *Op1 = Cmp->getOperand(1);
346 LazyValueInfo::Tristate Result =
347 LVI->getPredicateAt(Cmp->getPredicate(), Op0, Op1, Cmp,
348 /*UseBlockValue=*/true);
349 if (Result == LazyValueInfo::Unknown)
350 return false;
351
352 ++NumCmps;
353 Constant *TorF =
354 ConstantInt::get(CmpInst::makeCmpResultType(Op0->getType()), Result);
355 Cmp->replaceAllUsesWith(TorF);
356 Cmp->eraseFromParent();
357 return true;
358 }
359
static bool processCmp(CmpInst *Cmp, LazyValueInfo *LVI) {
361 if (constantFoldCmp(Cmp, LVI))
362 return true;
363
364 if (auto *ICmp = dyn_cast<ICmpInst>(Cmp))
365 if (processICmp(ICmp, LVI))
366 return true;
367
368 return false;
369 }
370
371 /// Simplify a switch instruction by removing cases which can never fire. If the
372 /// uselessness of a case could be determined locally then constant propagation
373 /// would already have figured it out. Instead, walk the predecessors and
374 /// statically evaluate cases based on information available on that edge. Cases
/// that cannot fire regardless of the incoming edge can safely be removed. If
376 /// a case fires on every incoming edge then the entire switch can be removed
377 /// and replaced with a branch to the case destination.
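/// For example, given
///   entry:
///     %cmp = icmp ult i32 %x, 4
///     br i1 %cmp, label %sw, label %exit
///   sw:
///     switch i32 %x, label %default [ i32 2, label %two
///                                     i32 7, label %seven ]
/// the edge into %sw implies %x u< 4, so the "i32 7" case can never fire and
/// is removed.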
static bool processSwitch(SwitchInst *I, LazyValueInfo *LVI,
                          DominatorTree *DT) {
380 DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
381 Value *Cond = I->getCondition();
382 BasicBlock *BB = I->getParent();
383
384 // Analyse each switch case in turn.
385 bool Changed = false;
386 DenseMap<BasicBlock*, int> SuccessorsCount;
387 for (auto *Succ : successors(BB))
388 SuccessorsCount[Succ]++;
389
390 { // Scope for SwitchInstProfUpdateWrapper. It must not live during
391 // ConstantFoldTerminator() as the underlying SwitchInst can be changed.
392 SwitchInstProfUpdateWrapper SI(*I);
393
394 for (auto CI = SI->case_begin(), CE = SI->case_end(); CI != CE;) {
395 ConstantInt *Case = CI->getCaseValue();
396 LazyValueInfo::Tristate State =
397 LVI->getPredicateAt(CmpInst::ICMP_EQ, Cond, Case, I,
398 /* UseBlockValue */ true);
399
400 if (State == LazyValueInfo::False) {
401 // This case never fires - remove it.
402 BasicBlock *Succ = CI->getCaseSuccessor();
403 Succ->removePredecessor(BB);
404 CI = SI.removeCase(CI);
405 CE = SI->case_end();
406
407 // The condition can be modified by removePredecessor's PHI simplification
408 // logic.
409 Cond = SI->getCondition();
410
411 ++NumDeadCases;
412 Changed = true;
413 if (--SuccessorsCount[Succ] == 0)
414 DTU.applyUpdatesPermissive({{DominatorTree::Delete, BB, Succ}});
415 continue;
416 }
417 if (State == LazyValueInfo::True) {
418 // This case always fires. Arrange for the switch to be turned into an
419 // unconditional branch by replacing the switch condition with the case
420 // value.
421 SI->setCondition(Case);
422 NumDeadCases += SI->getNumCases();
423 Changed = true;
424 break;
425 }
426
427 // Increment the case iterator since we didn't delete it.
428 ++CI;
429 }
430 }
431
432 if (Changed)
433 // If the switch has been simplified to the point where it can be replaced
434 // by a branch then do so now.
435 ConstantFoldTerminator(BB, /*DeleteDeadConditions = */ false,
436 /*TLI = */ nullptr, &DTU);
437 return Changed;
438 }
439
440 // See if we can prove that the given binary op intrinsic will not overflow.
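// For example, @llvm.uadd.with.overflow.i8 cannot overflow if LVI shows both
// operands lie in [0, 100]: the no-wrap region for adding a value in [0, 100]
// without unsigned wrap is [0, 155], which contains [0, 100].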
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI) {
442 ConstantRange LRange = LVI->getConstantRangeAtUse(BO->getOperandUse(0));
443 ConstantRange RRange = LVI->getConstantRangeAtUse(BO->getOperandUse(1));
444 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
445 BO->getBinaryOp(), RRange, BO->getNoWrapKind());
446 return NWRegion.contains(LRange);
447 }
448
static void setDeducedOverflowingFlags(Value *V, Instruction::BinaryOps Opcode,
                                       bool NewNSW, bool NewNUW) {
451 Statistic *OpcNW, *OpcNSW, *OpcNUW;
452 switch (Opcode) {
453 case Instruction::Add:
454 OpcNW = &NumAddNW;
455 OpcNSW = &NumAddNSW;
456 OpcNUW = &NumAddNUW;
457 break;
458 case Instruction::Sub:
459 OpcNW = &NumSubNW;
460 OpcNSW = &NumSubNSW;
461 OpcNUW = &NumSubNUW;
462 break;
463 case Instruction::Mul:
464 OpcNW = &NumMulNW;
465 OpcNSW = &NumMulNSW;
466 OpcNUW = &NumMulNUW;
467 break;
468 case Instruction::Shl:
469 OpcNW = &NumShlNW;
470 OpcNSW = &NumShlNSW;
471 OpcNUW = &NumShlNUW;
472 break;
473 default:
474 llvm_unreachable("Will not be called with other binops");
475 }
476
477 auto *Inst = dyn_cast<Instruction>(V);
478 if (NewNSW) {
479 ++NumNW;
480 ++*OpcNW;
481 ++NumNSW;
482 ++*OpcNSW;
483 if (Inst)
484 Inst->setHasNoSignedWrap();
485 }
486 if (NewNUW) {
487 ++NumNW;
488 ++*OpcNW;
489 ++NumNUW;
490 ++*OpcNUW;
491 if (Inst)
492 Inst->setHasNoUnsignedWrap();
493 }
494 }
495
496 static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI);
497
// See if the @llvm.abs argument is always positive/negative, and simplify.
499 // Notably, INT_MIN can belong to either range, regardless of the NSW,
500 // because it is negation-invariant.
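// For example, an i8 argument known to be non-positive folds to a negation of
// the argument, while an argument known to satisfy X u<= 128 (i.e. in
// [0, 127] or equal to INT_MIN) can be returned unchanged, because INT_MIN is
// its own negation.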
static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
502 Value *X = II->getArgOperand(0);
503 bool IsIntMinPoison = cast<ConstantInt>(II->getArgOperand(1))->isOne();
504
505 Type *Ty = X->getType();
506 Constant *IntMin =
507 ConstantInt::get(Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits()));
508 LazyValueInfo::Tristate Result;
509
510 // Is X in [0, IntMin]? NOTE: INT_MIN is fine!
511 Result = LVI->getPredicateAt(CmpInst::Predicate::ICMP_ULE, X, IntMin, II,
512 /*UseBlockValue=*/true);
513 if (Result == LazyValueInfo::True) {
514 ++NumAbs;
515 II->replaceAllUsesWith(X);
516 II->eraseFromParent();
517 return true;
518 }
519
520 // Is X in [IntMin, 0]? NOTE: INT_MIN is fine!
521 Constant *Zero = ConstantInt::getNullValue(Ty);
522 Result = LVI->getPredicateAt(CmpInst::Predicate::ICMP_SLE, X, Zero, II,
523 /*UseBlockValue=*/true);
524 assert(Result != LazyValueInfo::False && "Should have been handled already.");
525
526 if (Result == LazyValueInfo::Unknown) {
527 // Argument's range crosses zero.
528 bool Changed = false;
529 if (!IsIntMinPoison) {
530 // Can we at least tell that the argument is never INT_MIN?
531 Result = LVI->getPredicateAt(CmpInst::Predicate::ICMP_NE, X, IntMin, II,
532 /*UseBlockValue=*/true);
533 if (Result == LazyValueInfo::True) {
534 ++NumNSW;
535 ++NumSubNSW;
536 II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
537 Changed = true;
538 }
539 }
540 return Changed;
541 }
542
543 IRBuilder<> B(II);
544 Value *NegX = B.CreateNeg(X, II->getName(), /*HasNUW=*/false,
545 /*HasNSW=*/IsIntMinPoison);
546 ++NumAbs;
547 II->replaceAllUsesWith(NegX);
548 II->eraseFromParent();
549
550 // See if we can infer some no-wrap flags.
551 if (auto *BO = dyn_cast<BinaryOperator>(NegX))
552 processBinOp(BO, LVI);
553
554 return true;
555 }
556
// See if this min/max intrinsic always picks its one specific operand.
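// For example, a umax whose LHS is provably uge its RHS is replaced by the
// LHS; if the non-strict predicate provably never holds, it is replaced by
// the RHS.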
static bool processMinMaxIntrinsic(MinMaxIntrinsic *MM, LazyValueInfo *LVI) {
559 CmpInst::Predicate Pred = CmpInst::getNonStrictPredicate(MM->getPredicate());
560 LazyValueInfo::Tristate Result = LVI->getPredicateAt(
561 Pred, MM->getLHS(), MM->getRHS(), MM, /*UseBlockValue=*/true);
562 if (Result == LazyValueInfo::Unknown)
563 return false;
564
565 ++NumMinMax;
566 MM->replaceAllUsesWith(MM->getOperand(!Result));
567 MM->eraseFromParent();
568 return true;
569 }
570
571 // Rewrite this with.overflow intrinsic as non-overflowing.
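// For example, @llvm.sadd.with.overflow.i32 whose operand ranges preclude
// signed wrap becomes a plain 'add nsw', and the overflow bit folds to false.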
static bool processOverflowIntrinsic(WithOverflowInst *WO, LazyValueInfo *LVI) {
573 IRBuilder<> B(WO);
574 Instruction::BinaryOps Opcode = WO->getBinaryOp();
575 bool NSW = WO->isSigned();
576 bool NUW = !WO->isSigned();
577
578 Value *NewOp =
579 B.CreateBinOp(Opcode, WO->getLHS(), WO->getRHS(), WO->getName());
580 setDeducedOverflowingFlags(NewOp, Opcode, NSW, NUW);
581
582 StructType *ST = cast<StructType>(WO->getType());
583 Constant *Struct = ConstantStruct::get(ST,
584 { PoisonValue::get(ST->getElementType(0)),
585 ConstantInt::getFalse(ST->getElementType(1)) });
586 Value *NewI = B.CreateInsertValue(Struct, NewOp, 0);
587 WO->replaceAllUsesWith(NewI);
588 WO->eraseFromParent();
589 ++NumOverflows;
590
591 // See if we can infer the other no-wrap too.
592 if (auto *BO = dyn_cast<BinaryOperator>(NewOp))
593 processBinOp(BO, LVI);
594
595 return true;
596 }
597
static bool processSaturatingInst(SaturatingInst *SI, LazyValueInfo *LVI) {
599 Instruction::BinaryOps Opcode = SI->getBinaryOp();
600 bool NSW = SI->isSigned();
601 bool NUW = !SI->isSigned();
602 BinaryOperator *BinOp = BinaryOperator::Create(
603 Opcode, SI->getLHS(), SI->getRHS(), SI->getName(), SI);
604 BinOp->setDebugLoc(SI->getDebugLoc());
605 setDeducedOverflowingFlags(BinOp, Opcode, NSW, NUW);
606
607 SI->replaceAllUsesWith(BinOp);
608 SI->eraseFromParent();
609 ++NumSaturating;
610
611 // See if we can infer the other no-wrap too.
612 if (auto *BO = dyn_cast<BinaryOperator>(BinOp))
613 processBinOp(BO, LVI);
614
615 return true;
616 }
617
618 /// Infer nonnull attributes for the arguments at the specified callsite.
static bool processCallSite(CallBase &CB, LazyValueInfo *LVI) {
620
621 if (CB.getIntrinsicID() == Intrinsic::abs) {
622 return processAbsIntrinsic(&cast<IntrinsicInst>(CB), LVI);
623 }
624
625 if (auto *MM = dyn_cast<MinMaxIntrinsic>(&CB)) {
626 return processMinMaxIntrinsic(MM, LVI);
627 }
628
629 if (auto *WO = dyn_cast<WithOverflowInst>(&CB)) {
630 if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
631 return processOverflowIntrinsic(WO, LVI);
632 }
633 }
634
635 if (auto *SI = dyn_cast<SaturatingInst>(&CB)) {
636 if (SI->getType()->isIntegerTy() && willNotOverflow(SI, LVI)) {
637 return processSaturatingInst(SI, LVI);
638 }
639 }
640
641 bool Changed = false;
642
  // Deopt bundle operands are intended to capture state with minimal
  // perturbation of the code otherwise. If we can find a constant value for
  // any such operand and remove a use of the original value, that's
  // desirable since it may allow further optimization of that value (e.g. via
  // single use rules in instcombine). Since deopt uses tend to,
  // idiomatically, appear along rare conditional paths, it's reasonably
  // likely we may have a conditional fact with which LVI can fold.
650 if (auto DeoptBundle = CB.getOperandBundle(LLVMContext::OB_deopt)) {
651 for (const Use &ConstU : DeoptBundle->Inputs) {
652 Use &U = const_cast<Use&>(ConstU);
653 Value *V = U.get();
654 if (V->getType()->isVectorTy()) continue;
655 if (isa<Constant>(V)) continue;
656
657 Constant *C = LVI->getConstant(V, &CB);
658 if (!C) continue;
659 U.set(C);
660 Changed = true;
661 }
662 }
663
664 SmallVector<unsigned, 4> ArgNos;
665 unsigned ArgNo = 0;
666
667 for (Value *V : CB.args()) {
668 PointerType *Type = dyn_cast<PointerType>(V->getType());
669 // Try to mark pointer typed parameters as non-null. We skip the
670 // relatively expensive analysis for constants which are obviously either
671 // null or non-null to start with.
672 if (Type && !CB.paramHasAttr(ArgNo, Attribute::NonNull) &&
673 !isa<Constant>(V) &&
674 LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
675 ConstantPointerNull::get(Type), &CB,
676 /*UseBlockValue=*/false) == LazyValueInfo::False)
677 ArgNos.push_back(ArgNo);
678 ArgNo++;
679 }
680
681 assert(ArgNo == CB.arg_size() && "Call arguments not processed correctly.");
682
683 if (ArgNos.empty())
684 return Changed;
685
686 NumNonNull += ArgNos.size();
687 AttributeList AS = CB.getAttributes();
688 LLVMContext &Ctx = CB.getContext();
689 AS = AS.addParamAttribute(Ctx, ArgNos,
690 Attribute::get(Ctx, Attribute::NonNull));
691 CB.setAttributes(AS);
692
693 return true;
694 }
695
696 enum class Domain { NonNegative, NonPositive, Unknown };
697
static Domain getDomain(const ConstantRange &CR) {
699 if (CR.isAllNonNegative())
700 return Domain::NonNegative;
701 if (CR.icmp(ICmpInst::ICMP_SLE, APInt::getNullValue(CR.getBitWidth())))
702 return Domain::NonPositive;
703 return Domain::Unknown;
704 }
705
706 /// Try to shrink a sdiv/srem's width down to the smallest power of two that's
707 /// sufficient to contain its operands.
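/// For example, an i32 sdiv whose operands are both known to lie in
/// [-100, 100] is performed as an i8 sdiv on truncated operands, and the i8
/// result is sign-extended back to i32.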
static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
                             const ConstantRange &RCR) {
710 assert(Instr->getOpcode() == Instruction::SDiv ||
711 Instr->getOpcode() == Instruction::SRem);
712 assert(!Instr->getType()->isVectorTy());
713
714 // Find the smallest power of two bitwidth that's sufficient to hold Instr's
715 // operands.
716 unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();
717
718 // What is the smallest bit width that can accommodate the entire value ranges
719 // of both of the operands?
720 std::array<std::optional<ConstantRange>, 2> CRs;
721 unsigned MinSignedBits =
722 std::max(LCR.getMinSignedBits(), RCR.getMinSignedBits());
723
  // sdiv/srem is UB if the divisor is -1 and the dividend is INT_MIN, so
  // unless we can prove that such a combination is impossible, we need to
  // bump the bitwidth.
726 if (RCR.contains(APInt::getAllOnes(OrigWidth)) &&
727 LCR.contains(APInt::getSignedMinValue(MinSignedBits).sext(OrigWidth)))
728 ++MinSignedBits;
729
730 // Don't shrink below 8 bits wide.
731 unsigned NewWidth = std::max<unsigned>(PowerOf2Ceil(MinSignedBits), 8);
732
733 // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
734 // two.
735 if (NewWidth >= OrigWidth)
736 return false;
737
738 ++NumSDivSRemsNarrowed;
739 IRBuilder<> B{Instr};
740 auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
741 auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
742 Instr->getName() + ".lhs.trunc");
743 auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
744 Instr->getName() + ".rhs.trunc");
745 auto *BO = B.CreateBinOp(Instr->getOpcode(), LHS, RHS, Instr->getName());
746 auto *Sext = B.CreateSExt(BO, Instr->getType(), Instr->getName() + ".sext");
747 if (auto *BinOp = dyn_cast<BinaryOperator>(BO))
748 if (BinOp->getOpcode() == Instruction::SDiv)
749 BinOp->setIsExact(Instr->isExact());
750
751 Instr->replaceAllUsesWith(Sext);
752 Instr->eraseFromParent();
753 return true;
754 }
755
static bool expandUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
                             const ConstantRange &YCR) {
758 Type *Ty = Instr->getType();
759 assert(Instr->getOpcode() == Instruction::UDiv ||
760 Instr->getOpcode() == Instruction::URem);
761 assert(!Ty->isVectorTy());
762 bool IsRem = Instr->getOpcode() == Instruction::URem;
763
764 Value *X = Instr->getOperand(0);
765 Value *Y = Instr->getOperand(1);
766
767 // X u/ Y -> 0 iff X u< Y
768 // X u% Y -> X iff X u< Y
769 if (XCR.icmp(ICmpInst::ICMP_ULT, YCR)) {
770 Instr->replaceAllUsesWith(IsRem ? X : Constant::getNullValue(Ty));
771 Instr->eraseFromParent();
772 ++NumUDivURemsNarrowedExpanded;
773 return true;
774 }
775
776 // Given
777 // R = X u% Y
778 // We can represent the modulo operation as a loop/self-recursion:
779 // urem_rec(X, Y):
780 // Z = X - Y
781 // if X u< Y
782 // ret X
783 // else
784 // ret urem_rec(Z, Y)
785 // which isn't better, but if we only need a single iteration
786 // to compute the answer, this becomes quite good:
787 // R = X < Y ? X : X - Y iff X u< 2*Y (w/ unsigned saturation)
788 // Now, we do not care about all full multiples of Y in X, they do not change
789 // the answer, thus we could rewrite the expression as:
790 // X* = X - (Y * |_ X / Y _|)
791 // R = X* % Y
792 // so we don't need the *first* iteration to return, we just need to
793 // know *which* iteration will always return, so we could also rewrite it as:
794 // X* = X - (Y * |_ X / Y _|)
795 // R = X* % Y iff X* u< 2*Y (w/ unsigned saturation)
796 // but that does not seem profitable here.
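  //
  // For example, for %r = urem i32 %x, %y where LVI shows %x u< 2 * %y, the
  // expansion below emits (names are illustrative):
  //   %x.frozen = freeze i32 %x
  //   %r.urem = sub nuw i32 %x.frozen, %y
  //   %r.cmp = icmp ult i32 %x.frozen, %y
  //   %r = select i1 %r.cmp, i32 %x.frozen, i32 %r.urem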
797
  // Even if we don't know X's range, the divisor may be so large that X can
  // never be 2x larger than it; that is the case whenever the divisor is
  // always negative.
800 if (!XCR.icmp(ICmpInst::ICMP_ULT,
801 YCR.umul_sat(APInt(YCR.getBitWidth(), 2))) &&
802 !YCR.isAllNegative())
803 return false;
804
805 IRBuilder<> B(Instr);
806 Value *ExpandedOp;
807 if (IsRem) {
808 // NOTE: this transformation introduces two uses of X,
809 // but it may be undef so we must freeze it first.
810 Value *FrozenX = B.CreateFreeze(X, X->getName() + ".frozen");
811 auto *AdjX = B.CreateNUWSub(FrozenX, Y, Instr->getName() + ".urem");
812 auto *Cmp =
813 B.CreateICmp(ICmpInst::ICMP_ULT, FrozenX, Y, Instr->getName() + ".cmp");
814 ExpandedOp = B.CreateSelect(Cmp, FrozenX, AdjX);
815 } else {
816 auto *Cmp =
817 B.CreateICmp(ICmpInst::ICMP_UGE, X, Y, Instr->getName() + ".cmp");
818 ExpandedOp = B.CreateZExt(Cmp, Ty, Instr->getName() + ".udiv");
819 }
820 ExpandedOp->takeName(Instr);
821 Instr->replaceAllUsesWith(ExpandedOp);
822 Instr->eraseFromParent();
823 ++NumUDivURemsNarrowedExpanded;
824 return true;
825 }
826
827 /// Try to shrink a udiv/urem's width down to the smallest power of two that's
828 /// sufficient to contain its operands.
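/// For example, an i64 udiv whose operands both fit in 16 bits is performed
/// as an i16 udiv on truncated operands, and the result is zero-extended back
/// to i64.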
static bool narrowUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
                             const ConstantRange &YCR) {
831 assert(Instr->getOpcode() == Instruction::UDiv ||
832 Instr->getOpcode() == Instruction::URem);
833 assert(!Instr->getType()->isVectorTy());
834
835 // Find the smallest power of two bitwidth that's sufficient to hold Instr's
836 // operands.
837
838 // What is the smallest bit width that can accommodate the entire value ranges
839 // of both of the operands?
840 unsigned MaxActiveBits = std::max(XCR.getActiveBits(), YCR.getActiveBits());
841 // Don't shrink below 8 bits wide.
842 unsigned NewWidth = std::max<unsigned>(PowerOf2Ceil(MaxActiveBits), 8);
843
844 // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
845 // two.
846 if (NewWidth >= Instr->getType()->getIntegerBitWidth())
847 return false;
848
849 ++NumUDivURemsNarrowed;
850 IRBuilder<> B{Instr};
851 auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
852 auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
853 Instr->getName() + ".lhs.trunc");
854 auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
855 Instr->getName() + ".rhs.trunc");
856 auto *BO = B.CreateBinOp(Instr->getOpcode(), LHS, RHS, Instr->getName());
857 auto *Zext = B.CreateZExt(BO, Instr->getType(), Instr->getName() + ".zext");
858 if (auto *BinOp = dyn_cast<BinaryOperator>(BO))
859 if (BinOp->getOpcode() == Instruction::UDiv)
860 BinOp->setIsExact(Instr->isExact());
861
862 Instr->replaceAllUsesWith(Zext);
863 Instr->eraseFromParent();
864 return true;
865 }
866
static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
868 assert(Instr->getOpcode() == Instruction::UDiv ||
869 Instr->getOpcode() == Instruction::URem);
870 if (Instr->getType()->isVectorTy())
871 return false;
872
873 ConstantRange XCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(0));
874 ConstantRange YCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(1));
875 if (expandUDivOrURem(Instr, XCR, YCR))
876 return true;
877
878 return narrowUDivOrURem(Instr, XCR, YCR);
879 }
880
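/// Transform an srem whose operand signs are known into a urem. For example,
/// with a non-positive dividend and a non-negative divisor, (-7) srem 3 is
/// computed as -(7 urem 3) == -1. If the dividend's absolute value is known
/// to be smaller than the divisor's, the srem is a no-op and folds to the
/// dividend.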
static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
883 assert(SDI->getOpcode() == Instruction::SRem);
884 assert(!SDI->getType()->isVectorTy());
885
886 if (LCR.abs().icmp(CmpInst::ICMP_ULT, RCR.abs())) {
887 SDI->replaceAllUsesWith(SDI->getOperand(0));
888 SDI->eraseFromParent();
889 return true;
890 }
891
892 struct Operand {
893 Value *V;
894 Domain D;
895 };
896 std::array<Operand, 2> Ops = {{{SDI->getOperand(0), getDomain(LCR)},
897 {SDI->getOperand(1), getDomain(RCR)}}};
898 if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
899 return false;
900
901 // We know domains of both of the operands!
902 ++NumSRems;
903
904 // We need operands to be non-negative, so negate each one that isn't.
905 for (Operand &Op : Ops) {
906 if (Op.D == Domain::NonNegative)
907 continue;
908 auto *BO =
909 BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
910 BO->setDebugLoc(SDI->getDebugLoc());
911 Op.V = BO;
912 }
913
914 auto *URem =
915 BinaryOperator::CreateURem(Ops[0].V, Ops[1].V, SDI->getName(), SDI);
916 URem->setDebugLoc(SDI->getDebugLoc());
917
918 auto *Res = URem;
919
  // If the dividend was non-positive, we need to negate the result.
921 if (Ops[0].D == Domain::NonPositive) {
922 Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
923 Res->setDebugLoc(SDI->getDebugLoc());
924 }
925
926 SDI->replaceAllUsesWith(Res);
927 SDI->eraseFromParent();
928
929 // Try to simplify our new urem.
930 processUDivOrURem(URem, LVI);
931
932 return true;
933 }
934
935 /// See if LazyValueInfo's ability to exploit edge conditions or range
936 /// information is sufficient to prove the signs of both operands of this SDiv.
937 /// If this is the case, replace the SDiv with a UDiv. Even for local
938 /// conditions, this can sometimes prove conditions instcombine can't by
939 /// exploiting range information.
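/// For example, if LVI proves the dividend is non-positive and the divisor is
/// non-negative, (-8) sdiv 2 is computed as -(8 udiv 2) == -4.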
static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
                        const ConstantRange &RCR, LazyValueInfo *LVI) {
942 assert(SDI->getOpcode() == Instruction::SDiv);
943 assert(!SDI->getType()->isVectorTy());
944
945 struct Operand {
946 Value *V;
947 Domain D;
948 };
949 std::array<Operand, 2> Ops = {{{SDI->getOperand(0), getDomain(LCR)},
950 {SDI->getOperand(1), getDomain(RCR)}}};
951 if (Ops[0].D == Domain::Unknown || Ops[1].D == Domain::Unknown)
952 return false;
953
954 // We know domains of both of the operands!
955 ++NumSDivs;
956
957 // We need operands to be non-negative, so negate each one that isn't.
958 for (Operand &Op : Ops) {
959 if (Op.D == Domain::NonNegative)
960 continue;
961 auto *BO =
962 BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
963 BO->setDebugLoc(SDI->getDebugLoc());
964 Op.V = BO;
965 }
966
967 auto *UDiv =
968 BinaryOperator::CreateUDiv(Ops[0].V, Ops[1].V, SDI->getName(), SDI);
969 UDiv->setDebugLoc(SDI->getDebugLoc());
970 UDiv->setIsExact(SDI->isExact());
971
972 Value *Res = UDiv;
973
974 // If the operands had two different domains, we need to negate the result.
975 if (Ops[0].D != Ops[1].D)
976 Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
977
978 SDI->replaceAllUsesWith(Res);
979 SDI->eraseFromParent();
980
981 // Try to simplify our new udiv.
982 processUDivOrURem(UDiv, LVI);
983
984 return true;
985 }
986
static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
988 assert(Instr->getOpcode() == Instruction::SDiv ||
989 Instr->getOpcode() == Instruction::SRem);
990 if (Instr->getType()->isVectorTy())
991 return false;
992
993 ConstantRange LCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(0));
994 ConstantRange RCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(1));
995 if (Instr->getOpcode() == Instruction::SDiv)
996 if (processSDiv(Instr, LCR, RCR, LVI))
997 return true;
998
999 if (Instr->getOpcode() == Instruction::SRem) {
1000 if (processSRem(Instr, LCR, RCR, LVI))
1001 return true;
1002 }
1003
1004 return narrowSDivOrSRem(Instr, LCR, RCR);
1005 }
1006
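// An ashr of a value known to be -1 or 0 is a no-op and is removed; an ashr
// of a value known to be non-negative shifts in zeros, so it is converted to
// an lshr.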
static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
1008 if (SDI->getType()->isVectorTy())
1009 return false;
1010
1011 ConstantRange LRange = LVI->getConstantRangeAtUse(SDI->getOperandUse(0));
1012 unsigned OrigWidth = SDI->getType()->getIntegerBitWidth();
1013 ConstantRange NegOneOrZero =
1014 ConstantRange(APInt(OrigWidth, (uint64_t)-1, true), APInt(OrigWidth, 1));
1015 if (NegOneOrZero.contains(LRange)) {
1016 // ashr of -1 or 0 never changes the value, so drop the whole instruction
1017 ++NumAShrsRemoved;
1018 SDI->replaceAllUsesWith(SDI->getOperand(0));
1019 SDI->eraseFromParent();
1020 return true;
1021 }
1022
1023 if (!LRange.isAllNonNegative())
1024 return false;
1025
1026 ++NumAShrsConverted;
1027 auto *BO = BinaryOperator::CreateLShr(SDI->getOperand(0), SDI->getOperand(1),
1028 "", SDI);
1029 BO->takeName(SDI);
1030 BO->setDebugLoc(SDI->getDebugLoc());
1031 BO->setIsExact(SDI->isExact());
1032 SDI->replaceAllUsesWith(BO);
1033 SDI->eraseFromParent();
1034
1035 return true;
1036 }
1037
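// A sext of a value known to be non-negative produces the same result as a
// zext, so convert it.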
static bool processSExt(SExtInst *SDI, LazyValueInfo *LVI) {
1039 if (SDI->getType()->isVectorTy())
1040 return false;
1041
1042 const Use &Base = SDI->getOperandUse(0);
1043 if (!LVI->getConstantRangeAtUse(Base).isAllNonNegative())
1044 return false;
1045
1046 ++NumSExt;
1047 auto *ZExt = CastInst::CreateZExtOrBitCast(Base, SDI->getType(), "", SDI);
1048 ZExt->takeName(SDI);
1049 ZExt->setDebugLoc(SDI->getDebugLoc());
1050 SDI->replaceAllUsesWith(ZExt);
1051 SDI->eraseFromParent();
1052
1053 return true;
1054 }
1055
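// Infer nsw/nuw flags on an add/sub/mul/shl from the operand ranges LVI
// computes. For example, an 'add i8' whose operands are both known to lie in
// [0, 50] can be marked both nuw and nsw, since the result fits in the type.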
static bool processBinOp(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1057 using OBO = OverflowingBinaryOperator;
1058
1059 if (BinOp->getType()->isVectorTy())
1060 return false;
1061
1062 bool NSW = BinOp->hasNoSignedWrap();
1063 bool NUW = BinOp->hasNoUnsignedWrap();
1064 if (NSW && NUW)
1065 return false;
1066
1067 Instruction::BinaryOps Opcode = BinOp->getOpcode();
1068 Value *LHS = BinOp->getOperand(0);
1069 Value *RHS = BinOp->getOperand(1);
1070
1071 ConstantRange LRange = LVI->getConstantRange(LHS, BinOp);
1072 ConstantRange RRange = LVI->getConstantRange(RHS, BinOp);
1073
1074 bool Changed = false;
1075 bool NewNUW = false, NewNSW = false;
1076 if (!NUW) {
1077 ConstantRange NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1078 Opcode, RRange, OBO::NoUnsignedWrap);
1079 NewNUW = NUWRange.contains(LRange);
1080 Changed |= NewNUW;
1081 }
1082 if (!NSW) {
1083 ConstantRange NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
1084 Opcode, RRange, OBO::NoSignedWrap);
1085 NewNSW = NSWRange.contains(LRange);
1086 Changed |= NewNSW;
1087 }
1088
1089 setDeducedOverflowingFlags(BinOp, Opcode, NewNSW, NewNUW);
1090
1091 return Changed;
1092 }
1093
static bool processAnd(BinaryOperator *BinOp, LazyValueInfo *LVI) {
1095 if (BinOp->getType()->isVectorTy())
1096 return false;
1097
1098 // Pattern match (and lhs, C) where C includes a superset of bits which might
1099 // be set in lhs. This is a common truncation idiom created by instcombine.
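  // For example, in "%r = and i32 %x, 255", if LVI shows %x is always less
  // than 256, the mask has no effect and %r can be replaced by %x.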
1100 const Use &LHS = BinOp->getOperandUse(0);
1101 ConstantInt *RHS = dyn_cast<ConstantInt>(BinOp->getOperand(1));
1102 if (!RHS || !RHS->getValue().isMask())
1103 return false;
1104
1105 // We can only replace the AND with LHS based on range info if the range does
1106 // not include undef.
1107 ConstantRange LRange =
1108 LVI->getConstantRangeAtUse(LHS, /*UndefAllowed=*/false);
1109 if (!LRange.getUnsignedMax().ule(RHS->getValue()))
1110 return false;
1111
1112 BinOp->replaceAllUsesWith(LHS);
1113 BinOp->eraseFromParent();
1114 NumAnd++;
1115 return true;
1116 }
1117
1118
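// Determine the constant value, if any, of V at the position of instruction
// At. Besides LVI's plain constant lookup, this also folds a comparison
// against a constant operand via getPredicateAt.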
static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
1120 if (Constant *C = LVI->getConstant(V, At))
1121 return C;
1122
1123 // TODO: The following really should be sunk inside LVI's core algorithm, or
1124 // at least the outer shims around such.
1125 auto *C = dyn_cast<CmpInst>(V);
1126 if (!C) return nullptr;
1127
1128 Value *Op0 = C->getOperand(0);
1129 Constant *Op1 = dyn_cast<Constant>(C->getOperand(1));
1130 if (!Op1) return nullptr;
1131
1132 LazyValueInfo::Tristate Result = LVI->getPredicateAt(
1133 C->getPredicate(), Op0, Op1, At, /*UseBlockValue=*/false);
1134 if (Result == LazyValueInfo::Unknown)
1135 return nullptr;
1136
1137 return (Result == LazyValueInfo::True) ?
1138 ConstantInt::getTrue(C->getContext()) :
1139 ConstantInt::getFalse(C->getContext());
1140 }
1141
static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
                    const SimplifyQuery &SQ) {
1144 bool FnChanged = false;
1145 // Visiting in a pre-order depth-first traversal causes us to simplify early
1146 // blocks before querying later blocks (which require us to analyze early
1147 // blocks). Eagerly simplifying shallow blocks means there is strictly less
1148 // work to do for deep blocks. This also means we don't visit unreachable
1149 // blocks.
1150 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
1151 bool BBChanged = false;
1152 for (Instruction &II : llvm::make_early_inc_range(*BB)) {
1153 switch (II.getOpcode()) {
1154 case Instruction::Select:
1155 BBChanged |= processSelect(cast<SelectInst>(&II), LVI);
1156 break;
1157 case Instruction::PHI:
1158 BBChanged |= processPHI(cast<PHINode>(&II), LVI, DT, SQ);
1159 break;
1160 case Instruction::ICmp:
1161 case Instruction::FCmp:
1162 BBChanged |= processCmp(cast<CmpInst>(&II), LVI);
1163 break;
1164 case Instruction::Load:
1165 case Instruction::Store:
1166 BBChanged |= processMemAccess(&II, LVI);
1167 break;
1168 case Instruction::Call:
1169 case Instruction::Invoke:
1170 BBChanged |= processCallSite(cast<CallBase>(II), LVI);
1171 break;
1172 case Instruction::SRem:
1173 case Instruction::SDiv:
1174 BBChanged |= processSDivOrSRem(cast<BinaryOperator>(&II), LVI);
1175 break;
1176 case Instruction::UDiv:
1177 case Instruction::URem:
1178 BBChanged |= processUDivOrURem(cast<BinaryOperator>(&II), LVI);
1179 break;
1180 case Instruction::AShr:
1181 BBChanged |= processAShr(cast<BinaryOperator>(&II), LVI);
1182 break;
1183 case Instruction::SExt:
1184 BBChanged |= processSExt(cast<SExtInst>(&II), LVI);
1185 break;
1186 case Instruction::Add:
1187 case Instruction::Sub:
1188 case Instruction::Mul:
1189 case Instruction::Shl:
1190 BBChanged |= processBinOp(cast<BinaryOperator>(&II), LVI);
1191 break;
1192 case Instruction::And:
1193 BBChanged |= processAnd(cast<BinaryOperator>(&II), LVI);
1194 break;
1195 }
1196 }
1197
1198 Instruction *Term = BB->getTerminator();
1199 switch (Term->getOpcode()) {
1200 case Instruction::Switch:
1201 BBChanged |= processSwitch(cast<SwitchInst>(Term), LVI, DT);
1202 break;
1203 case Instruction::Ret: {
1204 auto *RI = cast<ReturnInst>(Term);
1205 // Try to determine the return value if we can. This is mainly here to
1206 // simplify the writing of unit tests, but also helps to enable IPO by
1207 // constant folding the return values of callees.
1208 auto *RetVal = RI->getReturnValue();
1209 if (!RetVal) break; // handle "ret void"
1210 if (isa<Constant>(RetVal)) break; // nothing to do
1211 if (auto *C = getConstantAt(RetVal, RI, LVI)) {
1212 ++NumReturns;
1213 RI->replaceUsesOfWith(RetVal, C);
1214 BBChanged = true;
1215 }
1216 }
1217 }
1218
1219 FnChanged |= BBChanged;
1220 }
1221
1222 return FnChanged;
1223 }
1224
bool CorrelatedValuePropagation::runOnFunction(Function &F) {
1226 if (skipFunction(F))
1227 return false;
1228
1229 LazyValueInfo *LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
1230 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1231
1232 return runImpl(F, LVI, DT, getBestSimplifyQuery(*this, F));
1233 }
1234
1235 PreservedAnalyses
CorrelatedValuePropagationPass::run(Function &F, FunctionAnalysisManager &AM) {
1237 LazyValueInfo *LVI = &AM.getResult<LazyValueAnalysis>(F);
1238 DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1239
1240 bool Changed = runImpl(F, LVI, DT, getBestSimplifyQuery(AM, F));
1241
1242 PreservedAnalyses PA;
1243 if (!Changed) {
1244 PA = PreservedAnalyses::all();
1245 } else {
1246 PA.preserve<DominatorTreeAnalysis>();
1247 PA.preserve<LazyValueAnalysis>();
1248 }
1249
1250 // Keeping LVI alive is expensive, both because it uses a lot of memory, and
1251 // because invalidating values in LVI is expensive. While CVP does preserve
1252 // LVI, we know that passes after JumpThreading+CVP will not need the result
1253 // of this analysis, so we forcefully discard it early.
1254 PA.abandon<LazyValueAnalysis>();
1255 return PA;
1256 }
1257