1 //===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file "describes" induction and recurrence variables.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Analysis/IVDescriptors.h"
14 #include "llvm/Analysis/DemandedBits.h"
15 #include "llvm/Analysis/LoopInfo.h"
16 #include "llvm/Analysis/ScalarEvolution.h"
17 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/IR/ValueHandle.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/KnownBits.h"
26
27 #include <set>
28
29 using namespace llvm;
30 using namespace llvm::PatternMatch;
31
32 #define DEBUG_TYPE "iv-descriptors"
33
34 bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
35 SmallPtrSetImpl<Instruction *> &Set) {
36 for (const Use &Use : I->operands())
37 if (!Set.count(dyn_cast<Instruction>(Use)))
38 return false;
39 return true;
40 }
41
42 bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
43 switch (Kind) {
44 default:
45 break;
46 case RecurKind::Add:
47 case RecurKind::Mul:
48 case RecurKind::Or:
49 case RecurKind::And:
50 case RecurKind::Xor:
51 case RecurKind::SMax:
52 case RecurKind::SMin:
53 case RecurKind::UMax:
54 case RecurKind::UMin:
55 case RecurKind::SelectICmp:
56 case RecurKind::SelectFCmp:
57 return true;
58 }
59 return false;
60 }
61
62 bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
63 return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
64 }
65
66 /// Determines if Phi may have been type-promoted. If Phi has a single user
67 /// that ANDs the Phi with a type mask, return the user. RT is updated to
68 /// account for the narrower bit width represented by the mask, and the AND
69 /// instruction is added to CI.
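/// For illustration, a minimal sketch of the promoted IR this looks through
/// (names hypothetical):
///   %sum = phi i32 [ 0, %entry ], [ %add, %loop ]
///   %masked = and i32 %sum, 255    ; single user of %sum, mask is 2^8-1
///   %add = add i32 %masked, %val
/// Here RT is narrowed to i8 and the 'and' is returned as the new start point.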
70 static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
71 SmallPtrSetImpl<Instruction *> &Visited,
72 SmallPtrSetImpl<Instruction *> &CI) {
73 if (!Phi->hasOneUse())
74 return Phi;
75
76 const APInt *M = nullptr;
77 Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
78
79 // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
80 // with a new integer type of the corresponding bit width.
81 if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
82 int32_t Bits = (*M + 1).exactLogBase2();
83 if (Bits > 0) {
84 RT = IntegerType::get(Phi->getContext(), Bits);
85 Visited.insert(Phi);
86 CI.insert(J);
87 return J;
88 }
89 }
90 return Phi;
91 }
92
93 /// Compute the minimal bit width needed to represent a reduction whose exit
94 /// instruction is given by Exit.
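/// For example (hypothetical numbers): if demanded bits shows only the low 6
/// bits of Exit are live, the width is rounded up to the next power of two and
/// the result is {i8, /*IsSigned=*/false}; if nothing narrows the width, the
/// original type width of Exit is kept.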
95 static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
96 DemandedBits *DB,
97 AssumptionCache *AC,
98 DominatorTree *DT) {
99 bool IsSigned = false;
100 const DataLayout &DL = Exit->getModule()->getDataLayout();
101 uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
102
103 if (DB) {
104 // Use the demanded bits analysis to determine the bits that are live out
105 // of the exit instruction, rounding up to the nearest power of two. If the
106 // use of demanded bits results in a smaller bit width, we know the value
107 // must be positive (i.e., IsSigned = false), because if this were not the
108 // case, the sign bit would have been demanded.
109 auto Mask = DB->getDemandedBits(Exit);
110 MaxBitWidth = Mask.getBitWidth() - Mask.countLeadingZeros();
111 }
112
113 if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
114 // If demanded bits wasn't able to limit the bit width, we can try to use
115 // value tracking instead. This can be the case, for example, if the value
116 // may be negative.
117 auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
118 auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
119 MaxBitWidth = NumTypeBits - NumSignBits;
120 KnownBits Bits = computeKnownBits(Exit, DL);
121 if (!Bits.isNonNegative()) {
122 // If the value is not known to be non-negative, we set IsSigned to true,
123 // meaning that we will use sext instructions instead of zext
124 // instructions to restore the original type.
125 IsSigned = true;
126 // Make sure at least one sign bit is included in the result, so it
127 // will get properly sign-extended.
128 ++MaxBitWidth;
129 }
130 }
131 if (!isPowerOf2_64(MaxBitWidth))
132 MaxBitWidth = NextPowerOf2(MaxBitWidth);
133
134 return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
135 IsSigned);
136 }
137
138 /// Collect cast instructions that can be ignored in the vectorizer's cost
139 /// model, given a reduction exit value and the minimal type in which the
140 /// reduction can be represented. Also search casts to the recurrence type
141 /// to find the minimum width used by the recurrence.
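/// For illustration (hypothetical IR), with RecurrenceType == i16:
///   %trunc = trunc i16 %red to i8   ; source type is the recurrence type, so
///                                   ; it is added to Casts (ignorable)
///   %ext   = sext i8 %x to i16      ; destination is the recurrence type, so
///                                   ; MinWidthCastToRecurTy becomes 8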
142 static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
143 Type *RecurrenceType,
144 SmallPtrSetImpl<Instruction *> &Casts,
145 unsigned &MinWidthCastToRecurTy) {
146
147 SmallVector<Instruction *, 8> Worklist;
148 SmallPtrSet<Instruction *, 8> Visited;
149 Worklist.push_back(Exit);
150 MinWidthCastToRecurTy = -1U;
151
152 while (!Worklist.empty()) {
153 Instruction *Val = Worklist.pop_back_val();
154 Visited.insert(Val);
155 if (auto *Cast = dyn_cast<CastInst>(Val)) {
156 if (Cast->getSrcTy() == RecurrenceType) {
157 // If the source type of a cast instruction is equal to the recurrence
158 // type, it will be eliminated, and should be ignored in the vectorizer
159 // cost model.
160 Casts.insert(Cast);
161 continue;
162 }
163 if (Cast->getDestTy() == RecurrenceType) {
164 // The minimum width used by the recurrence is found by checking for
165 // casts on its operands. The minimum width is used by the vectorizer
166 // when finding the widest type for in-loop reductions without any
167 // loads/stores.
168 MinWidthCastToRecurTy = std::min<unsigned>(
169 MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
170 continue;
171 }
172 }
173 // Add all operands to the work list if they are loop-varying values that
174 // we haven't yet visited.
175 for (Value *O : cast<User>(Val)->operands())
176 if (auto *I = dyn_cast<Instruction>(O))
177 if (TheLoop->contains(I) && !Visited.count(I))
178 Worklist.push_back(I);
179 }
180 }
181
182 // Check if a given Phi node can be recognized as an ordered reduction for
183 // vectorizing floating point operations without unsafe math.
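// For illustration, a strict in-order FAdd reduction has the shape
// (hypothetical names):
//   %sum = phi float [ 0.0e+00, %entry ], [ %sum.next, %loop ]
//   %sum.next = fadd float %sum, %val   ; no 'reassoc', so order must be kept
// where %sum.next feeds the phi and has at most one other user.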
184 static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
185 Instruction *Exit, PHINode *Phi) {
186 // Currently only FAdd and FMulAdd are supported.
187 if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
188 return false;
189
190 if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
191 return false;
192
193 if (Kind == RecurKind::FMulAdd &&
194 !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
195 return false;
196
197 // Ensure the exit instruction has only one user other than the reduction PHI
198 if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
199 return false;
200
201 // The only pattern accepted is the one in which the reduction PHI
202 // is used as one of the operands of the exit instruction
203 auto *Op0 = Exit->getOperand(0);
204 auto *Op1 = Exit->getOperand(1);
205 if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
206 return false;
207 if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
208 return false;
209
210 LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
211 << ", ExitInst: " << *Exit << "\n");
212
213 return true;
214 }
215
216 bool RecurrenceDescriptor::AddReductionVar(
217 PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
218 RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
219 DominatorTree *DT, ScalarEvolution *SE) {
220 if (Phi->getNumIncomingValues() != 2)
221 return false;
222
223 // Reduction variables are only found in the loop header block.
224 if (Phi->getParent() != TheLoop->getHeader())
225 return false;
226
227 // Obtain the reduction start value from the value that comes from the loop
228 // preheader.
229 Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
230
231 // ExitInstruction is the single value which is used outside the loop.
232 // We only allow for a single reduction value to be used outside the loop.
233 // This includes users of the reduction and the recurrence variables
234 // themselves, which form a cycle that ends in the phi node.
235 Instruction *ExitInstruction = nullptr;
236
237 // Variable to keep the last visited store instruction. By the end of the
238 // algorithm this variable will either be null or hold the intermediate
239 // reduction value stored to a loop-invariant address.
240 StoreInst *IntermediateStore = nullptr;
241
242 // Indicates that we found a reduction operation in our scan.
243 bool FoundReduxOp = false;
244
245 // We start with the PHI node and scan for all of the users of this
246 // instruction. All users must be instructions that can be used as reduction
247 // variables (such as ADD). We must have a single out-of-block user. The cycle
248 // must include the original PHI.
249 bool FoundStartPHI = false;
250
251 // To recognize min/max patterns formed by an icmp/select sequence, we count
252 // the number of instructions we saw from the recognized min/max pattern,
253 // to make sure we only see exactly those two instructions.
254 unsigned NumCmpSelectPatternInst = 0;
255 InstDesc ReduxDesc(false, nullptr);
256
257 // Data used for determining if the recurrence has been type-promoted.
258 Type *RecurrenceType = Phi->getType();
259 SmallPtrSet<Instruction *, 4> CastInsts;
260 unsigned MinWidthCastToRecurrenceType;
261 Instruction *Start = Phi;
262 bool IsSigned = false;
263
264 SmallPtrSet<Instruction *, 8> VisitedInsts;
265 SmallVector<Instruction *, 8> Worklist;
266
267 // Return early if the recurrence kind does not match the type of Phi. If the
268 // recurrence kind is arithmetic, we attempt to look through AND operations
269 // resulting from the type promotion performed by InstCombine. Vector
270 // operations are not limited to the legal integer widths, so we may be able
271 // to evaluate the reduction in the narrower width.
272 if (RecurrenceType->isFloatingPointTy()) {
273 if (!isFloatingPointRecurrenceKind(Kind))
274 return false;
275 } else if (RecurrenceType->isIntegerTy()) {
276 if (!isIntegerRecurrenceKind(Kind))
277 return false;
278 if (!isMinMaxRecurrenceKind(Kind))
279 Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
280 } else {
281 // Pointer min/max may exist, but it is not supported as a reduction op.
282 return false;
283 }
284
285 Worklist.push_back(Start);
286 VisitedInsts.insert(Start);
287
288 // Start with all flags set because we will intersect this with the reduction
289 // flags from all the reduction operations.
290 FastMathFlags FMF = FastMathFlags::getFast();
291
292 // The first instruction in the use-def chain of the Phi node that requires
293 // exact floating point operations.
294 Instruction *ExactFPMathInst = nullptr;
295
296 // A value in the reduction can be used:
297 // - By the reduction:
298 // - Reduction operation:
299 // - One use of reduction value (safe).
300 // - Multiple uses of the reduction value (not safe).
301 // - PHI:
302 // - All uses of the PHI must be the reduction (safe).
303 // - Otherwise, not safe.
304 // - By instructions outside of the loop (safe).
305 // * One value may have several outside users, but all outside
306 // uses must be of the same value.
307 // - By store instructions with a loop invariant address (safe with
308 // the following restrictions):
309 // * If there are several stores, all must have the same address.
310 // * Final value should be stored in that loop invariant address.
311 // - By an instruction that is not part of the reduction (not safe).
312 // This is either:
313 // * An instruction type other than PHI or the reduction operation.
314 // * A PHI in the header other than the initial PHI.
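// As a minimal example (hypothetical IR), a plain integer add reduction forms
// the cycle visited below:
//   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %loop ]
//   %sum.next = add i32 %sum, %x        ; the only in-loop user of %sum
// with %sum.next additionally used (at most) once outside the loop, which
// becomes the exit instruction.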
315 while (!Worklist.empty()) {
316 Instruction *Cur = Worklist.pop_back_val();
317
318 // Store instructions are allowed only if they store the reduction
319 // value to the same loop-invariant memory location.
320 if (auto *SI = dyn_cast<StoreInst>(Cur)) {
321 if (!SE) {
322 LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
323 << "Scalar Evolution Analysis\n");
324 return false;
325 }
326
327 const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
328 // Check it is the same address as previous stores
329 if (IntermediateStore) {
330 const SCEV *OtherScev =
331 SE->getSCEV(IntermediateStore->getPointerOperand());
332
333 if (OtherScev != PtrScev) {
334 LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
335 << "inside the loop: " << *SI->getPointerOperand()
336 << " and "
337 << *IntermediateStore->getPointerOperand() << '\n');
338 return false;
339 }
340 }
341
342 // Check the pointer is loop invariant
343 if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
344 LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
345 << "inside the loop: " << *SI->getPointerOperand()
346 << '\n');
347 return false;
348 }
349
350 // IntermediateStore is always the last store in the loop.
351 IntermediateStore = SI;
352 continue;
353 }
354
355 // No Users.
356 // If the instruction has no users then this is a broken chain and can't be
357 // a reduction variable.
358 if (Cur->use_empty())
359 return false;
360
361 bool IsAPhi = isa<PHINode>(Cur);
362
363 // A header PHI use other than the original PHI.
364 if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
365 return false;
366
367 // Reductions with non-commutative instructions such as Div and Sub are only
368 // possible if the LHS is the reduction variable.
369 if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
370 !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
371 !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
372 return false;
373
374 // Any reduction instruction must be of one of the allowed kinds. We ignore
375 // the starting value (the Phi or an AND instruction if the Phi has been
376 // type-promoted).
377 if (Cur != Start) {
378 ReduxDesc =
379 isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
380 ExactFPMathInst = ExactFPMathInst == nullptr
381 ? ReduxDesc.getExactFPMathInst()
382 : ExactFPMathInst;
383 if (!ReduxDesc.isRecurrence())
384 return false;
385 // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
386 if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
387 FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
388 if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
389 // Accept FMF on either fcmp or select of a min/max idiom.
390 // TODO: This is a hack to work-around the fact that FMF may not be
391 // assigned/propagated correctly. If that problem is fixed or we
392 // standardize on fmin/fmax via intrinsics, this can be removed.
393 if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
394 CurFMF |= FCmp->getFastMathFlags();
395 }
396 FMF &= CurFMF;
397 }
398 // Update this reduction kind if we matched a new instruction.
399 // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
400 // state accurate while processing the worklist?
401 if (ReduxDesc.getRecKind() != RecurKind::None)
402 Kind = ReduxDesc.getRecKind();
403 }
404
405 bool IsASelect = isa<SelectInst>(Cur);
406
407 // A conditional reduction operation must have at most 2 uses in
408 // VisitedInsts.
409 if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
410 hasMultipleUsesOf(Cur, VisitedInsts, 2))
411 return false;
412
413 // A reduction operation must only have one use of the reduction value.
414 if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
415 !isSelectCmpRecurrenceKind(Kind) &&
416 hasMultipleUsesOf(Cur, VisitedInsts, 1))
417 return false;
418
419 // All inputs to a PHI node must be a reduction value.
420 if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
421 return false;
422
423 if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectICmp) &&
424 (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
425 ++NumCmpSelectPatternInst;
426 if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectFCmp) &&
427 (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
428 ++NumCmpSelectPatternInst;
429
430 // Check whether we found a reduction operator.
431 FoundReduxOp |= !IsAPhi && Cur != Start;
432
433 // Process users of current instruction. Push non-PHI nodes after PHI nodes
434 // onto the stack. This way we are going to have seen all inputs to PHI
435 // nodes once we get to them.
436 SmallVector<Instruction *, 8> NonPHIs;
437 SmallVector<Instruction *, 8> PHIs;
438 for (User *U : Cur->users()) {
439 Instruction *UI = cast<Instruction>(U);
440
441 // If the user is a call to llvm.fmuladd then the instruction can only be
442 // the final operand.
443 if (isFMulAddIntrinsic(UI))
444 if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
445 return false;
446
447 // Check if we found the exit user.
448 BasicBlock *Parent = UI->getParent();
449 if (!TheLoop->contains(Parent)) {
450 // If we already know this instruction is used externally, move on to
451 // the next user.
452 if (ExitInstruction == Cur)
453 continue;
454
455 // Exit if we find multiple values used outside or if the header phi
456 // node is being used. In this case the user uses the value of the
457 // previous iteration, in which case we would lose "VF-1" iterations of
458 // the reduction operation if we vectorize.
459 if (ExitInstruction != nullptr || Cur == Phi)
460 return false;
461
462 // The instruction used by an outside user must be the last instruction
463 // before we feed back to the reduction phi. Otherwise, we lose VF-1
464 // operations on the value.
465 if (!is_contained(Phi->operands(), Cur))
466 return false;
467
468 ExitInstruction = Cur;
469 continue;
470 }
471
472 // Process instructions only once (termination). Each reduction cycle
473 // value must only be used once, except by phi nodes and min/max
474 // reductions which are represented as a cmp followed by a select.
475 InstDesc IgnoredVal(false, nullptr);
476 if (VisitedInsts.insert(UI).second) {
477 if (isa<PHINode>(UI)) {
478 PHIs.push_back(UI);
479 } else {
480 StoreInst *SI = dyn_cast<StoreInst>(UI);
481 if (SI && SI->getPointerOperand() == Cur) {
482 // The reduction-variable chain value may be stored somewhere, but it
483 // can't be used as an address.
484 return false;
485 }
486 NonPHIs.push_back(UI);
487 }
488 } else if (!isa<PHINode>(UI) &&
489 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
490 !isa<SelectInst>(UI)) ||
491 (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
492 !isSelectCmpPattern(TheLoop, Phi, UI, IgnoredVal)
493 .isRecurrence() &&
494 !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
495 return false;
496
497 // Remember that we completed the cycle.
498 if (UI == Phi)
499 FoundStartPHI = true;
500 }
501 Worklist.append(PHIs.begin(), PHIs.end());
502 Worklist.append(NonPHIs.begin(), NonPHIs.end());
503 }
504
505 // This means we have seen one but not the other instruction of the
506 // pattern or more than just a select and cmp. Zero implies that we saw a
507 // llvm.min/max intrinsic, which is always OK.
508 if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
509 NumCmpSelectPatternInst != 0)
510 return false;
511
512 if (isSelectCmpRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
513 return false;
514
515 if (IntermediateStore) {
516 // Check that the stored value goes to the phi node again. This way we make
517 // sure that the value stored in IntermediateStore is indeed the final
518 // reduction value.
519 if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
520 LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
521 << *IntermediateStore << '\n');
522 return false;
523 }
524
525 // If there is an exit instruction, its value should be stored in
526 // IntermediateStore.
527 if (ExitInstruction &&
528 IntermediateStore->getValueOperand() != ExitInstruction) {
529 LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
530 "store last calculated value of the reduction: "
531 << *IntermediateStore << '\n');
532 return false;
533 }
534
535 // If all uses are inside the loop (intermediate stores), then the
536 // reduction value after the loop will be the one used in the last store.
537 if (!ExitInstruction)
538 ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
539 }
540
541 if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
542 return false;
543
544 const bool IsOrdered =
545 checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
546
547 if (Start != Phi) {
548 // If the starting value is not the same as the phi node, we speculatively
549 // looked through an 'and' instruction when evaluating a potential
550 // arithmetic reduction to determine if it may have been type-promoted.
551 //
552 // We now compute the minimal bit width that is required to represent the
553 // reduction. If this is the same width that was indicated by the 'and', we
554 // can represent the reduction in the smaller type. The 'and' instruction
555 // will be eliminated since it will essentially be a cast instruction that
556 // can be ignored in the cost model. If we compute a different type than we
557 // did when evaluating the 'and', the 'and' will not be eliminated, and we
558 // will end up with different kinds of operations in the recurrence
559 // expression (e.g., IntegerAND, IntegerADD). We give up if this is
560 // the case.
561 //
562 // The vectorizer relies on InstCombine to perform the actual
563 // type-shrinking. It does this by inserting instructions to truncate the
564 // exit value of the reduction to the width indicated by RecurrenceType and
565 // then extend this value back to the original width. If IsSigned is false,
566 // a 'zext' instruction will be generated; otherwise, a 'sext' will be
567 // used.
568 //
569 // TODO: We should not rely on InstCombine to rewrite the reduction in the
570 // smaller type. We should just generate a correctly typed expression
571 // to begin with.
572 Type *ComputedType;
573 std::tie(ComputedType, IsSigned) =
574 computeRecurrenceType(ExitInstruction, DB, AC, DT);
575 if (ComputedType != RecurrenceType)
576 return false;
577 }
578
579 // Collect cast instructions and the minimum width used by the recurrence.
580 // If the starting value is not the same as the phi node and the computed
581 // recurrence type is equal to the recurrence type, the recurrence expression
582 // will be represented in a narrower or wider type. If there are any cast
583 // instructions that will be unnecessary, collect them in CastInsts.
584 // Note that the 'and' instruction was already included in this list.
585 //
586 // TODO: A better way to represent this may be to tag in some way all the
587 // instructions that are a part of the reduction. The vectorizer cost
588 // model could then apply the recurrence type to these instructions,
589 // without needing a white list of instructions to ignore.
590 // This may also be useful for the inloop reductions, if it can be
591 // kept simple enough.
592 collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
593 MinWidthCastToRecurrenceType);
594
595 // We found a reduction var if we have reached the original phi node and we
596 // only have a single instruction with out-of-loop users.
597
598 // The ExitInstruction (the instruction which is allowed to have out-of-loop
599 // users) is saved as part of the RecurrenceDescriptor.
600
601 // Save the description of this reduction variable.
602 RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
603 FMF, ExactFPMathInst, RecurrenceType, IsSigned,
604 IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
605 RedDes = RD;
606
607 return true;
608 }
609
610 // We are looking for loops that do something like this:
611 // int r = 0;
612 // for (int i = 0; i < n; i++) {
613 // if (src[i] > 3)
614 // r = 3;
615 // }
616 // where the reduction value (r) only has two states, in this example 0 or 3.
617 // The generated LLVM IR for this type of loop will be like this:
618 // for.body:
619 // %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
620 // ...
621 // %cmp = icmp sgt i32 %5, 3
622 // %spec.select = select i1 %cmp, i32 3, i32 %r
623 // ...
624 // In general we can support vectorization of loops where 'r' flips between
625 // any two non-constants, provided they are loop invariant. The only thing
626 // we actually care about at the end of the loop is whether or not any lane
627 // in the selected vector is different from the start value. The final
628 // across-vector reduction after the loop simply involves choosing the start
629 // value if nothing changed (0 in the example above) or the other selected
630 // value (3 in the example above).
631 RecurrenceDescriptor::InstDesc
632 RecurrenceDescriptor::isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
633 Instruction *I, InstDesc &Prev) {
634 // We must handle the select(cmp(),x,y) as a single instruction. Advance to
635 // the select.
636 CmpInst::Predicate Pred;
637 if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
638 if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
639 return InstDesc(Select, Prev.getRecKind());
640 }
641
642 // Only match select with single use cmp condition.
643 if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
644 m_Value())))
645 return InstDesc(false, I);
646
647 SelectInst *SI = cast<SelectInst>(I);
648 Value *NonPhi = nullptr;
649
650 if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
651 NonPhi = SI->getFalseValue();
652 else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
653 NonPhi = SI->getTrueValue();
654 else
655 return InstDesc(false, I);
656
657 // We are looking for selects of the form:
658 // select(cmp(), phi, loop_invariant) or
659 // select(cmp(), loop_invariant, phi)
660 if (!Loop->isLoopInvariant(NonPhi))
661 return InstDesc(false, I);
662
663 return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::SelectICmp
664 : RecurKind::SelectFCmp);
665 }
666
667 RecurrenceDescriptor::InstDesc
668 RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
669 const InstDesc &Prev) {
670 assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
671 "Expected a cmp or select or call instruction");
672 if (!isMinMaxRecurrenceKind(Kind))
673 return InstDesc(false, I);
674
675 // We must handle the select(cmp()) as a single instruction. Advance to the
676 // select.
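// For illustration, an integer smax idiom appears as (hypothetical names):
//   %cmp = icmp sgt i32 %max, %x
//   %max.next = select i1 %cmp, i32 %max, i32 %x
// When called on the cmp we advance to its select user; when called on the
// select we match it against the min/max patterns below.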
677 CmpInst::Predicate Pred;
678 if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
679 if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
680 return InstDesc(Select, Prev.getRecKind());
681 }
682
683 // Only match select with single use cmp condition, or a min/max intrinsic.
684 if (!isa<IntrinsicInst>(I) &&
685 !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
686 m_Value())))
687 return InstDesc(false, I);
688
689 // Look for a min/max pattern.
690 if (match(I, m_UMin(m_Value(), m_Value())))
691 return InstDesc(Kind == RecurKind::UMin, I);
692 if (match(I, m_UMax(m_Value(), m_Value())))
693 return InstDesc(Kind == RecurKind::UMax, I);
694 if (match(I, m_SMax(m_Value(), m_Value())))
695 return InstDesc(Kind == RecurKind::SMax, I);
696 if (match(I, m_SMin(m_Value(), m_Value())))
697 return InstDesc(Kind == RecurKind::SMin, I);
698 if (match(I, m_OrdFMin(m_Value(), m_Value())))
699 return InstDesc(Kind == RecurKind::FMin, I);
700 if (match(I, m_OrdFMax(m_Value(), m_Value())))
701 return InstDesc(Kind == RecurKind::FMax, I);
702 if (match(I, m_UnordFMin(m_Value(), m_Value())))
703 return InstDesc(Kind == RecurKind::FMin, I);
704 if (match(I, m_UnordFMax(m_Value(), m_Value())))
705 return InstDesc(Kind == RecurKind::FMax, I);
706 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
707 return InstDesc(Kind == RecurKind::FMin, I);
708 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
709 return InstDesc(Kind == RecurKind::FMax, I);
710
711 return InstDesc(false, I);
712 }
713
714 /// Returns true if the select instruction has users in the compare-and-add
715 /// reduction pattern below. The select instruction argument is the last one
716 /// in the sequence.
717 ///
718 /// %sum.1 = phi ...
719 /// ...
720 /// %cmp = fcmp pred %0, %CFP
721 /// %add = fadd %0, %sum.1
722 /// %sum.2 = select %cmp, %add, %sum.1
723 RecurrenceDescriptor::InstDesc
724 RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
725 SelectInst *SI = dyn_cast<SelectInst>(I);
726 if (!SI)
727 return InstDesc(false, I);
728
729 CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
730 // Only handle single use cases for now.
731 if (!CI || !CI->hasOneUse())
732 return InstDesc(false, I);
733
734 Value *TrueVal = SI->getTrueValue();
735 Value *FalseVal = SI->getFalseValue();
736 // For now, handle only the case where exactly one of the operands of the
737 // select instruction is a PHI node.
738 if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
739 (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
740 return InstDesc(false, I);
741
742 Instruction *I1 =
743 isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
744 : dyn_cast<Instruction>(TrueVal);
745 if (!I1 || !I1->isBinaryOp())
746 return InstDesc(false, I);
747
748 Value *Op1, *Op2;
749 if ((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
750 m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
751 I1->isFast())
752 return InstDesc(Kind == RecurKind::FAdd, SI);
753
754 if (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast()))
755 return InstDesc(Kind == RecurKind::FMul, SI);
756
757 return InstDesc(false, I);
758 }
759
760 RecurrenceDescriptor::InstDesc
761 RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
762 Instruction *I, RecurKind Kind,
763 InstDesc &Prev, FastMathFlags FuncFMF) {
764 assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
765 switch (I->getOpcode()) {
766 default:
767 return InstDesc(false, I);
768 case Instruction::PHI:
769 return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
770 case Instruction::Sub:
771 case Instruction::Add:
772 return InstDesc(Kind == RecurKind::Add, I);
773 case Instruction::Mul:
774 return InstDesc(Kind == RecurKind::Mul, I);
775 case Instruction::And:
776 return InstDesc(Kind == RecurKind::And, I);
777 case Instruction::Or:
778 return InstDesc(Kind == RecurKind::Or, I);
779 case Instruction::Xor:
780 return InstDesc(Kind == RecurKind::Xor, I);
781 case Instruction::FDiv:
782 case Instruction::FMul:
783 return InstDesc(Kind == RecurKind::FMul, I,
784 I->hasAllowReassoc() ? nullptr : I);
785 case Instruction::FSub:
786 case Instruction::FAdd:
787 return InstDesc(Kind == RecurKind::FAdd, I,
788 I->hasAllowReassoc() ? nullptr : I);
789 case Instruction::Select:
790 if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
791 return isConditionalRdxPattern(Kind, I);
792 [[fallthrough]];
793 case Instruction::FCmp:
794 case Instruction::ICmp:
795 case Instruction::Call:
796 if (isSelectCmpRecurrenceKind(Kind))
797 return isSelectCmpPattern(L, OrigPhi, I, Prev);
798 if (isIntMinMaxRecurrenceKind(Kind) ||
799 (((FuncFMF.noNaNs() && FuncFMF.noSignedZeros()) ||
800 (isa<FPMathOperator>(I) && I->hasNoNaNs() &&
801 I->hasNoSignedZeros())) &&
802 isFPMinMaxRecurrenceKind(Kind)))
803 return isMinMaxPattern(I, Kind, Prev);
804 else if (isFMulAddIntrinsic(I))
805 return InstDesc(Kind == RecurKind::FMulAdd, I,
806 I->hasAllowReassoc() ? nullptr : I);
807 return InstDesc(false, I);
808 }
809 }
810
811 bool RecurrenceDescriptor::hasMultipleUsesOf(
812 Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
813 unsigned MaxNumUses) {
814 unsigned NumUses = 0;
815 for (const Use &U : I->operands()) {
816 if (Insts.count(dyn_cast<Instruction>(U)))
817 ++NumUses;
818 if (NumUses > MaxNumUses)
819 return true;
820 }
821
822 return false;
823 }
824
825 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
826 RecurrenceDescriptor &RedDes,
827 DemandedBits *DB, AssumptionCache *AC,
828 DominatorTree *DT,
829 ScalarEvolution *SE) {
830 BasicBlock *Header = TheLoop->getHeader();
831 Function &F = *Header->getParent();
832 FastMathFlags FMF;
833 FMF.setNoNaNs(
834 F.getFnAttribute("no-nans-fp-math").getValueAsBool());
835 FMF.setNoSignedZeros(
836 F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
837
838 if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
839 SE)) {
840 LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
841 return true;
842 }
843 if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
844 SE)) {
845 LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
846 return true;
847 }
848 if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
849 SE)) {
850 LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
851 return true;
852 }
853 if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
854 SE)) {
855 LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
856 return true;
857 }
858 if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
859 SE)) {
860 LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
861 return true;
862 }
863 if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
864 SE)) {
865 LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
866 return true;
867 }
868 if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
869 SE)) {
870 LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
871 return true;
872 }
873 if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
874 SE)) {
875 LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
876 return true;
877 }
878 if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
879 SE)) {
880 LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
881 return true;
882 }
883 if (AddReductionVar(Phi, RecurKind::SelectICmp, TheLoop, FMF, RedDes, DB, AC,
884 DT, SE)) {
885 LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
886 << *Phi << "\n");
887 return true;
888 }
889 if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
890 SE)) {
891 LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
892 return true;
893 }
894 if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
895 SE)) {
896 LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
897 return true;
898 }
899 if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
900 SE)) {
901 LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
902 return true;
903 }
904 if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
905 SE)) {
906 LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
907 return true;
908 }
909 if (AddReductionVar(Phi, RecurKind::SelectFCmp, TheLoop, FMF, RedDes, DB, AC,
910 DT, SE)) {
911 LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
912 << *Phi << "\n");
913 return true;
914 }
915 if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
916 SE)) {
917 LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
918 return true;
919 }
920 // Not a reduction of known type.
921 return false;
922 }
923
924 bool RecurrenceDescriptor::isFixedOrderRecurrence(
925 PHINode *Phi, Loop *TheLoop,
926 MapVector<Instruction *, Instruction *> &SinkAfter, DominatorTree *DT) {
927
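// For illustration, a fixed-order (first-order) recurrence in C form
// (hypothetical):
//   for (int i = 0; i < n; i++) { b[i] = g(last); last = a[i]; }
// The phi for 'last' takes its previous-iteration value via the latch edge,
// and every user of the phi must be sinkable after that previous value.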
928 // Ensure the phi node is in the loop header and has two incoming values.
929 if (Phi->getParent() != TheLoop->getHeader() ||
930 Phi->getNumIncomingValues() != 2)
931 return false;
932
933 // Ensure the loop has a preheader and a single latch block. The loop
934 // vectorizer will need the latch to set up the next iteration of the loop.
935 auto *Preheader = TheLoop->getLoopPreheader();
936 auto *Latch = TheLoop->getLoopLatch();
937 if (!Preheader || !Latch)
938 return false;
939
940 // Ensure the phi node's incoming blocks are the loop preheader and latch.
941 if (Phi->getBasicBlockIndex(Preheader) < 0 ||
942 Phi->getBasicBlockIndex(Latch) < 0)
943 return false;
944
945 // Get the previous value. The previous value comes from the latch edge while
946 // the initial value comes from the preheader edge.
947 auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
948
949 // If Previous is a phi in the header, go through incoming values from the
950 // latch until we find a non-phi value. Use this as the new Previous; all uses
951 // in the header will be dominated by the original phi, but need to be moved
952 // after the non-phi previous value.
953 SmallPtrSet<PHINode *, 4> SeenPhis;
954 while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
955 if (PrevPhi->getParent() != Phi->getParent())
956 return false;
957 if (!SeenPhis.insert(PrevPhi).second)
958 return false;
959 Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
960 }
961
962 if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous) ||
963 SinkAfter.count(Previous)) // Cannot rely on dominance due to motion.
964 return false;
965
966 // Ensure every user of the phi node (recursively) is dominated by the
967 // previous value. The dominance requirement ensures the loop vectorizer will
968 // not need to vectorize the initial value prior to the first iteration of the
969 // loop.
970 // TODO: Consider extending this sinking to handle memory instructions.
971
972 // We optimistically assume we can sink all users after Previous. Keep a set
973 // of instructions to sink after Previous ordered by dominance in the common
974 // basic block. It will be applied to SinkAfter if all users can be sunk.
975 auto CompareByComesBefore = [](const Instruction *A, const Instruction *B) {
976 return A->comesBefore(B);
977 };
978 std::set<Instruction *, decltype(CompareByComesBefore)> InstrsToSink(
979 CompareByComesBefore);
980
981 BasicBlock *PhiBB = Phi->getParent();
982 SmallVector<Instruction *, 8> WorkList;
983 auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
984 // Already sunk SinkCandidate.
985 if (SinkCandidate->getParent() == PhiBB &&
986 InstrsToSink.find(SinkCandidate) != InstrsToSink.end())
987 return true;
988
989 // Cyclic dependence.
990 if (Previous == SinkCandidate)
991 return false;
992
993 if (DT->dominates(Previous,
994 SinkCandidate)) // We already are good w/o sinking.
995 return true;
996
997 if (SinkCandidate->getParent() != PhiBB ||
998 SinkCandidate->mayHaveSideEffects() ||
999 SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
1000 return false;
1001
1002 // Avoid sinking an instruction multiple times (if multiple operands are
1003 // fixed order recurrences) by sinking once - after the latest 'previous'
1004 // instruction.
1005 auto It = SinkAfter.find(SinkCandidate);
1006 if (It != SinkAfter.end()) {
1007 auto *OtherPrev = It->second;
1008 // Find the earliest entry in the 'sink-after' chain. The last entry in
1009 // the chain is the original 'Previous' for a recurrence handled earlier.
1010 auto EarlierIt = SinkAfter.find(OtherPrev);
1011 while (EarlierIt != SinkAfter.end()) {
1012 Instruction *EarlierInst = EarlierIt->second;
1013 EarlierIt = SinkAfter.find(EarlierInst);
1014 // Bail out if order has not been preserved.
1015 if (EarlierIt != SinkAfter.end() &&
1016 !DT->dominates(EarlierInst, OtherPrev))
1017 return false;
1018 OtherPrev = EarlierInst;
1019 }
1020 // Bail out if order has not been preserved.
1021 if (OtherPrev != It->second && !DT->dominates(It->second, OtherPrev))
1022 return false;
1023
1024 // SinkCandidate is already being sunk after an instruction after
1025 // Previous. Nothing left to do.
1026 if (DT->dominates(Previous, OtherPrev) || Previous == OtherPrev)
1027 return true;
1028
1029 // If there are other instructions to be sunk after SinkCandidate, removing
1030 // and re-inserting SinkCandidate can break those instructions. Bail out for
1031 // simplicity.
1032 if (any_of(SinkAfter,
1033 [SinkCandidate](const std::pair<Instruction *, Instruction *> &P) {
1034 return P.second == SinkCandidate;
1035 }))
1036 return false;
1037
1038 // Otherwise, Previous comes after OtherPrev and SinkCandidate needs to be
1039 // re-sunk to Previous, instead of sinking to OtherPrev. Remove
1040 // SinkCandidate from SinkAfter to ensure its insert position is updated.
1041 SinkAfter.erase(SinkCandidate);
1042 }
1043
1044 // If we reach a PHI node that is not dominated by Previous, we reached a
1045 // header PHI. No need for sinking.
1046 if (isa<PHINode>(SinkCandidate))
1047 return true;
1048
1049 // Sink User tentatively and check its users
1050 InstrsToSink.insert(SinkCandidate);
1051 WorkList.push_back(SinkCandidate);
1052 return true;
1053 };
1054
1055 WorkList.push_back(Phi);
1056 // Try to recursively sink instructions and their users after Previous.
1057 while (!WorkList.empty()) {
1058 Instruction *Current = WorkList.pop_back_val();
1059 for (User *User : Current->users()) {
1060 if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1061 return false;
1062 }
1063 }
1064
1065 // We can sink all users of Phi. Update the mapping.
1066 for (Instruction *I : InstrsToSink) {
1067 SinkAfter[I] = Previous;
1068 Previous = I;
1069 }
1070 return true;
1071 }
1072
1073 /// This function returns the identity element (or neutral element) for
1074 /// the operation K.
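/// For example, the identity is 0 for Add/Or/Xor, 1 for Mul, all-ones for And,
/// +infinity for FMin and -infinity for FMax (both require nnan/nsz here), and
/// -0.0 for FAdd (or 0.0 when nsz is set). The vectorizer typically uses this
/// value to seed the lanes of the vector phi's start value.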
1075 Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1076 FastMathFlags FMF) const {
1077 switch (K) {
1078 case RecurKind::Xor:
1079 case RecurKind::Add:
1080 case RecurKind::Or:
1081 // Adding, Xoring, Oring zero to a number does not change it.
1082 return ConstantInt::get(Tp, 0);
1083 case RecurKind::Mul:
1084 // Multiplying a number by 1 does not change it.
1085 return ConstantInt::get(Tp, 1);
1086 case RecurKind::And:
1087 // AND-ing a number with an all-1 value does not change it.
1088 return ConstantInt::get(Tp, -1, true);
1089 case RecurKind::FMul:
1090 // Multiplying a number by 1 does not change it.
1091 return ConstantFP::get(Tp, 1.0L);
1092 case RecurKind::FMulAdd:
1093 case RecurKind::FAdd:
1094 // Adding zero to a number does not change it.
1095 // FIXME: Ideally we should not need to check FMF for FAdd and should always
1096 // use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1097 // Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1098 // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1099 // mean we can then remove the check for noSignedZeros() below (see D98963).
1100 if (FMF.noSignedZeros())
1101 return ConstantFP::get(Tp, 0.0L);
1102 return ConstantFP::get(Tp, -0.0L);
1103 case RecurKind::UMin:
1104 return ConstantInt::get(Tp, -1);
1105 case RecurKind::UMax:
1106 return ConstantInt::get(Tp, 0);
1107 case RecurKind::SMin:
1108 return ConstantInt::get(Tp,
1109 APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1110 case RecurKind::SMax:
1111 return ConstantInt::get(Tp,
1112 APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1113 case RecurKind::FMin:
1114 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1115 "nnan, nsz is expected to be set for FP min reduction.");
1116 return ConstantFP::getInfinity(Tp, false /*Negative*/);
1117 case RecurKind::FMax:
1118 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1119 "nnan, nsz is expected to be set for FP max reduction.");
1120 return ConstantFP::getInfinity(Tp, true /*Negative*/);
1121 case RecurKind::SelectICmp:
1122 case RecurKind::SelectFCmp:
1123 return getRecurrenceStartValue();
1124 break;
1125 default:
1126 llvm_unreachable("Unknown recurrence kind");
1127 }
1128 }
1129
1130 unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1131 switch (Kind) {
1132 case RecurKind::Add:
1133 return Instruction::Add;
1134 case RecurKind::Mul:
1135 return Instruction::Mul;
1136 case RecurKind::Or:
1137 return Instruction::Or;
1138 case RecurKind::And:
1139 return Instruction::And;
1140 case RecurKind::Xor:
1141 return Instruction::Xor;
1142 case RecurKind::FMul:
1143 return Instruction::FMul;
1144 case RecurKind::FMulAdd:
1145 case RecurKind::FAdd:
1146 return Instruction::FAdd;
1147 case RecurKind::SMax:
1148 case RecurKind::SMin:
1149 case RecurKind::UMax:
1150 case RecurKind::UMin:
1151 case RecurKind::SelectICmp:
1152 return Instruction::ICmp;
1153 case RecurKind::FMax:
1154 case RecurKind::FMin:
1155 case RecurKind::SelectFCmp:
1156 return Instruction::FCmp;
1157 default:
1158 llvm_unreachable("Unknown recurrence operation");
1159 }
1160 }
1161
1162 SmallVector<Instruction *, 4>
1163 RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1164 SmallVector<Instruction *, 4> ReductionOperations;
1165 unsigned RedOp = getOpcode(Kind);
1166
1167 // Search down from the Phi to the LoopExitInstr, looking for instructions
1168 // with a single user of the correct type for the reduction.
1169
1170 // Note that we check that the type of the operand is correct for each item in
1171 // the chain, including the last (the loop exit value). This can come up from
1172 // sub, which would otherwise be treated as an add reduction. MinMax reductions
1173 // also need to check for an icmp/select pair, for which we use the
1174 // getNextInstruction and isCorrectOpcode helpers to step over the right number
1175 // of instructions and check the icmp/select pair.
1176 // FIXME: We also do not attempt to look through Selects yet, which might
1177 // be part of the reduction chain, or attempt to look through Ands to find a
1178 // smaller bitwidth. Subs are also currently not allowed (they are usually
1179 // treated as part of an add reduction) as they are expected to generally be
1180 // more expensive than out-of-loop reductions, and need to be costed more
1181 // carefully.
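// For illustration (hypothetical IR), a simple integer add chain found here is
//   %sum = phi ...; %add1 = add i32 %sum, %a; %add2 = add i32 %add1, %b
// where %add1 has a single use, and %add2 (the loop exit instruction) has one
// use feeding back into %sum and one use in an LCSSA exit phi.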
1182 unsigned ExpectedUses = 1;
1183 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
1184 ExpectedUses = 2;
1185
1186 auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1187 for (auto *User : Cur->users()) {
1188 Instruction *UI = cast<Instruction>(User);
1189 if (isa<PHINode>(UI))
1190 continue;
1191 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1192 // We are expecting an icmp/select pair, so we advance to the next select
1193 // instruction if we can. We already know that Cur has 2 uses.
1194 if (isa<SelectInst>(UI))
1195 return UI;
1196 continue;
1197 }
1198 return UI;
1199 }
1200 return nullptr;
1201 };
1202 auto isCorrectOpcode = [&](Instruction *Cur) {
1203 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1204 Value *LHS, *RHS;
1205 return SelectPatternResult::isMinOrMax(
1206 matchSelectPattern(Cur, LHS, RHS).Flavor);
1207 }
1208 // Recognize a call to the llvm.fmuladd intrinsic.
1209 if (isFMulAddIntrinsic(Cur))
1210 return true;
1211
1212 return Cur->getOpcode() == RedOp;
1213 };
1214
1215 // Attempt to look through Phis which are part of the reduction chain
1216 unsigned ExtraPhiUses = 0;
1217 Instruction *RdxInstr = LoopExitInstr;
1218 if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1219 if (ExitPhi->getNumIncomingValues() != 2)
1220 return {};
1221
1222 Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
1223 Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
1224
1225 Instruction *Chain = nullptr;
1226 if (Inc0 == Phi)
1227 Chain = Inc1;
1228 else if (Inc1 == Phi)
1229 Chain = Inc0;
1230 else
1231 return {};
1232
1233 RdxInstr = Chain;
1234 ExtraPhiUses = 1;
1235 }
1236
1237 // We check the loop exit instruction first (as a quick test) but add it last.
1238 // We check that the opcode is correct (and don't allow it to be a Sub) and
1239 // that it has the expected number of uses. It will have one use from the phi
1240 // and one from an LCSSA value, no matter the type.
1241 if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
1242 return {};
1243
1244 // Check that the Phi has one (or two for min/max) uses, plus an extra use
1245 // for conditional reductions.
1246 if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
1247 return {};
1248
1249 Instruction *Cur = getNextInstruction(Phi);
1250
1251 // Every other instruction in the chain should have the expected number of
1252 // uses and be the correct opcode.
1253 while (Cur != RdxInstr) {
1254 if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1255 return {};
1256
1257 ReductionOperations.push_back(Cur);
1258 Cur = getNextInstruction(Cur);
1259 }
1260
1261 ReductionOperations.push_back(Cur);
1262 return ReductionOperations;
1263 }
1264
1265 InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1266 const SCEV *Step, BinaryOperator *BOp,
1267 Type *ElementType,
1268 SmallVectorImpl<Instruction *> *Casts)
1269 : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp),
1270 ElementType(ElementType) {
1271 assert(IK != IK_NoInduction && "Not an induction");
1272
1273 // Start value type should match the induction kind and the value
1274 // itself should not be null.
1275 assert(StartValue && "StartValue is null");
1276 assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1277 "StartValue is not a pointer for pointer induction");
1278 assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1279 "StartValue is not an integer for integer induction");
1280
1281 // Check the Step Value. It should be a non-zero integer value.
1282 assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1283 "Step value is zero");
1284
1285 assert((IK != IK_PtrInduction || getConstIntStepValue()) &&
1286 "Step value should be constant for pointer induction");
1287 assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1288 "StepValue is not an integer");
1289
1290 assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1291 "StepValue is not FP for FpInduction");
1292 assert((IK != IK_FpInduction ||
1293 (InductionBinOp &&
1294 (InductionBinOp->getOpcode() == Instruction::FAdd ||
1295 InductionBinOp->getOpcode() == Instruction::FSub))) &&
1296 "Binary opcode should be specified for FP induction");
1297
1298 if (IK == IK_PtrInduction)
1299 assert(ElementType && "Pointer induction must have element type");
1300 else
1301 assert(!ElementType && "Non-pointer induction cannot have element type");
1302
1303 if (Casts) {
1304 for (auto &Inst : *Casts) {
1305 RedundantCasts.push_back(Inst);
1306 }
1307 }
1308 }
1309
1310 ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1311 if (isa<SCEVConstant>(Step))
1312 return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1313 return nullptr;
1314 }
1315
1316 bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1317 ScalarEvolution *SE,
1318 InductionDescriptor &D) {
1319
1320 // Here we only handle FP induction variables.
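// For illustration, the pattern recognized is (hypothetical names):
//   %x = phi float [ %init, %preheader ], [ %x.next, %latch ]
//   %x.next = fadd float %x, %step   ; or fsub with %x as the first operand
// where %step is loop-invariant; its SCEV is recorded as an unknown step.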
1321 assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1322
1323 if (TheLoop->getHeader() != Phi->getParent())
1324 return false;
1325
1326 // The loop may have multiple entrances or multiple exits; we can analyze
1327 // this phi if it has a unique entry value and a unique backedge value.
1328 if (Phi->getNumIncomingValues() != 2)
1329 return false;
1330 Value *BEValue = nullptr, *StartValue = nullptr;
1331 if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1332 BEValue = Phi->getIncomingValue(0);
1333 StartValue = Phi->getIncomingValue(1);
1334 } else {
1335 assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1336 "Unexpected Phi node in the loop");
1337 BEValue = Phi->getIncomingValue(1);
1338 StartValue = Phi->getIncomingValue(0);
1339 }
1340
1341 BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1342 if (!BOp)
1343 return false;
1344
1345 Value *Addend = nullptr;
1346 if (BOp->getOpcode() == Instruction::FAdd) {
1347 if (BOp->getOperand(0) == Phi)
1348 Addend = BOp->getOperand(1);
1349 else if (BOp->getOperand(1) == Phi)
1350 Addend = BOp->getOperand(0);
1351 } else if (BOp->getOpcode() == Instruction::FSub)
1352 if (BOp->getOperand(0) == Phi)
1353 Addend = BOp->getOperand(1);
1354
1355 if (!Addend)
1356 return false;
1357
1358 // The addend should be loop invariant
1359 if (auto *I = dyn_cast<Instruction>(Addend))
1360 if (TheLoop->contains(I))
1361 return false;
1362
1363 // FP Step has unknown SCEV
1364 const SCEV *Step = SE->getUnknown(Addend);
1365 D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1366 return true;
1367 }
1368
1369 /// This function is called when we suspect that the update-chain of a phi node
1370 /// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1371 /// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1372 /// predicate P under which the SCEV expression for the phi can be the
1373 /// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1374 /// cast instructions that are involved in the update-chain of this induction.
1375 /// A caller that adds the required runtime predicate can be free to drop these
1376 /// cast instructions, and compute the phi using \p AR (instead of some scev
1377 /// expression with casts).
1378 ///
1379 /// For example, without a predicate the scev expression can take the following
1380 /// form:
1381 /// (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1382 ///
1383 /// It corresponds to the following IR sequence:
1384 /// %for.body:
1385 /// %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1386 /// %casted_phi = "ExtTrunc i64 %x"
1387 /// %add = add i64 %casted_phi, %step
1388 ///
1389 /// where %x is given in \p PN,
1390 /// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1391 /// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1392 /// several forms, for example, such as:
1393 /// ExtTrunc1: %casted_phi = and %x, 2^n-1
1394 /// or:
1395 /// ExtTrunc2: %t = shl %x, m
1396 /// %casted_phi = ashr %t, m
1397 ///
1398 /// If we are able to find such sequence, we return the instructions
1399 /// we found, namely %casted_phi and the instructions on its use-def chain up
1400 /// to the phi (not including the phi).
static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
                                    const SCEVUnknown *PhiScev,
                                    const SCEVAddRecExpr *AR,
                                    SmallVectorImpl<Instruction *> &CastInsts) {

  assert(CastInsts.empty() && "CastInsts is expected to be empty.");
  auto *PN = cast<PHINode>(PhiScev->getValue());
  assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
  const Loop *L = AR->getLoop();

  // Find any cast instructions that participate in the def-use chain of
  // PhiScev in the loop.
  // FORNOW/TODO: We currently expect the def-use chain to include only
  // two-operand instructions, where one of the operands is an invariant.
  // createAddRecFromPHIWithCasts() currently does not support anything more
  // involved than that, so we keep the search simple. This can be
  // extended/generalized as needed.

  auto getDef = [&](const Value *Val) -> Value * {
    const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
    if (!BinOp)
      return nullptr;
    Value *Op0 = BinOp->getOperand(0);
    Value *Op1 = BinOp->getOperand(1);
    Value *Def = nullptr;
    if (L->isLoopInvariant(Op0))
      Def = Op1;
    else if (L->isLoopInvariant(Op1))
      Def = Op0;
    return Def;
  };
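  // Illustrative note (not from the original source): for '%t = shl i64 %x, 3'
  // the constant 3 is loop invariant, so getDef(%t) returns %x; for an
  // instruction that is not a binary operator, or whose operands are both
  // loop-variant, getDef returns nullptr and the walk below gives up.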

  // Look for the instruction that defines the induction via the
  // loop backedge.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;
  Value *Val = PN->getIncomingValueForBlock(Latch);
  if (!Val)
    return false;

  // Follow the def-use chain until the induction phi is reached.
  // If on the way we encounter a Value that has the same SCEV Expr as the
  // phi node, we can consider the instructions we visit from that point
  // as part of the cast-sequence that can be ignored.
  bool InCastSequence = false;
  auto *Inst = dyn_cast<Instruction>(Val);
  while (Val != PN) {
    // If we encountered a phi node other than PN, or if we left the loop,
    // we bail out.
    if (!Inst || !L->contains(Inst)) {
      return false;
    }
    auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
    if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
      InCastSequence = true;
    if (InCastSequence) {
      // Only the last instruction in the cast sequence is expected to have
      // uses outside the induction def-use chain.
      if (!CastInsts.empty())
        if (!Inst->hasOneUse())
          return false;
      CastInsts.push_back(Inst);
    }
    Val = getDef(Val);
    if (!Val)
      return false;
    Inst = dyn_cast<Instruction>(Val);
  }

  return InCastSequence;
}
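// A rough walk-through (illustrative, not from the original source) for the
// ExtTrunc1 form documented above: the search starts at %add (the value coming
// from the latch), steps through getDef to %casted_phi (the %step operand is
// loop invariant), notices that %casted_phi's SCEV matches AR under the
// predicate and so enters the cast sequence, eventually recording at least
// %casted_phi in CastInsts, and stops once getDef reaches the phi %x itself.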

bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                         PredicatedScalarEvolution &PSE,
                                         InductionDescriptor &D, bool Assume) {
  Type *PhiTy = Phi->getType();

  // Handle integer and pointer induction variables. Floating-point inductions
  // are also handled here, but without trying to build a recurrence expression
  // from the PHI node in place.

  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
      !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
    return false;

  if (PhiTy->isFloatingPointTy())
    return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);

  const SCEV *PhiScev = PSE.getSCEV(Phi);
  const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  // We need this expression to be an AddRecExpr.
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Phi);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  // Record any cast instructions that participate in the induction update.
  const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
  // If we started from a SCEVUnknown, and managed to build an AddRecurrence
  // only after enabling Assume with PSCEV, this means we may have encountered
  // cast instructions that required adding a runtime check in order to
  // guarantee the correctness of the AddRecurrence representation of the
  // induction.
  if (PhiScev != AR && SymbolicPhi) {
    SmallVector<Instruction *, 2> Casts;
    if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
      return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
  }

  return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
}
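// Typical use (an illustrative sketch, not code from this file): a client such
// as a vectorizer legality check might call the predicated overload above as
//   InductionDescriptor ID;
//   if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID,
//                                           /*Assume=*/true))
//     ...; // ID now describes the start value, step, and induction kind.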

bool InductionDescriptor::isInductionPHI(
    PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
    InductionDescriptor &D, const SCEV *Expr,
    SmallVectorImpl<Instruction *> *CastsToIgnore) {
  Type *PhiTy = Phi->getType();
  // We only handle integer and pointer induction variables.
  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
    return false;

  // Check that the PHI is consecutive.
  const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  if (AR->getLoop() != TheLoop) {
    // FIXME: We should treat this as a uniform. Unfortunately, we
    // don't currently know how to handle uniform PHIs.
    LLVM_DEBUG(
        dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
    return false;
  }

  Value *StartValue =
      Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());

  BasicBlock *Latch = AR->getLoop()->getLoopLatch();
  if (!Latch)
    return false;

  const SCEV *Step = AR->getStepRecurrence(*SE);
  // Calculate the stride and check that it is consecutive.
  // The stride may be a constant or a loop-invariant integer value.
  const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
  if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
    return false;

  if (PhiTy->isIntegerTy()) {
    BinaryOperator *BOp =
        dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
    D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
                            /* ElementType */ nullptr, CastsToIgnore);
    return true;
  }

  assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
  // A pointer induction step must be a constant.
  if (!ConstStep)
    return false;

  // Always use an i8 element type for opaque pointer inductions.
  PointerType *PtrTy = cast<PointerType>(PhiTy);
  Type *ElementType = PtrTy->isOpaque()
                          ? Type::getInt8Ty(PtrTy->getContext())
                          : PtrTy->getNonOpaquePointerElementType();
  if (!ElementType->isSized())
    return false;

  ConstantInt *CV = ConstStep->getValue();
  const DataLayout &DL = Phi->getModule()->getDataLayout();
  TypeSize TySize = DL.getTypeAllocSize(ElementType);
  // TODO: We could potentially support this for scalable vectors if we can
  // prove at compile time that the constant step is always a multiple of
  // the scalable type.
  if (TySize.isZero() || TySize.isScalable())
    return false;

  int64_t Size = static_cast<int64_t>(TySize.getFixedValue());
  int64_t CVSize = CV->getSExtValue();
  if (CVSize % Size)
    return false;
  auto *StepValue =
      SE->getConstant(CV->getType(), CVSize / Size, true /* signed */);
  D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue,
                          /* BinOp */ nullptr, ElementType);
  return true;
}

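// A worked example (illustrative, not from the original source) of the element
// step computation above: for a non-opaque 'i32*' induction whose SCEV step is
// the constant 8 (bytes), Size = DL.getTypeAllocSize(i32) = 4 and CVSize = 8,
// so the recorded pointer step becomes 8 / 4 = 2 elements; a byte step of 6
// would be rejected because it is not a multiple of the element size.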