xref: /aosp_15_r20/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/X86/X86ISelLowering.cpp (revision 03ce13f70fcc45d86ee91b7ee4cab1936a95046e)
1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "X86ISelLowering.h"
15 #include "MCTargetDesc/X86ShuffleDecode.h"
16 #include "X86.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86IntrinsicsInfo.h"
21 #include "X86MachineFunctionInfo.h"
22 #include "X86TargetMachine.h"
23 #include "X86TargetObjectFile.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/Analysis/BlockFrequencyInfo.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/ObjCARCUtil.h"
32 #include "llvm/Analysis/ProfileSummaryInfo.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/CodeGen/IntrinsicLowering.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineInstrBuilder.h"
38 #include "llvm/CodeGen/MachineJumpTableInfo.h"
39 #include "llvm/CodeGen/MachineLoopInfo.h"
40 #include "llvm/CodeGen/MachineModuleInfo.h"
41 #include "llvm/CodeGen/MachineRegisterInfo.h"
42 #include "llvm/CodeGen/TargetLowering.h"
43 #include "llvm/CodeGen/WinEHFuncInfo.h"
44 #include "llvm/IR/CallingConv.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DerivedTypes.h"
47 #include "llvm/IR/DiagnosticInfo.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/GlobalAlias.h"
50 #include "llvm/IR/GlobalVariable.h"
51 #include "llvm/IR/IRBuilder.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/MC/MCAsmInfo.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/MC/MCExpr.h"
58 #include "llvm/MC/MCSymbol.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/KnownBits.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Target/TargetOptions.h"
65 #include <algorithm>
66 #include <bitset>
67 #include <cctype>
68 #include <numeric>
69 using namespace llvm;
70 
71 #define DEBUG_TYPE "x86-isel"
72 
73 STATISTIC(NumTailCalls, "Number of tail calls");
74 
75 static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
76     "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
77     cl::desc(
78         "Sets the preferable loop alignment for experiments (as log2 bytes) "
79         "for innermost loops only. If specified, this option overrides "
80         "alignment set by x86-experimental-pref-loop-alignment."),
81     cl::Hidden);
82 
83 static cl::opt<bool> MulConstantOptimization(
84     "mul-constant-optimization", cl::init(true),
85     cl::desc("Replace 'mul x, Const' with more effective instructions like "
86              "SHIFT, LEA, etc."),
87     cl::Hidden);
88 
89 static cl::opt<bool> ExperimentalUnorderedISEL(
90     "x86-experimental-unordered-atomic-isel", cl::init(false),
91     cl::desc("Use LoadSDNode and StoreSDNode instead of "
92              "AtomicSDNode for unordered atomic loads and "
93              "stores respectively."),
94     cl::Hidden);
95 
96 /// Call this when the user attempts to do something unsupported, like
97 /// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
98 /// report_fatal_error, so calling code should attempt to recover without
99 /// crashing.
100 static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
101                              const char *Msg) {
102   MachineFunction &MF = DAG.getMachineFunction();
103   DAG.getContext()->diagnose(
104       DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
105 }
106 
107 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
108                                      const X86Subtarget &STI)
109     : TargetLowering(TM), Subtarget(STI) {
110   bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
111   MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
112 
113   // Set up the TargetLowering object.
114 
115   // X86 is weird. It always uses i8 for shift amounts and setcc results.
116   setBooleanContents(ZeroOrOneBooleanContent);
117   // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
118   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
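  // (e.g. PCMPEQD/PCMPGTD set each true lane to all ones and each false lane
  // to zero.)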
119 
120   // For 64-bit, since we have so many registers, use the ILP scheduler.
121   // For 32-bit, use the register pressure specific scheduling.
122   // For Atom, always use ILP scheduling.
123   if (Subtarget.isAtom())
124     setSchedulingPreference(Sched::ILP);
125   else if (Subtarget.is64Bit())
126     setSchedulingPreference(Sched::ILP);
127   else
128     setSchedulingPreference(Sched::RegPressure);
129   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
130   setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
131 
132   // Bypass expensive divides and use cheaper ones.
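  // Each addBypassSlowDiv(W, N) entry requests a run-time check that falls
  // back to an N-bit divide when both W-bit operands fit in N bits.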
133   if (TM.getOptLevel() >= CodeGenOpt::Default) {
134     if (Subtarget.hasSlowDivide32())
135       addBypassSlowDiv(32, 8);
136     if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
137       addBypassSlowDiv(64, 32);
138   }
139 
140   // Set up Windows compiler runtime calls.
141   if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
142     static const struct {
143       const RTLIB::Libcall Op;
144       const char * const Name;
145       const CallingConv::ID CC;
146     } LibraryCalls[] = {
147       { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
148       { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
149       { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
150       { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
151       { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
152     };
153 
154     for (const auto &LC : LibraryCalls) {
155       setLibcallName(LC.Op, LC.Name);
156       setLibcallCallingConv(LC.Op, LC.CC);
157     }
158   }
159 
160   if (Subtarget.getTargetTriple().isOSMSVCRT()) {
161     // MSVCRT doesn't have powi; fall back to pow
162     setLibcallName(RTLIB::POWI_F32, nullptr);
163     setLibcallName(RTLIB::POWI_F64, nullptr);
164   }
165 
166   // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
167   // to 32 bits so AtomicExpandPass will expand it and we don't need cmpxchg8b.
168   // FIXME: Should we be limiting the atomic size on other configs? Default is
169   // 1024.
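  // Atomics wider than the supported limit are expanded by AtomicExpandPass
  // into __atomic_* library calls.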
170   if (!Subtarget.canUseCMPXCHG8B())
171     setMaxAtomicSizeInBitsSupported(32);
172 
173   setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);
174 
175   setMaxLargeFPConvertBitWidthSupported(128);
176 
177   // Set up the register classes.
178   addRegisterClass(MVT::i8, &X86::GR8RegClass);
179   addRegisterClass(MVT::i16, &X86::GR16RegClass);
180   addRegisterClass(MVT::i32, &X86::GR32RegClass);
181   if (Subtarget.is64Bit())
182     addRegisterClass(MVT::i64, &X86::GR64RegClass);
183 
184   for (MVT VT : MVT::integer_valuetypes())
185     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
186 
187   // We don't accept any truncstore of integer registers.
188   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
189   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
190   setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
191   setTruncStoreAction(MVT::i32, MVT::i16, Expand);
192   setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
193   setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
194 
195   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
196 
197   // SETOEQ and SETUNE require checking two conditions.
198   for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
199     setCondCodeAction(ISD::SETOEQ, VT, Expand);
200     setCondCodeAction(ISD::SETUNE, VT, Expand);
201   }
202 
203   // Integer absolute.
204   if (Subtarget.canUseCMOV()) {
205     setOperationAction(ISD::ABS            , MVT::i16  , Custom);
206     setOperationAction(ISD::ABS            , MVT::i32  , Custom);
207     if (Subtarget.is64Bit())
208       setOperationAction(ISD::ABS          , MVT::i64  , Custom);
209   }
210 
211   // Signed saturation subtraction.
212   setOperationAction(ISD::SSUBSAT          , MVT::i8   , Custom);
213   setOperationAction(ISD::SSUBSAT          , MVT::i16  , Custom);
214   setOperationAction(ISD::SSUBSAT          , MVT::i32  , Custom);
215   if (Subtarget.is64Bit())
216     setOperationAction(ISD::SSUBSAT        , MVT::i64  , Custom);
217 
218   // Funnel shifts.
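  // i32/i64 funnel shifts map directly onto SHLD/SHRD; i8/i16 and the
  // slow-SHLD case are custom lowered.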
219   for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
220     // For slow shld targets we only lower for code size.
221     LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;
222 
223     setOperationAction(ShiftOp             , MVT::i8   , Custom);
224     setOperationAction(ShiftOp             , MVT::i16  , Custom);
225     setOperationAction(ShiftOp             , MVT::i32  , ShiftDoubleAction);
226     if (Subtarget.is64Bit())
227       setOperationAction(ShiftOp           , MVT::i64  , ShiftDoubleAction);
228   }
229 
230   if (!Subtarget.useSoftFloat()) {
231     // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
232     // operation.
233     setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
234     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
235     setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
236     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
237     // We have an algorithm for SSE2, and we turn this into a 64-bit
238     // FILD or VCVTUSI2SS/SD for other targets.
239     setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
240     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
241     // We have an algorithm for SSE2->double, and we turn this into a
242     // 64-bit FILD followed by conditional FADD for other targets.
243     setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
244     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
245 
246     // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
247     // this operation.
248     setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
249     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
250     // SSE has no i16 to fp conversion, only i32. We promote in the handler
251     // to allow f80 to use i16 and f64 to use i16 with SSE1 only.
252     setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
253     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
254     // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
255     setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
256     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
257     // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
258     // are Legal, f80 is custom lowered.
259     setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
260     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
261 
262     // Promote i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
263     // this operation.
264     setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
265     // FIXME: This doesn't generate invalid exception when it should. PR44019.
266     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
267     setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
268     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
269     setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
270     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
271     // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
272     // are Legal, f80 is custom lowered.
273     setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
274     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
275 
276     // Handle FP_TO_UINT by promoting the destination to a larger signed
277     // conversion.
278     setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
279     // FIXME: This doesn't generate invalid exception when it should. PR44019.
280     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
281     setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
282     // FIXME: This doesn't generate invalid exception when it should. PR44019.
283     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
284     setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
285     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
286     setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
287     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
288 
289     setOperationAction(ISD::LRINT,             MVT::f32, Custom);
290     setOperationAction(ISD::LRINT,             MVT::f64, Custom);
291     setOperationAction(ISD::LLRINT,            MVT::f32, Custom);
292     setOperationAction(ISD::LLRINT,            MVT::f64, Custom);
293 
294     if (!Subtarget.is64Bit()) {
295       setOperationAction(ISD::LRINT,  MVT::i64, Custom);
296       setOperationAction(ISD::LLRINT, MVT::i64, Custom);
297     }
298   }
299 
300   if (Subtarget.hasSSE2()) {
301     // Custom lowering for saturating float to int conversions.
302     // We handle promotion to larger result types manually.
303     for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
304       setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
305       setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
306     }
307     if (Subtarget.is64Bit()) {
308       setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
309       setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
310     }
311   }
312 
313   // Handle address space casts between mixed sized pointers.
314   setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
315   setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
316 
317   // TODO: when we have SSE, these could be more efficient, by using movd/movq.
318   if (!Subtarget.hasSSE2()) {
319     setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
320     setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
321     if (Subtarget.is64Bit()) {
322       setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
323       // Without SSE, i64->f64 goes through memory.
324       setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
325     }
326   } else if (!Subtarget.is64Bit())
327     setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);
328 
329   // Scalar integer divide and remainder are lowered to use operations that
330   // produce two results, to match the available instructions. This exposes
331   // the two-result form to trivial CSE, which is able to combine x/y and x%y
332   // into a single instruction.
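  // (On x86, div/idiv produce the quotient in EAX and the remainder in EDX for
  // 32-bit operands, so one division can serve both uses.)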
333   //
334   // Scalar integer multiply-high is also lowered to use two-result
335   // operations, to match the available instructions. However, plain multiply
336   // (low) operations are left as Legal, as there are single-result
337   // instructions for this in x86. Using the two-result multiply instructions
338   // when both high and low results are needed must be arranged by dagcombine.
339   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
340     setOperationAction(ISD::MULHS, VT, Expand);
341     setOperationAction(ISD::MULHU, VT, Expand);
342     setOperationAction(ISD::SDIV, VT, Expand);
343     setOperationAction(ISD::UDIV, VT, Expand);
344     setOperationAction(ISD::SREM, VT, Expand);
345     setOperationAction(ISD::UREM, VT, Expand);
346   }
347 
348   setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
349   setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
350   for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
351                    MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
352     setOperationAction(ISD::BR_CC,     VT, Expand);
353     setOperationAction(ISD::SELECT_CC, VT, Expand);
354   }
355   if (Subtarget.is64Bit())
356     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
357   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
358   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
359   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
360 
361   setOperationAction(ISD::FREM             , MVT::f32  , Expand);
362   setOperationAction(ISD::FREM             , MVT::f64  , Expand);
363   setOperationAction(ISD::FREM             , MVT::f80  , Expand);
364   setOperationAction(ISD::FREM             , MVT::f128 , Expand);
365 
366   if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
367     setOperationAction(ISD::GET_ROUNDING   , MVT::i32  , Custom);
368     setOperationAction(ISD::SET_ROUNDING   , MVT::Other, Custom);
369   }
370 
371   // Promote the i8 variants and force them up to i32, which has a shorter
372   // encoding.
373   setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
374   setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
375   // Promote i16 as well: tzcntw has a false dependency on Intel CPUs, and for
376   // BSF we emit a REP prefix to encode it as TZCNT on modern CPUs, so it makes
377   // sense to promote i16 too.
378   setOperationPromotedToType(ISD::CTTZ           , MVT::i16  , MVT::i32);
379   setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , MVT::i32);
380 
381   if (!Subtarget.hasBMI()) {
382     setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
383     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
384     if (Subtarget.is64Bit()) {
385       setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
386       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
387     }
388   }
389 
390   if (Subtarget.hasLZCNT()) {
391     // When promoting the i8 variants, force them to i32 for a shorter
392     // encoding.
393     setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
394     setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
395   } else {
396     for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
397       if (VT == MVT::i64 && !Subtarget.is64Bit())
398         continue;
399       setOperationAction(ISD::CTLZ           , VT, Custom);
400       setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
401     }
402   }
403 
404   for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
405                   ISD::STRICT_FP_TO_FP16}) {
406     // Special handling for half-precision floating point conversions.
407     // If we don't have F16C support, then lower half float conversions
408     // into library calls.
409     setOperationAction(
410         Op, MVT::f32,
411         (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
412     // There's never any support for operations beyond MVT::f32.
413     setOperationAction(Op, MVT::f64, Expand);
414     setOperationAction(Op, MVT::f80, Expand);
415     setOperationAction(Op, MVT::f128, Expand);
416   }
417 
418   for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
419     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
420     setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
421     setTruncStoreAction(VT, MVT::f16, Expand);
422     setTruncStoreAction(VT, MVT::bf16, Expand);
423 
424     setOperationAction(ISD::BF16_TO_FP, VT, Expand);
425     setOperationAction(ISD::FP_TO_BF16, VT, Custom);
426   }
427 
428   setOperationAction(ISD::PARITY, MVT::i8, Custom);
429   setOperationAction(ISD::PARITY, MVT::i16, Custom);
430   setOperationAction(ISD::PARITY, MVT::i32, Custom);
431   if (Subtarget.is64Bit())
432     setOperationAction(ISD::PARITY, MVT::i64, Custom);
433   if (Subtarget.hasPOPCNT()) {
434     setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
435     // popcntw is longer to encode than popcntl and also has a false dependency
436     // on the destination register, which popcntl has not had since Cannon Lake.
437     setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
438   } else {
439     setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
440     setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
441     setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
442     if (Subtarget.is64Bit())
443       setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
444     else
445       setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
446   }
447 
448   setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
449 
450   if (!Subtarget.hasMOVBE())
451     setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
452 
453   // X86 wants to expand cmov itself.
454   for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
455     setOperationAction(ISD::SELECT, VT, Custom);
456     setOperationAction(ISD::SETCC, VT, Custom);
457     setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
458     setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
459   }
460   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
461     if (VT == MVT::i64 && !Subtarget.is64Bit())
462       continue;
463     setOperationAction(ISD::SELECT, VT, Custom);
464     setOperationAction(ISD::SETCC,  VT, Custom);
465   }
466 
467   // Custom action for SELECT MMX and expand action for SELECT_CC MMX
468   setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
469   setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
470 
471   setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
472   // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
473   // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
474   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
475   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
476   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
477   if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
478     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
479 
480   // Darwin ABI issue.
481   for (auto VT : { MVT::i32, MVT::i64 }) {
482     if (VT == MVT::i64 && !Subtarget.is64Bit())
483       continue;
484     setOperationAction(ISD::ConstantPool    , VT, Custom);
485     setOperationAction(ISD::JumpTable       , VT, Custom);
486     setOperationAction(ISD::GlobalAddress   , VT, Custom);
487     setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
488     setOperationAction(ISD::ExternalSymbol  , VT, Custom);
489     setOperationAction(ISD::BlockAddress    , VT, Custom);
490   }
491 
492   // 64-bit shl, sra, srl (iff 32-bit x86)
493   for (auto VT : { MVT::i32, MVT::i64 }) {
494     if (VT == MVT::i64 && !Subtarget.is64Bit())
495       continue;
496     setOperationAction(ISD::SHL_PARTS, VT, Custom);
497     setOperationAction(ISD::SRA_PARTS, VT, Custom);
498     setOperationAction(ISD::SRL_PARTS, VT, Custom);
499   }
500 
501   if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
502     setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
503 
504   setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);
505 
506   // Expand certain atomics
507   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
508     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
509     setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
510     setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
511     setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
512     setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
513     setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
514     setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
515   }
516 
517   if (!Subtarget.is64Bit())
518     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
519 
520   if (Subtarget.canUseCMPXCHG16B())
521     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
522 
523   // FIXME - use subtarget debug flags
524   if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
525       !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
526       TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
527     setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
528   }
529 
530   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
531   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
532 
533   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
534   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
535 
536   setOperationAction(ISD::TRAP, MVT::Other, Legal);
537   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
538   if (Subtarget.isTargetPS())
539     setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
540   else
541     setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
542 
543   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
544   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
545   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
546   bool Is64Bit = Subtarget.is64Bit();
547   setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
548   setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
549 
550   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
551   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
552 
553   setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
554 
555   // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
556   setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
557   setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
558 
559   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
560 
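  // Helper giving most f16 (scalar and vector) ops a common default action.
  // FCOPYSIGN is always expanded and SELECT is always custom lowered; the basic
  // arithmetic ops (FADD/FSUB/FMUL/FDIV) are configured separately below.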
561   auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
562     setOperationAction(ISD::FABS, VT, Action);
563     setOperationAction(ISD::FNEG, VT, Action);
564     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
565     setOperationAction(ISD::FREM, VT, Action);
566     setOperationAction(ISD::FMA, VT, Action);
567     setOperationAction(ISD::FMINNUM, VT, Action);
568     setOperationAction(ISD::FMAXNUM, VT, Action);
569     setOperationAction(ISD::FMINIMUM, VT, Action);
570     setOperationAction(ISD::FMAXIMUM, VT, Action);
571     setOperationAction(ISD::FSIN, VT, Action);
572     setOperationAction(ISD::FCOS, VT, Action);
573     setOperationAction(ISD::FSINCOS, VT, Action);
574     setOperationAction(ISD::FSQRT, VT, Action);
575     setOperationAction(ISD::FPOW, VT, Action);
576     setOperationAction(ISD::FLOG, VT, Action);
577     setOperationAction(ISD::FLOG2, VT, Action);
578     setOperationAction(ISD::FLOG10, VT, Action);
579     setOperationAction(ISD::FEXP, VT, Action);
580     setOperationAction(ISD::FEXP2, VT, Action);
581     setOperationAction(ISD::FCEIL, VT, Action);
582     setOperationAction(ISD::FFLOOR, VT, Action);
583     setOperationAction(ISD::FNEARBYINT, VT, Action);
584     setOperationAction(ISD::FRINT, VT, Action);
585     setOperationAction(ISD::BR_CC, VT, Action);
586     setOperationAction(ISD::SETCC, VT, Action);
587     setOperationAction(ISD::SELECT, VT, Custom);
588     setOperationAction(ISD::SELECT_CC, VT, Action);
589     setOperationAction(ISD::FROUND, VT, Action);
590     setOperationAction(ISD::FROUNDEVEN, VT, Action);
591     setOperationAction(ISD::FTRUNC, VT, Action);
592   };
593 
594   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
595     // f16, f32 and f64 use SSE.
596     // Set up the FP register classes.
597     addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
598                                                      : &X86::FR16RegClass);
599     addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
600                                                      : &X86::FR32RegClass);
601     addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
602                                                      : &X86::FR64RegClass);
603 
604     // Disable f32->f64 extload as we can only generate this in one instruction
605     // under optsize. So it's easier to pattern match (fpext (load)) for that
606     // case instead of needing to emit 2 instructions for extload in the
607     // non-optsize case.
608     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
609 
610     for (auto VT : { MVT::f32, MVT::f64 }) {
611       // Use ANDPD to simulate FABS.
612       setOperationAction(ISD::FABS, VT, Custom);
613 
614       // Use XORP to simulate FNEG.
615       setOperationAction(ISD::FNEG, VT, Custom);
616 
617       // Use ANDPD and ORPD to simulate FCOPYSIGN.
618       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
619 
620       // These might be better off as horizontal vector ops.
621       setOperationAction(ISD::FADD, VT, Custom);
622       setOperationAction(ISD::FSUB, VT, Custom);
623 
624       // We don't support sin/cos/fmod
625       setOperationAction(ISD::FSIN   , VT, Expand);
626       setOperationAction(ISD::FCOS   , VT, Expand);
627       setOperationAction(ISD::FSINCOS, VT, Expand);
628     }
629 
630     // Half type will be promoted by default.
631     setF16Action(MVT::f16, Promote);
632     setOperationAction(ISD::FADD, MVT::f16, Promote);
633     setOperationAction(ISD::FSUB, MVT::f16, Promote);
634     setOperationAction(ISD::FMUL, MVT::f16, Promote);
635     setOperationAction(ISD::FDIV, MVT::f16, Promote);
636     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
637     setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
638     setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
639 
640     setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
641     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
642     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
643     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
644     setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
645     setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
646     setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
647     setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
648     setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
649     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
650     setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
651     setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
652     setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
653     setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
654     setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
655     setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
656     setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
657     setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
658     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
659     setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
660     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
661     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
662     setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
663     setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
664     setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
665     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
666     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
667     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
668 
669     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
670     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
671 
672     // Lower this to MOVMSK plus an AND.
673     setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
674     setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
675 
676   } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
677              (UseX87 || Is64Bit)) {
678     // Use SSE for f32, x87 for f64.
679     // Set up the FP register classes.
680     addRegisterClass(MVT::f32, &X86::FR32RegClass);
681     if (UseX87)
682       addRegisterClass(MVT::f64, &X86::RFP64RegClass);
683 
684     // Use ANDPS to simulate FABS.
685     setOperationAction(ISD::FABS , MVT::f32, Custom);
686 
687     // Use XORP to simulate FNEG.
688     setOperationAction(ISD::FNEG , MVT::f32, Custom);
689 
690     if (UseX87)
691       setOperationAction(ISD::UNDEF, MVT::f64, Expand);
692 
693     // Use ANDPS and ORPS to simulate FCOPYSIGN.
694     if (UseX87)
695       setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
696     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
697 
698     // We don't support sin/cos/fmod
699     setOperationAction(ISD::FSIN   , MVT::f32, Expand);
700     setOperationAction(ISD::FCOS   , MVT::f32, Expand);
701     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
702 
703     if (UseX87) {
704       // Always expand sin/cos functions even though x87 has an instruction.
705       setOperationAction(ISD::FSIN, MVT::f64, Expand);
706       setOperationAction(ISD::FCOS, MVT::f64, Expand);
707       setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
708     }
709   } else if (UseX87) {
710     // f32 and f64 in x87.
711     // Set up the FP register classes.
712     addRegisterClass(MVT::f64, &X86::RFP64RegClass);
713     addRegisterClass(MVT::f32, &X86::RFP32RegClass);
714 
715     for (auto VT : { MVT::f32, MVT::f64 }) {
716       setOperationAction(ISD::UNDEF,     VT, Expand);
717       setOperationAction(ISD::FCOPYSIGN, VT, Expand);
718 
719       // Always expand sin/cos functions even though x87 has an instruction.
720       setOperationAction(ISD::FSIN   , VT, Expand);
721       setOperationAction(ISD::FCOS   , VT, Expand);
722       setOperationAction(ISD::FSINCOS, VT, Expand);
723     }
724   }
725 
726   // Expand FP32 immediates into loads from the stack, save special cases.
727   if (isTypeLegal(MVT::f32)) {
728     if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
729       addLegalFPImmediate(APFloat(+0.0f)); // FLD0
730       addLegalFPImmediate(APFloat(+1.0f)); // FLD1
731       addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
732       addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
733     } else // SSE immediates.
734       addLegalFPImmediate(APFloat(+0.0f)); // xorps
735   }
736   // Expand FP64 immediates into loads from the stack, save special cases.
737   if (isTypeLegal(MVT::f64)) {
738     if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
739       addLegalFPImmediate(APFloat(+0.0)); // FLD0
740       addLegalFPImmediate(APFloat(+1.0)); // FLD1
741       addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
742       addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
743     } else // SSE immediates.
744       addLegalFPImmediate(APFloat(+0.0)); // xorpd
745   }
746   // Support fp16 0 immediate.
747   if (isTypeLegal(MVT::f16))
748     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));
749 
750   // Handle constrained floating-point operations on scalar types.
751   setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
752   setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
753   setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
754   setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
755   setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
756   setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
757   setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
758   setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
759   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
760   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
761   setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
762   setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);
763 
764   // We don't support FMA.
765   setOperationAction(ISD::FMA, MVT::f64, Expand);
766   setOperationAction(ISD::FMA, MVT::f32, Expand);
767 
768   // f80 always uses X87.
769   if (UseX87) {
770     addRegisterClass(MVT::f80, &X86::RFP80RegClass);
771     setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
772     setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
773     {
774       APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
775       addLegalFPImmediate(TmpFlt);  // FLD0
776       TmpFlt.changeSign();
777       addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
778 
779       bool ignored;
780       APFloat TmpFlt2(+1.0);
781       TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
782                       &ignored);
783       addLegalFPImmediate(TmpFlt2);  // FLD1
784       TmpFlt2.changeSign();
785       addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
786     }
787 
788     // Always expand sin/cos functions even though x87 has an instruction.
789     setOperationAction(ISD::FSIN   , MVT::f80, Expand);
790     setOperationAction(ISD::FCOS   , MVT::f80, Expand);
791     setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
792 
793     setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
794     setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
795     setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
796     setOperationAction(ISD::FRINT,  MVT::f80, Expand);
797     setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
798     setOperationAction(ISD::FMA, MVT::f80, Expand);
799     setOperationAction(ISD::LROUND, MVT::f80, Expand);
800     setOperationAction(ISD::LLROUND, MVT::f80, Expand);
801     setOperationAction(ISD::LRINT, MVT::f80, Custom);
802     setOperationAction(ISD::LLRINT, MVT::f80, Custom);
803 
804     // Handle constrained floating-point operations on scalar types.
805     setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
806     setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
807     setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
808     setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
809     setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
810     if (isTypeLegal(MVT::f16)) {
811       setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
812       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
813     } else {
814       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
815     }
816     // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
817     // as Custom.
818     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
819   }
820 
821   // f128 uses xmm registers, but most operations require libcalls.
822   if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
823     addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
824                                                    : &X86::VR128RegClass);
825 
826     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
827 
828     setOperationAction(ISD::FADD,        MVT::f128, LibCall);
829     setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
830     setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
831     setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
832     setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
833     setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
834     setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
835     setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
836     setOperationAction(ISD::FMA,         MVT::f128, LibCall);
837     setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);
838 
839     setOperationAction(ISD::FABS, MVT::f128, Custom);
840     setOperationAction(ISD::FNEG, MVT::f128, Custom);
841     setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
842 
843     setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
844     setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
845     setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
846     setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
847     setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
848     // No STRICT_FSINCOS
849     setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
850     setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
851 
852     setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
853     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
854     // We need to custom handle any FP_ROUND with an f128 input, but
855     // LegalizeDAG uses the result type to know when to run a custom handler.
856     // So we have to list all legal floating point result types here.
857     if (isTypeLegal(MVT::f32)) {
858       setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
859       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
860     }
861     if (isTypeLegal(MVT::f64)) {
862       setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
863       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
864     }
865     if (isTypeLegal(MVT::f80)) {
866       setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
867       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
868     }
869 
870     setOperationAction(ISD::SETCC, MVT::f128, Custom);
871 
872     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
873     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
874     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
875     setTruncStoreAction(MVT::f128, MVT::f32, Expand);
876     setTruncStoreAction(MVT::f128, MVT::f64, Expand);
877     setTruncStoreAction(MVT::f128, MVT::f80, Expand);
878   }
879 
880   // Always use a library call for pow.
881   setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
882   setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
883   setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
884   setOperationAction(ISD::FPOW             , MVT::f128 , Expand);
885 
886   setOperationAction(ISD::FLOG, MVT::f80, Expand);
887   setOperationAction(ISD::FLOG2, MVT::f80, Expand);
888   setOperationAction(ISD::FLOG10, MVT::f80, Expand);
889   setOperationAction(ISD::FEXP, MVT::f80, Expand);
890   setOperationAction(ISD::FEXP2, MVT::f80, Expand);
891   setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
892   setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
893 
894   // Some FP actions are always expanded for vector types.
895   for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
896                    MVT::v4f32, MVT::v8f32,  MVT::v16f32,
897                    MVT::v2f64, MVT::v4f64,  MVT::v8f64 }) {
898     setOperationAction(ISD::FSIN,      VT, Expand);
899     setOperationAction(ISD::FSINCOS,   VT, Expand);
900     setOperationAction(ISD::FCOS,      VT, Expand);
901     setOperationAction(ISD::FREM,      VT, Expand);
902     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
903     setOperationAction(ISD::FPOW,      VT, Expand);
904     setOperationAction(ISD::FLOG,      VT, Expand);
905     setOperationAction(ISD::FLOG2,     VT, Expand);
906     setOperationAction(ISD::FLOG10,    VT, Expand);
907     setOperationAction(ISD::FEXP,      VT, Expand);
908     setOperationAction(ISD::FEXP2,     VT, Expand);
909   }
910 
911   // First set operation action for all vector types to either promote
912   // (for widening) or expand (for scalarization). Then we will selectively
913   // turn on ones that can be effectively codegen'd.
914   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
915     setOperationAction(ISD::SDIV, VT, Expand);
916     setOperationAction(ISD::UDIV, VT, Expand);
917     setOperationAction(ISD::SREM, VT, Expand);
918     setOperationAction(ISD::UREM, VT, Expand);
919     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
920     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
921     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
922     setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
923     setOperationAction(ISD::FMA,  VT, Expand);
924     setOperationAction(ISD::FFLOOR, VT, Expand);
925     setOperationAction(ISD::FCEIL, VT, Expand);
926     setOperationAction(ISD::FTRUNC, VT, Expand);
927     setOperationAction(ISD::FRINT, VT, Expand);
928     setOperationAction(ISD::FNEARBYINT, VT, Expand);
929     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
930     setOperationAction(ISD::MULHS, VT, Expand);
931     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
932     setOperationAction(ISD::MULHU, VT, Expand);
933     setOperationAction(ISD::SDIVREM, VT, Expand);
934     setOperationAction(ISD::UDIVREM, VT, Expand);
935     setOperationAction(ISD::CTPOP, VT, Expand);
936     setOperationAction(ISD::CTTZ, VT, Expand);
937     setOperationAction(ISD::CTLZ, VT, Expand);
938     setOperationAction(ISD::ROTL, VT, Expand);
939     setOperationAction(ISD::ROTR, VT, Expand);
940     setOperationAction(ISD::BSWAP, VT, Expand);
941     setOperationAction(ISD::SETCC, VT, Expand);
942     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
943     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
944     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
945     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
946     setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
947     setOperationAction(ISD::TRUNCATE, VT, Expand);
948     setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
949     setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
950     setOperationAction(ISD::ANY_EXTEND, VT, Expand);
951     setOperationAction(ISD::SELECT_CC, VT, Expand);
952     for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
953       setTruncStoreAction(InnerVT, VT, Expand);
954 
955       setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
956       setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
957 
958       // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
959       // types; we have to deal with them whether we ask for Expansion or not.
960       // Setting Expand causes its own optimisation problems though, so leave
961       // them legal.
962       if (VT.getVectorElementType() == MVT::i1)
963         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
964 
965       // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
966       // split/scalarized right now.
967       if (VT.getVectorElementType() == MVT::f16 ||
968           VT.getVectorElementType() == MVT::bf16)
969         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
970     }
971   }
972 
973   // FIXME: In order to prevent SSE instructions being expanded to MMX ones
974   // with -msoft-float, disable use of MMX as well.
975   if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
976     addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
977     // No operations on x86mmx are supported; everything uses intrinsics.
978   }
979 
980   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
981     addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
982                                                     : &X86::VR128RegClass);
983 
984     setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
985     setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
986     setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
987     setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
988     setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
989     setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
990     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
991     setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
992 
993     setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
994     setOperationAction(ISD::STORE,              MVT::v2f32, Custom);
995 
996     setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
997     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
998     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
999     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
1000     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
1001   }
1002 
1003   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
1004     addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1005                                                     : &X86::VR128RegClass);
1006 
1007     // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
1008     // registers cannot be used even for integer operations.
1009     addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
1010                                                     : &X86::VR128RegClass);
1011     addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1012                                                     : &X86::VR128RegClass);
1013     addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1014                                                     : &X86::VR128RegClass);
1015     addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1016                                                     : &X86::VR128RegClass);
1017     addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1018                                                     : &X86::VR128RegClass);
1019 
1020     for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
1021                      MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
1022       setOperationAction(ISD::SDIV, VT, Custom);
1023       setOperationAction(ISD::SREM, VT, Custom);
1024       setOperationAction(ISD::UDIV, VT, Custom);
1025       setOperationAction(ISD::UREM, VT, Custom);
1026     }
1027 
1028     setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
1029     setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
1030     setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);
1031 
1032     setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
1033     setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
1034     setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
1035     setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
1036     setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
1037     setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
1038     setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
1039     setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
1040     setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
1041     setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
1042     setOperationAction(ISD::AVGCEILU,           MVT::v16i8, Legal);
1043     setOperationAction(ISD::AVGCEILU,           MVT::v8i16, Legal);
1044 
1045     setOperationAction(ISD::SMULO,              MVT::v16i8, Custom);
1046     setOperationAction(ISD::UMULO,              MVT::v16i8, Custom);
1047     setOperationAction(ISD::UMULO,              MVT::v2i32, Custom);
1048 
1049     setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
1050     setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
1051     setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
1052 
1053     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1054       setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
1055       setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
1056       setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
1057       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
1058     }
1059 
1060     setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
1061     setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
1062     setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
1063     setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
1064     setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
1065     setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
1066     setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
1067     setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
1068     setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
1069     setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
1070 
1071     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1072     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
1073     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
1074     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
1075 
1076     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1077       setOperationAction(ISD::SETCC,              VT, Custom);
1078       setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
1079       setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
1080       setOperationAction(ISD::CTPOP,              VT, Custom);
1081       setOperationAction(ISD::ABS,                VT, Custom);
1082 
1083       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1084       // setcc all the way to isel and prefer SETGT in some isel patterns.
1085       setCondCodeAction(ISD::SETLT, VT, Custom);
1086       setCondCodeAction(ISD::SETLE, VT, Custom);
1087     }
1088 
1089     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1090       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1091       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1092       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1093       setOperationAction(ISD::VSELECT,            VT, Custom);
1094       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1095     }
1096 
1097     for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
1098       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1099       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1100       setOperationAction(ISD::VSELECT,            VT, Custom);
1101 
1102       if (VT == MVT::v2i64 && !Subtarget.is64Bit())
1103         continue;
1104 
1105       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1106       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1107     }
1108     setF16Action(MVT::v8f16, Expand);
1109     setOperationAction(ISD::FADD, MVT::v8f16, Expand);
1110     setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
1111     setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
1112     setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
1113 
1114     // Custom lower v2i64 and v2f64 selects.
1115     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
1116     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
1117     setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
1118     setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
1119     setOperationAction(ISD::SELECT,             MVT::v8f16, Custom);
1120     setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
1121 
1122     setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Custom);
1123     setOperationAction(ISD::FP_TO_UINT,         MVT::v4i32, Custom);
1124     setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
1125     setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1126     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Custom);
1127     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
1128 
1129     // Custom legalize these to avoid over promotion or custom promotion.
1130     for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
1131       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1132       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1133       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1134       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1135     }
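    // Illustrative sketch (an assumption about the shape, not the exact code):
    // e.g. (v2i8 (fp_to_sint (v2f64 X))) is handled by converting to a legal
    // v4i32 result first and then truncating, rather than letting type
    // legalization promote the narrow result element by element.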
1136 
1137     setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Custom);
1138     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Custom);
1139     setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
1140     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
1141 
1142     setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
1143     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
1144 
1145     setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
1146     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
1147 
1148     // Fast v2f32 SINT_TO_FP / UINT_TO_FP( v2i32 ) custom conversions.
1149     setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
1150     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
1151     setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
1152     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
1153 
1154     setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
1155     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
1156     setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
1157     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1158 
1159     // We want to legalize this to an f64 load rather than an i64 load on
1160     // 64-bit targets, or two 32-bit loads on a 32-bit target. Similarly for
1161     // stores.
1162     setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1163     setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1164     setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1165     setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1166     setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1167     setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
1168 
1169     // Add 32-bit vector stores to help vectorization opportunities.
1170     setOperationAction(ISD::STORE,              MVT::v2i16, Custom);
1171     setOperationAction(ISD::STORE,              MVT::v4i8,  Custom);
1172 
1173     setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1174     setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1175     setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1176     if (!Subtarget.hasAVX512())
1177       setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1178 
1179     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1180     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1181     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1182 
1183     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1184 
1185     setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1186     setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1187     setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1188     setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1189     setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1190     setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1191 
1192     // In the custom shift lowering, the v4i32/v2i64 cases that are legal
1193     // with AVX2 will be recognized.
1194     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1195       setOperationAction(ISD::SRL,              VT, Custom);
1196       setOperationAction(ISD::SHL,              VT, Custom);
1197       setOperationAction(ISD::SRA,              VT, Custom);
1198       if (VT == MVT::v2i64) continue;
1199       setOperationAction(ISD::ROTL,             VT, Custom);
1200       setOperationAction(ISD::ROTR,             VT, Custom);
1201       setOperationAction(ISD::FSHL,             VT, Custom);
1202       setOperationAction(ISD::FSHR,             VT, Custom);
1203     }
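    // Illustrative note: with AVX2, variable shifts of v4i32/v2i64 can select
    // directly to VPSLLVD/VPSRLVD/VPSRAVD and VPSLLVQ/VPSRLVQ; without them the
    // custom lowering falls back to constant-shift or per-element sequences
    // (a rough description of the intent, not an exhaustive one).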
1204 
1205     setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1206     setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1207     setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1208     setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1209     setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1210   }
1211 
1212   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1213     setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1214     setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1215     setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1216     setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1217     setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1218     setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1219     setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1220     setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1221 
1222     // These might be better off as horizontal vector ops.
1223     setOperationAction(ISD::ADD,                MVT::i16, Custom);
1224     setOperationAction(ISD::ADD,                MVT::i32, Custom);
1225     setOperationAction(ISD::SUB,                MVT::i16, Custom);
1226     setOperationAction(ISD::SUB,                MVT::i32, Custom);
1227   }
1228 
1229   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1230     for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1231       setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1232       setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1233       setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1234       setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1235       setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1236       setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1237       setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1238       setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1239       setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1240       setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1241       setOperationAction(ISD::FROUNDEVEN,        RoundedTy,  Legal);
1242       setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy,  Legal);
1243 
1244       setOperationAction(ISD::FROUND,            RoundedTy,  Custom);
1245     }
1246 
1247     setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1248     setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1249     setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1250     setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1251     setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1252     setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1253     setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1254     setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1255 
1256     setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
1257     setOperationAction(ISD::SADDSAT,            MVT::v2i64, Custom);
1258     setOperationAction(ISD::SSUBSAT,            MVT::v2i64, Custom);
1259 
1260     // FIXME: Do we need to handle scalar-to-vector here?
1261     setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1262     setOperationAction(ISD::SMULO,              MVT::v2i32, Custom);
1263 
1264     // We directly match byte blends in the backend as they match the VSELECT
1265     // condition form.
1266     setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1267 
1268     // SSE41 brings specific instructions for doing vector sign extend even in
1269     // cases where we don't have SRA.
1270     for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1271       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1272       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1273     }
1274 
1275     // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1276     for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1277       setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1278       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1279       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1280       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1281       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1282       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1283     }
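    // e.g. (illustrative) a zextload from v4i8 to v4i32 can select to PMOVZXBD
    // with a memory operand, and the corresponding sextload to PMOVSXBD.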
1284 
1285     if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1286       // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1287       // do the pre- and post-work in the vector domain.
1288       setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1289       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1290       // We need to mark SINT_TO_FP as Custom even though we want to expand it
1291       // so that DAG combine doesn't try to turn it into uint_to_fp.
1292       setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1293       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1294     }
1295   }
1296 
1297   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
1298     setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
1299   }
1300 
1301   if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1302     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1303                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1304       setOperationAction(ISD::ROTL, VT, Custom);
1305       setOperationAction(ISD::ROTR, VT, Custom);
1306     }
1307 
1308     // XOP can efficiently perform BITREVERSE with VPPERM.
1309     for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1310       setOperationAction(ISD::BITREVERSE, VT, Custom);
1311 
1312     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1313                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1314       setOperationAction(ISD::BITREVERSE, VT, Custom);
1315   }
1316 
1317   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1318     bool HasInt256 = Subtarget.hasInt256();
1319 
1320     addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1321                                                      : &X86::VR256RegClass);
1322     addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1323                                                      : &X86::VR256RegClass);
1324     addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1325                                                      : &X86::VR256RegClass);
1326     addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1327                                                      : &X86::VR256RegClass);
1328     addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1329                                                      : &X86::VR256RegClass);
1330     addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1331                                                      : &X86::VR256RegClass);
1332     addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1333                                                      : &X86::VR256RegClass);
1334 
1335     for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1336       setOperationAction(ISD::FFLOOR,            VT, Legal);
1337       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1338       setOperationAction(ISD::FCEIL,             VT, Legal);
1339       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1340       setOperationAction(ISD::FTRUNC,            VT, Legal);
1341       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1342       setOperationAction(ISD::FRINT,             VT, Legal);
1343       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1344       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1345       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1346       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1347       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1348 
1349       setOperationAction(ISD::FROUND,            VT, Custom);
1350 
1351       setOperationAction(ISD::FNEG,              VT, Custom);
1352       setOperationAction(ISD::FABS,              VT, Custom);
1353       setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1354     }
1355 
1356     // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1357     // even though v8i16 is a legal type.
1358     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1359     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1360     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1361     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
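    // Illustrative sketch of the promotion above: (v8i16 (fp_to_sint (v8f32 X)))
    // is performed as a v8i32 fp_to_sint (the natural CVTTPS2DQ result width)
    // followed by a truncate back to v8i16.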
1362     setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Custom);
1363     setOperationAction(ISD::FP_TO_UINT,                MVT::v8i32, Custom);
1364     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Custom);
1365 
1366     setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Custom);
1367     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Custom);
1368     setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Expand);
1369     setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Expand);
1370     setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
1371     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Custom);
1372 
1373     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1374     setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1375     setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1376     setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1377     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1378     setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1379     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1380     setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1381     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1382     setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1383     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1384 
1385     if (!Subtarget.hasAVX512())
1386       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1387 
1388     // In the custom shift lowering, the v8i32/v4i64 cases that are legal
1389     // with AVX2 will be recognized.
1390     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1391       setOperationAction(ISD::SRL, VT, Custom);
1392       setOperationAction(ISD::SHL, VT, Custom);
1393       setOperationAction(ISD::SRA, VT, Custom);
1394       if (VT == MVT::v4i64) continue;
1395       setOperationAction(ISD::ROTL, VT, Custom);
1396       setOperationAction(ISD::ROTR, VT, Custom);
1397       setOperationAction(ISD::FSHL, VT, Custom);
1398       setOperationAction(ISD::FSHR, VT, Custom);
1399     }
1400 
1401     // These types need custom splitting if their input is a 128-bit vector.
1402     setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1403     setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1404     setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1405     setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1406 
1407     setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1408     setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1409     setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1410     setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1411     setOperationAction(ISD::SELECT,            MVT::v16f16, Custom);
1412     setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1413     setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1414 
1415     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1416       setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1417       setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1418       setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1419     }
1420 
1421     setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
1422     setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
1423     setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);
1424     setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1425 
1426     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1427       setOperationAction(ISD::SETCC,           VT, Custom);
1428       setOperationAction(ISD::STRICT_FSETCC,   VT, Custom);
1429       setOperationAction(ISD::STRICT_FSETCCS,  VT, Custom);
1430       setOperationAction(ISD::CTPOP,           VT, Custom);
1431       setOperationAction(ISD::CTLZ,            VT, Custom);
1432 
1433       // SETLT/SETLE aren't legal in SSE/AVX; under AVX512 we keep setcc all the
1434       // way to isel and prefer SETGT in some isel patterns.
1435       setCondCodeAction(ISD::SETLT, VT, Custom);
1436       setCondCodeAction(ISD::SETLE, VT, Custom);
1437     }
1438 
1439     if (Subtarget.hasAnyFMA()) {
1440       for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1441                        MVT::v2f64, MVT::v4f64 }) {
1442         setOperationAction(ISD::FMA, VT, Legal);
1443         setOperationAction(ISD::STRICT_FMA, VT, Legal);
1444       }
1445     }
1446 
1447     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1448       setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1449       setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1450     }
1451 
1452     setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1453     setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1454     setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1455     setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1456 
1457     setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1458     setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1459     setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1460     setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1461     setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1462     setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1463     setOperationAction(ISD::AVGCEILU,  MVT::v16i16, HasInt256 ? Legal : Custom);
1464     setOperationAction(ISD::AVGCEILU,  MVT::v32i8,  HasInt256 ? Legal : Custom);
1465 
1466     setOperationAction(ISD::SMULO,     MVT::v32i8, Custom);
1467     setOperationAction(ISD::UMULO,     MVT::v32i8, Custom);
1468 
1469     setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1470     setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1471     setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1472     setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1473     setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1474 
1475     setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1476     setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1477     setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1478     setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1479     setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1480     setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1481     setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1482     setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1483     setOperationAction(ISD::UADDSAT,   MVT::v8i32, Custom);
1484     setOperationAction(ISD::USUBSAT,   MVT::v8i32, Custom);
1485     setOperationAction(ISD::UADDSAT,   MVT::v4i64, Custom);
1486     setOperationAction(ISD::USUBSAT,   MVT::v4i64, Custom);
1487 
1488     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1489       setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1490       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1491       setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1492       setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1493       setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1494     }
1495 
1496     for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1497       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1498       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1499     }
1500 
1501     if (HasInt256) {
1502       // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1503       // when we have a 256-bit-wide blend with immediate.
1504       setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1505       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1506 
1507       // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1508       for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1509         setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1510         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1511         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1512         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1513         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1514         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1515       }
1516     }
1517 
1518     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1519                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1520       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1521       setOperationAction(ISD::MSTORE, VT, Legal);
1522     }
1523 
1524     // Extract subvector is special because the value type
1525     // (result) is 128-bit but the source is 256-bit wide.
1526     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1527                      MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1528       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1529     }
1530 
1531     // Custom lower several nodes for 256-bit types.
1532     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1533                     MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1534       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1535       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1536       setOperationAction(ISD::VSELECT,            VT, Custom);
1537       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1538       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1539       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1540       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1541       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1542       setOperationAction(ISD::STORE,              VT, Custom);
1543     }
1544     setF16Action(MVT::v16f16, Expand);
1545     setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1546     setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1547     setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1548     setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1549 
1550     if (HasInt256) {
1551       setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1552 
1553       // Custom legalize 2x32 to get a little better code.
1554       setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1555       setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1556 
1557       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1558                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1559         setOperationAction(ISD::MGATHER,  VT, Custom);
1560     }
1561   }
1562 
1563   if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1564       Subtarget.hasF16C()) {
1565     for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1566       setOperationAction(ISD::FP_ROUND,           VT, Custom);
1567       setOperationAction(ISD::STRICT_FP_ROUND,    VT, Custom);
1568     }
1569     for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32 }) {
1570       setOperationAction(ISD::FP_EXTEND,          VT, Custom);
1571       setOperationAction(ISD::STRICT_FP_EXTEND,   VT, Custom);
1572     }
1573     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1574       setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1575       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1576     }
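    // Roughly, the promotion above means a v8f16/v16f16 arithmetic op is done
    // in f32: extend the operands (VCVTPH2PS), perform the op, then round the
    // result back (VCVTPS2PH). An illustrative description, not the literal
    // expansion code.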
1577 
1578     setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1579     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
1580   }
1581 
1582   // This block controls legalization of the mask vector sizes that are
1583   // available with AVX512. 512-bit vectors are in a separate block controlled
1584   // by useAVX512Regs.
1585   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1586     addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1587     addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1588     addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1589     addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1590     addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1591 
1592     setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1593     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1594     setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1595 
1596     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1597     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1598     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1599     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1600     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1601     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1602     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1603     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1604     setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1605     setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1606     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1607     setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1608 
1609     // There is no byte-sized k-register load or store without AVX512DQ.
1610     if (!Subtarget.hasDQI()) {
1611       setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1612       setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1613       setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1614       setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1615 
1616       setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1617       setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1618       setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1619       setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1620     }
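    // Illustrative note (mechanism hedged): KMOVB is an AVX512DQ instruction,
    // so without DQI these narrow mask loads/stores are custom lowered to go
    // through a wider mask register or a GPR rather than a byte-sized k-move.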
1621 
1622     // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1623     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1624       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1625       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1626       setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1627     }
1628 
1629     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1630       setOperationAction(ISD::VSELECT,          VT, Expand);
1631 
1632     for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1633       setOperationAction(ISD::SETCC,            VT, Custom);
1634       setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1635       setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1636       setOperationAction(ISD::SELECT,           VT, Custom);
1637       setOperationAction(ISD::TRUNCATE,         VT, Custom);
1638 
1639       setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1640       setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1641       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1642       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1643       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1644       setOperationAction(ISD::VECTOR_SHUFFLE,   VT,  Custom);
1645     }
1646 
1647     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1648       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1649   }
1650 
1651   // This block controls legalization for 512-bit operations with 32/64 bit
1652   // elements. 512-bits can be disabled based on prefer-vector-width and
1653   // required-vector-width function attributes.
1654   if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1655     bool HasBWI = Subtarget.hasBWI();
1656 
1657     addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1658     addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1659     addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1660     addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1661     addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1662     addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1663     addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1664 
1665     for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1666       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1667       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1668       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1669       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1670       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1671       if (HasBWI)
1672         setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1673     }
1674 
1675     for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1676       setOperationAction(ISD::FNEG,  VT, Custom);
1677       setOperationAction(ISD::FABS,  VT, Custom);
1678       setOperationAction(ISD::FMA,   VT, Legal);
1679       setOperationAction(ISD::STRICT_FMA, VT, Legal);
1680       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1681     }
1682 
1683     for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1684       setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1685       setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1686       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1687       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1688     }
1689     setOperationAction(ISD::FP_TO_SINT,        MVT::v16i32, Custom);
1690     setOperationAction(ISD::FP_TO_UINT,        MVT::v16i32, Custom);
1691     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Custom);
1692     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Custom);
1693     setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Custom);
1694     setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Custom);
1695     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1696     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1697     setOperationAction(ISD::FP_EXTEND,         MVT::v8f64,  Custom);
1698     setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v8f64,  Custom);
1699 
1700     setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1701     setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1702     setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1703     setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1704     setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1705     setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1706     setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1707     setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1708     setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1709     setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1710     setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1711 
1712     setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1713     setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1714     setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1715     setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1716     setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1717     if (HasBWI)
1718       setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1719 
1720     // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1721     // to 512-bit rather than use the AVX2 instructions so that we can use
1722     // k-masks.
1723     if (!Subtarget.hasVLX()) {
1724       for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1725            MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1726         setOperationAction(ISD::MLOAD,  VT, Custom);
1727         setOperationAction(ISD::MSTORE, VT, Custom);
1728       }
1729     }
1730 
1731     setOperationAction(ISD::TRUNCATE,    MVT::v8i32,  Legal);
1732     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Legal);
1733     setOperationAction(ISD::TRUNCATE,    MVT::v32i8,  HasBWI ? Legal : Custom);
1734     setOperationAction(ISD::TRUNCATE,    MVT::v16i64, Custom);
1735     setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1736     setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1737     setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64,  Custom);
1738     setOperationAction(ISD::ANY_EXTEND,  MVT::v32i16, Custom);
1739     setOperationAction(ISD::ANY_EXTEND,  MVT::v16i32, Custom);
1740     setOperationAction(ISD::ANY_EXTEND,  MVT::v8i64,  Custom);
1741     setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1742     setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1743     setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64,  Custom);
1744 
1745     if (HasBWI) {
1746       // Extends from v64i1 masks to 512-bit vectors.
1747       setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1748       setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1749       setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1750     }
1751 
1752     for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1753       setOperationAction(ISD::FFLOOR,            VT, Legal);
1754       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1755       setOperationAction(ISD::FCEIL,             VT, Legal);
1756       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1757       setOperationAction(ISD::FTRUNC,            VT, Legal);
1758       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1759       setOperationAction(ISD::FRINT,             VT, Legal);
1760       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1761       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1762       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1763       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1764       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1765 
1766       setOperationAction(ISD::FROUND,            VT, Custom);
1767     }
1768 
1769     for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1770       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1771       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1772     }
1773 
1774     setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1775     setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1776     setOperationAction(ISD::ADD, MVT::v64i8,  HasBWI ? Legal : Custom);
1777     setOperationAction(ISD::SUB, MVT::v64i8,  HasBWI ? Legal : Custom);
1778 
1779     setOperationAction(ISD::MUL, MVT::v8i64,  Custom);
1780     setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1781     setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1782     setOperationAction(ISD::MUL, MVT::v64i8,  Custom);
1783 
1784     setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1785     setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1786     setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1787     setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1788     setOperationAction(ISD::MULHS, MVT::v64i8,  Custom);
1789     setOperationAction(ISD::MULHU, MVT::v64i8,  Custom);
1790     setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1791     setOperationAction(ISD::AVGCEILU, MVT::v64i8,  HasBWI ? Legal : Custom);
1792 
1793     setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1794     setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1795 
1796     setOperationAction(ISD::BITREVERSE, MVT::v64i8,  Custom);
1797 
1798     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1799       setOperationAction(ISD::SRL,              VT, Custom);
1800       setOperationAction(ISD::SHL,              VT, Custom);
1801       setOperationAction(ISD::SRA,              VT, Custom);
1802       setOperationAction(ISD::ROTL,             VT, Custom);
1803       setOperationAction(ISD::ROTR,             VT, Custom);
1804       setOperationAction(ISD::SETCC,            VT, Custom);
1805 
1806       // SETLT/SETLE aren't legal in SSE/AVX; under AVX512 we keep setcc all the
1807       // way to isel and prefer SETGT in some isel patterns.
1808       setCondCodeAction(ISD::SETLT, VT, Custom);
1809       setCondCodeAction(ISD::SETLE, VT, Custom);
1810     }
1811     for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1812       setOperationAction(ISD::SMAX,             VT, Legal);
1813       setOperationAction(ISD::UMAX,             VT, Legal);
1814       setOperationAction(ISD::SMIN,             VT, Legal);
1815       setOperationAction(ISD::UMIN,             VT, Legal);
1816       setOperationAction(ISD::ABS,              VT, Legal);
1817       setOperationAction(ISD::CTPOP,            VT, Custom);
1818       setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1819       setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1820     }
1821 
1822     for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1823       setOperationAction(ISD::ABS,     VT, HasBWI ? Legal : Custom);
1824       setOperationAction(ISD::CTPOP,   VT, Subtarget.hasBITALG() ? Legal : Custom);
1825       setOperationAction(ISD::CTLZ,    VT, Custom);
1826       setOperationAction(ISD::SMAX,    VT, HasBWI ? Legal : Custom);
1827       setOperationAction(ISD::UMAX,    VT, HasBWI ? Legal : Custom);
1828       setOperationAction(ISD::SMIN,    VT, HasBWI ? Legal : Custom);
1829       setOperationAction(ISD::UMIN,    VT, HasBWI ? Legal : Custom);
1830       setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1831       setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1832       setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1833       setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1834     }
1835 
1836     setOperationAction(ISD::FSHL,       MVT::v64i8, Custom);
1837     setOperationAction(ISD::FSHR,       MVT::v64i8, Custom);
1838     setOperationAction(ISD::FSHL,      MVT::v32i16, Custom);
1839     setOperationAction(ISD::FSHR,      MVT::v32i16, Custom);
1840     setOperationAction(ISD::FSHL,      MVT::v16i32, Custom);
1841     setOperationAction(ISD::FSHR,      MVT::v16i32, Custom);
1842 
1843     if (Subtarget.hasDQI()) {
1844       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1845                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1846                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1847         setOperationAction(Opc,           MVT::v8i64, Custom);
1848       setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1849     }
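    // Illustrative note: AVX512DQ provides VPMULLQ for the v8i64 multiply and
    // the VCVTQQ2PD / VCVTTPD2QQ (and unsigned) forms for the 64-bit
    // integer <-> FP conversions enabled above.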
1850 
1851     if (Subtarget.hasCDI()) {
1852       // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
1853       for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1854         setOperationAction(ISD::CTLZ,            VT, Legal);
1855       }
1856     } // Subtarget.hasCDI()
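    // e.g. AVX512CD's VPLZCNTD/VPLZCNTQ implement CTLZ for v16i32/v8i64
    // directly, which is why it can be marked Legal here.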
1857 
1858     if (Subtarget.hasVPOPCNTDQ()) {
1859       for (auto VT : { MVT::v16i32, MVT::v8i64 })
1860         setOperationAction(ISD::CTPOP, VT, Legal);
1861     }
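    // e.g. AVX512VPOPCNTDQ's VPOPCNTD/VPOPCNTQ provide a native CTPOP for
    // v16i32/v8i64.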
1862 
1863     // Extract subvector is special because the value type
1864     // (result) is 256-bit but the source is 512-bit wide.
1865     // 128-bit was made Legal under AVX1.
1866     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1867                      MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1868       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1869 
1870     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1871                      MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1872       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1873       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1874       setOperationAction(ISD::SELECT,             VT, Custom);
1875       setOperationAction(ISD::VSELECT,            VT, Custom);
1876       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1877       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1878       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1879       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1880       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1881     }
1882     setF16Action(MVT::v32f16, Expand);
1883     setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1884     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1885     setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
1886     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
1887     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1888       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1889       setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1890     }
1891 
1892     for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1893       setOperationAction(ISD::MLOAD,               VT, Legal);
1894       setOperationAction(ISD::MSTORE,              VT, Legal);
1895       setOperationAction(ISD::MGATHER,             VT, Custom);
1896       setOperationAction(ISD::MSCATTER,            VT, Custom);
1897     }
1898     if (HasBWI) {
1899       for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1900         setOperationAction(ISD::MLOAD,        VT, Legal);
1901         setOperationAction(ISD::MSTORE,       VT, Legal);
1902       }
1903     } else {
1904       setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1905       setOperationAction(ISD::STORE, MVT::v64i8,  Custom);
1906     }
1907 
1908     if (Subtarget.hasVBMI2()) {
1909       for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1910                        MVT::v16i16, MVT::v8i32, MVT::v4i64,
1911                        MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1912         setOperationAction(ISD::FSHL, VT, Custom);
1913         setOperationAction(ISD::FSHR, VT, Custom);
1914       }
1915 
1916       setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1917       setOperationAction(ISD::ROTR, MVT::v8i16,  Custom);
1918       setOperationAction(ISD::ROTR, MVT::v16i16, Custom);
1919       setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
1920     }
1921   }// useAVX512Regs
1922 
1923   // This block controls legalization for operations that don't have
1924   // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1925   // narrower widths.
1926   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1927     // These operations are handled on non-VLX by artificially widening in
1928     // isel patterns.
1929 
1930     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i32, Custom);
1931     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v4i32, Custom);
1932     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1933 
1934     if (Subtarget.hasDQI()) {
1935       // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1936       // v2f32 UINT_TO_FP is already custom under SSE2.
1937       assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1938              isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1939              "Unexpected operation action!");
1940       // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1941       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
1942       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
1943       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1944       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1945     }
1946 
1947     for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1948       setOperationAction(ISD::SMAX, VT, Legal);
1949       setOperationAction(ISD::UMAX, VT, Legal);
1950       setOperationAction(ISD::SMIN, VT, Legal);
1951       setOperationAction(ISD::UMIN, VT, Legal);
1952       setOperationAction(ISD::ABS,  VT, Legal);
1953     }
1954 
1955     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1956       setOperationAction(ISD::ROTL,     VT, Custom);
1957       setOperationAction(ISD::ROTR,     VT, Custom);
1958     }
1959 
1960     // Custom legalize 2x32 to get a little better code.
1961     setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1962     setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1963 
1964     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1965                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1966       setOperationAction(ISD::MSCATTER, VT, Custom);
1967 
1968     if (Subtarget.hasDQI()) {
1969       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1970                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1971                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
1972         setOperationAction(Opc, MVT::v2i64, Custom);
1973         setOperationAction(Opc, MVT::v4i64, Custom);
1974       }
1975       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1976       setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1977     }
1978 
1979     if (Subtarget.hasCDI()) {
1980       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1981         setOperationAction(ISD::CTLZ,            VT, Legal);
1982       }
1983     } // Subtarget.hasCDI()
1984 
1985     if (Subtarget.hasVPOPCNTDQ()) {
1986       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1987         setOperationAction(ISD::CTPOP, VT, Legal);
1988     }
1989   }
1990 
1991   // This block controls legalization of v32i1/v64i1, which are available with
1992   // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1993   // useBWIRegs.
1994   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1995     addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
1996     addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
1997 
1998     for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1999       setOperationAction(ISD::VSELECT,            VT, Expand);
2000       setOperationAction(ISD::TRUNCATE,           VT, Custom);
2001       setOperationAction(ISD::SETCC,              VT, Custom);
2002       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2003       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
2004       setOperationAction(ISD::SELECT,             VT, Custom);
2005       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2006       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2007       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
2008       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Custom);
2009     }
2010 
2011     for (auto VT : { MVT::v16i1, MVT::v32i1 })
2012       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2013 
2014     // Extends from v32i1 masks to 256-bit vectors.
2015     setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
2016     setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
2017     setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
2018 
2019     for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2020       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
2021       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2022     }
2023 
2024     // These operations are handled on non-VLX by artificially widening in
2025     // isel patterns.
2026     // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2027 
2028     if (Subtarget.hasBITALG()) {
2029       for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2030         setOperationAction(ISD::CTPOP, VT, Legal);
2031     }
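    // e.g. AVX512BITALG's VPOPCNTB/VPOPCNTW provide a native CTPOP for the
    // byte/word vector types listed above.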
2032   }
2033 
2034   if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2035     auto setGroup = [&] (MVT VT) {
2036       setOperationAction(ISD::FADD,               VT, Legal);
2037       setOperationAction(ISD::STRICT_FADD,        VT, Legal);
2038       setOperationAction(ISD::FSUB,               VT, Legal);
2039       setOperationAction(ISD::STRICT_FSUB,        VT, Legal);
2040       setOperationAction(ISD::FMUL,               VT, Legal);
2041       setOperationAction(ISD::STRICT_FMUL,        VT, Legal);
2042       setOperationAction(ISD::FDIV,               VT, Legal);
2043       setOperationAction(ISD::STRICT_FDIV,        VT, Legal);
2044       setOperationAction(ISD::FSQRT,              VT, Legal);
2045       setOperationAction(ISD::STRICT_FSQRT,       VT, Legal);
2046 
2047       setOperationAction(ISD::FFLOOR,             VT, Legal);
2048       setOperationAction(ISD::STRICT_FFLOOR,      VT, Legal);
2049       setOperationAction(ISD::FCEIL,              VT, Legal);
2050       setOperationAction(ISD::STRICT_FCEIL,       VT, Legal);
2051       setOperationAction(ISD::FTRUNC,             VT, Legal);
2052       setOperationAction(ISD::STRICT_FTRUNC,      VT, Legal);
2053       setOperationAction(ISD::FRINT,              VT, Legal);
2054       setOperationAction(ISD::STRICT_FRINT,       VT, Legal);
2055       setOperationAction(ISD::FNEARBYINT,         VT, Legal);
2056       setOperationAction(ISD::STRICT_FNEARBYINT,  VT, Legal);
2057 
2058       setOperationAction(ISD::LOAD,               VT, Legal);
2059       setOperationAction(ISD::STORE,              VT, Legal);
2060 
2061       setOperationAction(ISD::FMA,                VT, Legal);
2062       setOperationAction(ISD::STRICT_FMA,         VT, Legal);
2063       setOperationAction(ISD::VSELECT,            VT, Legal);
2064       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2065       setOperationAction(ISD::SELECT,             VT, Custom);
2066 
2067       setOperationAction(ISD::FNEG,               VT, Custom);
2068       setOperationAction(ISD::FABS,               VT, Custom);
2069       setOperationAction(ISD::FCOPYSIGN,          VT, Custom);
2070       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2071       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2072     };
2073 
2074     // AVX512_FP16 scalar operations
2075     setGroup(MVT::f16);
2076     setOperationAction(ISD::FREM,                 MVT::f16, Promote);
2077     setOperationAction(ISD::STRICT_FREM,          MVT::f16, Promote);
2078     setOperationAction(ISD::SELECT_CC,            MVT::f16, Expand);
2079     setOperationAction(ISD::BR_CC,                MVT::f16, Expand);
2080     setOperationAction(ISD::SETCC,                MVT::f16, Custom);
2081     setOperationAction(ISD::STRICT_FSETCC,        MVT::f16, Custom);
2082     setOperationAction(ISD::STRICT_FSETCCS,       MVT::f16, Custom);
2083     setOperationAction(ISD::FROUND,               MVT::f16, Custom);
2084     setOperationAction(ISD::STRICT_FROUND,        MVT::f16, Promote);
2085     setOperationAction(ISD::FROUNDEVEN,           MVT::f16, Legal);
2086     setOperationAction(ISD::STRICT_FROUNDEVEN,    MVT::f16, Legal);
2087     setOperationAction(ISD::FP_ROUND,             MVT::f16, Custom);
2088     setOperationAction(ISD::STRICT_FP_ROUND,      MVT::f16, Custom);
2089     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
2090     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
2091 
2092     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2093     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2094 
2095     if (Subtarget.useAVX512Regs()) {
2096       setGroup(MVT::v32f16);
2097       setOperationAction(ISD::SCALAR_TO_VECTOR,       MVT::v32f16, Custom);
2098       setOperationAction(ISD::SINT_TO_FP,             MVT::v32i16, Legal);
2099       setOperationAction(ISD::STRICT_SINT_TO_FP,      MVT::v32i16, Legal);
2100       setOperationAction(ISD::UINT_TO_FP,             MVT::v32i16, Legal);
2101       setOperationAction(ISD::STRICT_UINT_TO_FP,      MVT::v32i16, Legal);
2102       setOperationAction(ISD::FP_ROUND,               MVT::v16f16, Legal);
2103       setOperationAction(ISD::STRICT_FP_ROUND,        MVT::v16f16, Legal);
2104       setOperationAction(ISD::FP_EXTEND,              MVT::v16f32, Legal);
2105       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v16f32, Legal);
2106       setOperationAction(ISD::FP_EXTEND,              MVT::v8f64,  Legal);
2107       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v8f64,  Legal);
2108       setOperationAction(ISD::INSERT_VECTOR_ELT,      MVT::v32f16, Custom);
2109 
2110       setOperationAction(ISD::FP_TO_SINT,             MVT::v32i16, Custom);
2111       setOperationAction(ISD::STRICT_FP_TO_SINT,      MVT::v32i16, Custom);
2112       setOperationAction(ISD::FP_TO_UINT,             MVT::v32i16, Custom);
2113       setOperationAction(ISD::STRICT_FP_TO_UINT,      MVT::v32i16, Custom);
2114       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i8,  MVT::v32i16);
2115       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2116                                  MVT::v32i16);
2117       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i8,  MVT::v32i16);
2118       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2119                                  MVT::v32i16);
2120       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i1,  MVT::v32i16);
2121       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2122                                  MVT::v32i16);
2123       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i1,  MVT::v32i16);
2124       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2125                                  MVT::v32i16);
2126 
2127       setOperationAction(ISD::EXTRACT_SUBVECTOR,      MVT::v16f16, Legal);
2128       setOperationAction(ISD::INSERT_SUBVECTOR,       MVT::v32f16, Legal);
2129       setOperationAction(ISD::CONCAT_VECTORS,         MVT::v32f16, Custom);
2130 
2131       setLoadExtAction(ISD::EXTLOAD, MVT::v8f64,  MVT::v8f16,  Legal);
2132       setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2133 
2134       setOperationAction(ISD::STRICT_FSETCC,      MVT::v32i1, Custom);
2135       setOperationAction(ISD::STRICT_FSETCCS,     MVT::v32i1, Custom);
2136     }
2137 
2138     if (Subtarget.hasVLX()) {
2139       setGroup(MVT::v8f16);
2140       setGroup(MVT::v16f16);
2141 
2142       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8f16,  Legal);
2143       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16f16, Custom);
2144       setOperationAction(ISD::SINT_TO_FP,         MVT::v16i16, Legal);
2145       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v16i16, Legal);
2146       setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16,  Legal);
2147       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i16,  Legal);
2148       setOperationAction(ISD::UINT_TO_FP,         MVT::v16i16, Legal);
2149       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v16i16, Legal);
2150       setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16,  Legal);
2151       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v8i16,  Legal);
2152 
2153       setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
2154       setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
2155       setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
2156       setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
2157       setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Legal);
2158       setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f16, Legal);
2159       setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Legal);
2160       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v8f32, Legal);
2161       setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Legal);
2162       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
2163 
2164       // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2165       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v8f16,  Custom);
2166       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v16f16, Custom);
2167 
2168       setOperationAction(ISD::EXTRACT_SUBVECTOR,    MVT::v8f16, Legal);
2169       setOperationAction(ISD::INSERT_SUBVECTOR,     MVT::v16f16, Legal);
2170       setOperationAction(ISD::CONCAT_VECTORS,       MVT::v16f16, Custom);
2171 
2172       setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2173       setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2174       setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2175       setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2176 
2177       // Need to custom widen these to prevent scalarization.
2178       setOperationAction(ISD::LOAD,  MVT::v4f16, Custom);
2179       setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2180     }
2181   }
2182 
2183   if (!Subtarget.useSoftFloat() &&
2184       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2185     addRegisterClass(MVT::v8bf16, &X86::VR128XRegClass);
2186     addRegisterClass(MVT::v16bf16, &X86::VR256XRegClass);
2187     // We set the type action of bf16 to TypeSoftPromoteHalf, but we don't
2188     // provide a method to promote BUILD_VECTOR, so set its operation action
2189     // to Custom and do the customization later.
2190     setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2191     for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2192       setF16Action(VT, Expand);
2193       setOperationAction(ISD::FADD, VT, Expand);
2194       setOperationAction(ISD::FSUB, VT, Expand);
2195       setOperationAction(ISD::FMUL, VT, Expand);
2196       setOperationAction(ISD::FDIV, VT, Expand);
2197       setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2198     }
2199     addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2200   }
2201 
2202   if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2203     addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2204     setF16Action(MVT::v32bf16, Expand);
2205     setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2206     setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2207     setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2208     setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2209     setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2210   }
2211 
2212   if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2213     setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
2214     setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2215     setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2216     setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
2217     setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2218 
2219     setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
2220     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2221     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2222     setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
2223     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2224 
2225     if (Subtarget.hasBWI()) {
2226       setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
2227       setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
2228     }
2229 
2230     if (Subtarget.hasFP16()) {
2231       // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2232       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f16, Custom);
2233       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2234       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f16, Custom);
2235       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2236       setOperationAction(ISD::FP_TO_SINT,        MVT::v4f16, Custom);
2237       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2238       setOperationAction(ISD::FP_TO_UINT,        MVT::v4f16, Custom);
2239       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2240       // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2241       setOperationAction(ISD::SINT_TO_FP,        MVT::v2f16, Custom);
2242       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2243       setOperationAction(ISD::UINT_TO_FP,        MVT::v2f16, Custom);
2244       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2245       setOperationAction(ISD::SINT_TO_FP,        MVT::v4f16, Custom);
2246       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2247       setOperationAction(ISD::UINT_TO_FP,        MVT::v4f16, Custom);
2248       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2249       // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2250       setOperationAction(ISD::FP_ROUND,          MVT::v2f16, Custom);
2251       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v2f16, Custom);
2252       setOperationAction(ISD::FP_ROUND,          MVT::v4f16, Custom);
2253       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v4f16, Custom);
2254       // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2255       setOperationAction(ISD::FP_EXTEND,         MVT::v2f16, Custom);
2256       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v2f16, Custom);
2257       setOperationAction(ISD::FP_EXTEND,         MVT::v4f16, Custom);
2258       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v4f16, Custom);
2259     }
2260 
2261     setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
2262     setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
2263     setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
2264   }
2265 
2266   if (Subtarget.hasAMXTILE()) {
2267     addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2268   }
2269 
2270   // We want to custom lower some of our intrinsics.
2271   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2272   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2273   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2274   if (!Subtarget.is64Bit()) {
2275     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2276   }
2277 
2278   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2279   // handle type legalization for these operations here.
2280   //
2281   // FIXME: We really should do custom legalization for addition and
2282   // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
2283   // than generic legalization for 64-bit multiplication-with-overflow, though.
2284   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2285     if (VT == MVT::i64 && !Subtarget.is64Bit())
2286       continue;
2287     // Add/Sub/Mul with overflow operations are custom lowered.
2288     setOperationAction(ISD::SADDO, VT, Custom);
2289     setOperationAction(ISD::UADDO, VT, Custom);
2290     setOperationAction(ISD::SSUBO, VT, Custom);
2291     setOperationAction(ISD::USUBO, VT, Custom);
2292     setOperationAction(ISD::SMULO, VT, Custom);
2293     setOperationAction(ISD::UMULO, VT, Custom);
2294 
2295     // Support carry in as value rather than glue.
2296     setOperationAction(ISD::ADDCARRY, VT, Custom);
2297     setOperationAction(ISD::SUBCARRY, VT, Custom);
2298     setOperationAction(ISD::SETCCCARRY, VT, Custom);
2299     setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2300     setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2301   }
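
  // For example, an IR call such as
  //   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  // reaches the custom SADDO lowering above; on x86 this is typically
  // selected as an ADD that also defines EFLAGS, with the overflow bit
  // recovered via a SETO/SETCC-style node, and the carry-based nodes
  // (ADDCARRY/SUBCARRY) map naturally onto ADC/SBB chains.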
2302 
2303   if (!Subtarget.is64Bit()) {
2304     // These libcalls are not available in 32-bit.
2305     setLibcallName(RTLIB::SHL_I128, nullptr);
2306     setLibcallName(RTLIB::SRL_I128, nullptr);
2307     setLibcallName(RTLIB::SRA_I128, nullptr);
2308     setLibcallName(RTLIB::MUL_I128, nullptr);
2309     // The MULO libcall is not part of libgcc, only compiler-rt.
2310     setLibcallName(RTLIB::MULO_I64, nullptr);
2311   }
2312   // The MULO libcall is not part of libgcc, only compiler-rt.
2313   setLibcallName(RTLIB::MULO_I128, nullptr);
2314 
2315   // Combine sin / cos into _sincos_stret if it is available.
2316   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2317       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2318     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2319     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2320   }
2321 
2322   if (Subtarget.isTargetWin64()) {
2323     setOperationAction(ISD::SDIV, MVT::i128, Custom);
2324     setOperationAction(ISD::UDIV, MVT::i128, Custom);
2325     setOperationAction(ISD::SREM, MVT::i128, Custom);
2326     setOperationAction(ISD::UREM, MVT::i128, Custom);
2327     setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2328     setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2329     setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2330     setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2331     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2332     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2333     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2334     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2335   }
2336 
2337   // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2338   // is. We should promote the value to 64 bits to solve this.
2339   // This is what the CRT headers do - `fmodf` is an inline header
2340   // function that casts to f64 and calls `fmod`.
2341   if (Subtarget.is32Bit() &&
2342       (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2343     for (ISD::NodeType Op :
2344          {ISD::FCEIL,  ISD::STRICT_FCEIL,
2345           ISD::FCOS,   ISD::STRICT_FCOS,
2346           ISD::FEXP,   ISD::STRICT_FEXP,
2347           ISD::FFLOOR, ISD::STRICT_FFLOOR,
2348           ISD::FREM,   ISD::STRICT_FREM,
2349           ISD::FLOG,   ISD::STRICT_FLOG,
2350           ISD::FLOG10, ISD::STRICT_FLOG10,
2351           ISD::FPOW,   ISD::STRICT_FPOW,
2352           ISD::FSIN,   ISD::STRICT_FSIN})
2353       if (isOperationExpand(Op, MVT::f32))
2354         setOperationAction(Op, MVT::f32, Promote);
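
  // In effect, an f32 FREM such as fmodf(x, y) on these 32-bit Windows
  // targets is promoted so that the operands are extended to f64, the f64
  // libcall (fmod) is used, and the result is truncated back to f32,
  // mirroring what the MSVC CRT headers do inline.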
2355 
2356   // We have target-specific dag combine patterns for the following nodes:
2357   setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2358                        ISD::SCALAR_TO_VECTOR,
2359                        ISD::INSERT_VECTOR_ELT,
2360                        ISD::EXTRACT_VECTOR_ELT,
2361                        ISD::CONCAT_VECTORS,
2362                        ISD::INSERT_SUBVECTOR,
2363                        ISD::EXTRACT_SUBVECTOR,
2364                        ISD::BITCAST,
2365                        ISD::VSELECT,
2366                        ISD::SELECT,
2367                        ISD::SHL,
2368                        ISD::SRA,
2369                        ISD::SRL,
2370                        ISD::OR,
2371                        ISD::AND,
2372                        ISD::ADD,
2373                        ISD::FADD,
2374                        ISD::FSUB,
2375                        ISD::FNEG,
2376                        ISD::FMA,
2377                        ISD::STRICT_FMA,
2378                        ISD::FMINNUM,
2379                        ISD::FMAXNUM,
2380                        ISD::SUB,
2381                        ISD::LOAD,
2382                        ISD::MLOAD,
2383                        ISD::STORE,
2384                        ISD::MSTORE,
2385                        ISD::TRUNCATE,
2386                        ISD::ZERO_EXTEND,
2387                        ISD::ANY_EXTEND,
2388                        ISD::SIGN_EXTEND,
2389                        ISD::SIGN_EXTEND_INREG,
2390                        ISD::ANY_EXTEND_VECTOR_INREG,
2391                        ISD::SIGN_EXTEND_VECTOR_INREG,
2392                        ISD::ZERO_EXTEND_VECTOR_INREG,
2393                        ISD::SINT_TO_FP,
2394                        ISD::UINT_TO_FP,
2395                        ISD::STRICT_SINT_TO_FP,
2396                        ISD::STRICT_UINT_TO_FP,
2397                        ISD::SETCC,
2398                        ISD::MUL,
2399                        ISD::XOR,
2400                        ISD::MSCATTER,
2401                        ISD::MGATHER,
2402                        ISD::FP16_TO_FP,
2403                        ISD::FP_EXTEND,
2404                        ISD::STRICT_FP_EXTEND,
2405                        ISD::FP_ROUND,
2406                        ISD::STRICT_FP_ROUND});
2407 
2408   computeRegisterProperties(Subtarget.getRegisterInfo());
2409 
2410   MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2411   MaxStoresPerMemsetOptSize = 8;
2412   MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2413   MaxStoresPerMemcpyOptSize = 4;
2414   MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2415   MaxStoresPerMemmoveOptSize = 4;
2416 
2417   // TODO: These control memcmp expansion in CGP and could be raised higher, but
2418   // that needs to be benchmarked and balanced with the potential use of vector
2419   // load/store types (PR33329, PR33914).
2420   MaxLoadsPerMemcmp = 2;
2421   MaxLoadsPerMemcmpOptSize = 2;
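
  // With a limit of 2, a call like memcmp(a, b, 16) can typically still be
  // expanded inline on x86-64 (roughly two 8-byte load/compare steps per
  // buffer), while larger or more fragmented sizes fall back to the libcall.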
2422 
2423   // Default loop alignment, which can be overridden by -align-loops.
2424   setPrefLoopAlignment(Align(16));
2425 
2426   // An out-of-order CPU can speculatively execute past a predictable branch,
2427   // but a conditional move could be stalled by an expensive earlier operation.
2428   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2429   EnableExtLdPromotion = true;
2430   setPrefFunctionAlignment(Align(16));
2431 
2432   verifyIntrinsicTables();
2433 
2434   // Default to having -disable-strictnode-mutation on
2435   IsStrictFPEnabled = true;
2436 }
2437 
2438 // This has so far only been implemented for 64-bit MachO.
2439 bool X86TargetLowering::useLoadStackGuardNode() const {
2440   return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2441 }
2442 
2443 bool X86TargetLowering::useStackGuardXorFP() const {
2444   // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2445   return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2446 }
2447 
2448 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2449                                                const SDLoc &DL) const {
2450   EVT PtrTy = getPointerTy(DAG.getDataLayout());
2451   unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2452   MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2453   return SDValue(Node, 0);
2454 }
2455 
2456 TargetLoweringBase::LegalizeTypeAction
2457 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2458   if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2459       !Subtarget.hasBWI())
2460     return TypeSplitVector;
2461 
2462   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2463       !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2464     return TypeSplitVector;
2465 
2466   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2467       VT.getVectorElementType() != MVT::i1)
2468     return TypeWidenVector;
2469 
2470   return TargetLoweringBase::getPreferredVectorAction(VT);
2471 }
2472 
2473 static std::pair<MVT, unsigned>
2474 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2475                                  const X86Subtarget &Subtarget) {
2476   // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2477   // convention is one that uses k registers.
2478   if (NumElts == 2)
2479     return {MVT::v2i64, 1};
2480   if (NumElts == 4)
2481     return {MVT::v4i32, 1};
2482   if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2483       CC != CallingConv::Intel_OCL_BI)
2484     return {MVT::v8i16, 1};
2485   if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2486       CC != CallingConv::Intel_OCL_BI)
2487     return {MVT::v16i8, 1};
2488   // v32i1 passes in ymm unless we have BWI and the calling convention is
2489   // regcall.
2490   if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2491     return {MVT::v32i8, 1};
2492   // Split v64i1 vectors if we don't have v64i8 available.
2493   if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2494     if (Subtarget.useAVX512Regs())
2495       return {MVT::v64i8, 1};
2496     return {MVT::v32i8, 2};
2497   }
2498 
2499   // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2500   if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2501       NumElts > 64)
2502     return {MVT::i8, NumElts};
2503 
2504   return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
2505 }
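
// Reading the cases above: under the default calling conventions a v16i1
// argument travels as a single v16i8 (one XMM register) and a v32i1 as a
// single v32i8; a v64i1 with BWI goes either as one v64i8 (when 512-bit
// registers are in use) or as two v32i8 halves, while odd element counts,
// more than 64 elements, or v64i1 without BWI fall back to one i8 per
// element.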
2506 
2507 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2508                                                      CallingConv::ID CC,
2509                                                      EVT VT) const {
2510   if (VT.isVector()) {
2511     if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2512       unsigned NumElts = VT.getVectorNumElements();
2513 
2514       MVT RegisterVT;
2515       unsigned NumRegisters;
2516       std::tie(RegisterVT, NumRegisters) =
2517           handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2518       if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2519         return RegisterVT;
2520     }
2521 
2522     if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2523       return MVT::v8f16;
2524   }
2525 
2526   // We will use more GPRs for f64 and f80 in 32-bit mode when x87 is disabled.
2527   if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
2528       !Subtarget.hasX87())
2529     return MVT::i32;
2530 
2531   if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
2532     return getRegisterTypeForCallingConv(Context, CC,
2533                                          VT.changeVectorElementTypeToInteger());
2534 
2535   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2536 }
2537 
2538 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2539                                                           CallingConv::ID CC,
2540                                                           EVT VT) const {
2541   if (VT.isVector()) {
2542     if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2543       unsigned NumElts = VT.getVectorNumElements();
2544 
2545       MVT RegisterVT;
2546       unsigned NumRegisters;
2547       std::tie(RegisterVT, NumRegisters) =
2548           handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2549       if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2550         return NumRegisters;
2551     }
2552 
2553     if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2554       return 1;
2555   }
2556 
2557   // We have to split f64 into 2 registers and f80 into 3 registers in
2558   // 32-bit mode if x87 is disabled.
2559   if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
2560     if (VT == MVT::f64)
2561       return 2;
2562     if (VT == MVT::f80)
2563       return 3;
2564   }
2565 
2566   if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
2567     return getNumRegistersForCallingConv(Context, CC,
2568                                          VT.changeVectorElementTypeToInteger());
2569 
2570   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2571 }
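
// Taken together with getRegisterTypeForCallingConv above: on a 32-bit
// target without x87, an f64 value is split across two i32 registers and an
// f80 across three; short f16 vectors (fewer than 8 elements) travel as a
// single v8f16, and bf16 vectors reuse the corresponding integer-vector
// breakdown.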
2572 
2573 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2574     LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2575     unsigned &NumIntermediates, MVT &RegisterVT) const {
2576   // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2577   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2578       Subtarget.hasAVX512() &&
2579       (!isPowerOf2_32(VT.getVectorNumElements()) ||
2580        (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
2581        VT.getVectorNumElements() > 64)) {
2582     RegisterVT = MVT::i8;
2583     IntermediateVT = MVT::i1;
2584     NumIntermediates = VT.getVectorNumElements();
2585     return NumIntermediates;
2586   }
2587 
2588   // Split v64i1 vectors if we don't have v64i8 available.
2589   if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2590       CC != CallingConv::X86_RegCall) {
2591     RegisterVT = MVT::v32i8;
2592     IntermediateVT = MVT::v32i1;
2593     NumIntermediates = 2;
2594     return 2;
2595   }
2596 
2597   return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
2598                                               NumIntermediates, RegisterVT);
2599 }
2600 
2601 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2602                                           LLVMContext& Context,
2603                                           EVT VT) const {
2604   if (!VT.isVector())
2605     return MVT::i8;
2606 
2607   if (Subtarget.hasAVX512()) {
2608     // Figure out what this type will be legalized to.
2609     EVT LegalVT = VT;
2610     while (getTypeAction(Context, LegalVT) != TypeLegal)
2611       LegalVT = getTypeToTransformTo(Context, LegalVT);
2612 
2613     // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2614     if (LegalVT.getSimpleVT().is512BitVector())
2615       return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2616 
2617     if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2618       // If we legalized to less than a 512-bit vector, then we will use a vXi1
2619       // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2620       // vXi16/vXi8.
2621       MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2622       if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2623         return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2624     }
2625   }
2626 
2627   return VT.changeVectorElementTypeToInteger();
2628 }
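
// For example, with AVX-512 a compare of v16f32 produces a v16i1 mask, and
// with VLX (plus BWI for the 8/16-bit elements) the narrower vectors do as
// well; otherwise the setcc result is the integer vector of the same shape,
// e.g. v4i32 for a v4f32 compare.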
2629 
2630 /// Helper for getByValTypeAlignment to determine
2631 /// the desired ByVal argument alignment.
2632 static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
2633   if (MaxAlign == 16)
2634     return;
2635   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2636     if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
2637       MaxAlign = Align(16);
2638   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2639     Align EltAlign;
2640     getMaxByValAlign(ATy->getElementType(), EltAlign);
2641     if (EltAlign > MaxAlign)
2642       MaxAlign = EltAlign;
2643   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2644     for (auto *EltTy : STy->elements()) {
2645       Align EltAlign;
2646       getMaxByValAlign(EltTy, EltAlign);
2647       if (EltAlign > MaxAlign)
2648         MaxAlign = EltAlign;
2649       if (MaxAlign == 16)
2650         break;
2651     }
2652   }
2653 }
2654 
2655 /// Return the desired alignment for ByVal aggregate
2656 /// function arguments in the caller parameter area. For X86, aggregates
2657 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2658 /// are at 4-byte boundaries.
2659 uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
2660                                                   const DataLayout &DL) const {
2661   if (Subtarget.is64Bit()) {
2662     // Max of 8 and alignment of type.
2663     Align TyAlign = DL.getABITypeAlign(Ty);
2664     if (TyAlign > 8)
2665       return TyAlign.value();
2666     return 8;
2667   }
2668 
2669   Align Alignment(4);
2670   if (Subtarget.hasSSE1())
2671     getMaxByValAlign(Ty, Alignment);
2672   return Alignment.value();
2673 }
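
// Concretely: on x86-64 a byval aggregate gets max(8, ABI alignment) bytes,
// while on 32-bit targets it gets 4 bytes unless SSE is available and the
// aggregate contains a 128-bit vector somewhere inside, in which case it is
// placed on a 16-byte boundary.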
2674 
2675 /// It returns EVT::Other if the type should be determined using generic
2676 /// target-independent logic.
2677 /// For vector ops we check that the overall size isn't larger than our
2678 /// preferred vector width.
2679 EVT X86TargetLowering::getOptimalMemOpType(
2680     const MemOp &Op, const AttributeList &FuncAttributes) const {
2681   if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
2682     if (Op.size() >= 16 &&
2683         (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
2684       // FIXME: Check if unaligned 64-byte accesses are slow.
2685       if (Op.size() >= 64 && Subtarget.hasAVX512() &&
2686           (Subtarget.getPreferVectorWidth() >= 512)) {
2687         return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2688       }
2689       // FIXME: Check if unaligned 32-byte accesses are slow.
2690       if (Op.size() >= 32 && Subtarget.hasAVX() &&
2691           Subtarget.useLight256BitInstructions()) {
2692         // Although this isn't a well-supported type for AVX1, we'll let
2693         // legalization and shuffle lowering produce the optimal codegen. If we
2694         // choose an optimal type with a vector element larger than a byte,
2695         // getMemsetStores() may create an intermediate splat (using an integer
2696         // multiply) before we splat as a vector.
2697         return MVT::v32i8;
2698       }
2699       if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2700         return MVT::v16i8;
2701       // TODO: Can SSE1 handle a byte vector?
2702       // If we have SSE1 registers we should be able to use them.
2703       if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2704           (Subtarget.getPreferVectorWidth() >= 128))
2705         return MVT::v4f32;
2706     } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
2707                Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2708       // Do not use f64 to lower memcpy if source is string constant. It's
2709       // better to use i32 to avoid the loads.
2710       // Also, do not use f64 to lower memset unless this is a memset of zeros.
2711       // The gymnastics of splatting a byte value into an XMM register and then
2712       // only using 8-byte stores (because this is a CPU with slow unaligned
2713       // 16-byte accesses) makes that a loser.
2714       return MVT::f64;
2715     }
2716   }
2717   // This is a compromise. If we reach here, unaligned accesses may be slow on
2718   // this target. However, creating smaller, aligned accesses could be even
2719   // slower and would certainly be a lot more code.
2720   if (Subtarget.is64Bit() && Op.size() >= 8)
2721     return MVT::i64;
2722   return MVT::i32;
2723 }
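
// Rough intent of the selection above: a large, sufficiently aligned (or
// fast-unaligned) memset/memcpy prefers the widest acceptable vector type -
// v64i8 or v16i32 with AVX-512, v32i8 with AVX, v16i8 with SSE2, v4f32 with
// bare SSE1 - and otherwise falls back to i64/i32 chunks.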
2724 
2725 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2726   if (VT == MVT::f32)
2727     return Subtarget.hasSSE1();
2728   if (VT == MVT::f64)
2729     return Subtarget.hasSSE2();
2730   return true;
2731 }
2732 
2733 static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
2734   return (8 * Alignment.value()) % SizeInBits == 0;
2735 }
2736 
2737 bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
2738   if (isBitAligned(Alignment, VT.getSizeInBits()))
2739     return true;
2740   switch (VT.getSizeInBits()) {
2741   default:
2742     // 8-byte and under are always assumed to be fast.
2743     return true;
2744   case 128:
2745     return !Subtarget.isUnalignedMem16Slow();
2746   case 256:
2747     return !Subtarget.isUnalignedMem32Slow();
2748     // TODO: What about AVX-512 (512-bit) accesses?
2749   }
2750 }
2751 
2752 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2753     EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
2754     unsigned *Fast) const {
2755   if (Fast)
2756     *Fast = isMemoryAccessFast(VT, Alignment);
2757   // NonTemporal vector memory ops must be aligned.
2758   if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2759     // NT loads can only be vector aligned, so if it's less aligned than the
2760     // minimum vector size (which we can split the vector down to), we might as
2761     // well use a regular unaligned vector load.
2762     // We don't have any NT loads pre-SSE41.
2763     if (!!(Flags & MachineMemOperand::MOLoad))
2764       return (Alignment < 16 || !Subtarget.hasSSE41());
2765     return false;
2766   }
2767   // Misaligned accesses of any size are always allowed.
2768   return true;
2769 }
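
// In other words, ordinary misaligned accesses are always permitted (with
// *Fast reporting whether they are expected to be cheap); a non-temporal
// vector load that is under-aligned, or on a target without SSE4.1 NT
// loads, is simply treated as a regular unaligned load, while NT stores
// keep their natural alignment requirement.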
2770 
2771 bool X86TargetLowering::allowsMemoryAccess(LLVMContext &Context,
2772                                            const DataLayout &DL, EVT VT,
2773                                            unsigned AddrSpace, Align Alignment,
2774                                            MachineMemOperand::Flags Flags,
2775                                            unsigned *Fast) const {
2776   if (Fast)
2777     *Fast = isMemoryAccessFast(VT, Alignment);
2778   if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2779     if (allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags,
2780                                        /*Fast=*/nullptr))
2781       return true;
2782     // NonTemporal vector memory ops are special, and must be aligned.
2783     if (!isBitAligned(Alignment, VT.getSizeInBits()))
2784       return false;
2785     switch (VT.getSizeInBits()) {
2786     case 128:
2787       if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasSSE41())
2788         return true;
2789       if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasSSE2())
2790         return true;
2791       return false;
2792     case 256:
2793       if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasAVX2())
2794         return true;
2795       if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasAVX())
2796         return true;
2797       return false;
2798     case 512:
2799       if (Subtarget.hasAVX512())
2800         return true;
2801       return false;
2802     default:
2803       return false; // Don't have NonTemporal vector memory ops of this size.
2804     }
2805   }
2806   return true;
2807 }
2808 
2809 /// Return the entry encoding for a jump table in the
2810 /// current function.  The returned value is a member of the
2811 /// MachineJumpTableInfo::JTEntryKind enum.
2812 unsigned X86TargetLowering::getJumpTableEncoding() const {
2813   // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2814   // symbol.
2815   if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2816     return MachineJumpTableInfo::EK_Custom32;
2817 
2818   // Otherwise, use the normal jump table encoding heuristics.
2819   return TargetLowering::getJumpTableEncoding();
2820 }
2821 
2822 bool X86TargetLowering::splitValueIntoRegisterParts(
2823     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
2824     unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
2825   bool IsABIRegCopy = CC.has_value();
2826   EVT ValueVT = Val.getValueType();
2827   if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
2828     unsigned ValueBits = ValueVT.getSizeInBits();
2829     unsigned PartBits = PartVT.getSizeInBits();
2830     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
2831     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
2832     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
2833     Parts[0] = Val;
2834     return true;
2835   }
2836   return false;
2837 }
2838 
2839 SDValue X86TargetLowering::joinRegisterPartsIntoValue(
2840     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
2841     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
2842   bool IsABIRegCopy = CC.has_value();
2843   if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
2844     unsigned ValueBits = ValueVT.getSizeInBits();
2845     unsigned PartBits = PartVT.getSizeInBits();
2846     SDValue Val = Parts[0];
2847 
2848     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
2849     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
2850     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
2851     return Val;
2852   }
2853   return SDValue();
2854 }
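
// Sketch of the ABI copy these two hooks implement for a bf16 value passed
// in an f32 part: split performs bf16 -> bitcast i16 -> anyext i32 ->
// bitcast f32, and join performs the inverse (f32 -> i32 -> trunc i16 ->
// bf16).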
2855 
2856 bool X86TargetLowering::useSoftFloat() const {
2857   return Subtarget.useSoftFloat();
2858 }
2859 
2860 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2861                                               ArgListTy &Args) const {
2862 
2863   // Only relabel X86-32 for C / Stdcall CCs.
2864   if (Subtarget.is64Bit())
2865     return;
2866   if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2867     return;
2868   unsigned ParamRegs = 0;
2869   if (auto *M = MF->getFunction().getParent())
2870     ParamRegs = M->getNumberRegisterParameters();
2871 
2872   // Mark the first N integer arguments as being passed in registers.
2873   for (auto &Arg : Args) {
2874     Type *T = Arg.Ty;
2875     if (T->isIntOrPtrTy())
2876       if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2877         unsigned numRegs = 1;
2878         if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2879           numRegs = 2;
2880         if (ParamRegs < numRegs)
2881           return;
2882         ParamRegs -= numRegs;
2883         Arg.IsInReg = true;
2884       }
2885   }
2886 }
2887 
2888 const MCExpr *
2889 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2890                                              const MachineBasicBlock *MBB,
2891                                              unsigned uid,MCContext &Ctx) const{
2892   assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2893   // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2894   // entries.
2895   return MCSymbolRefExpr::create(MBB->getSymbol(),
2896                                  MCSymbolRefExpr::VK_GOTOFF, Ctx);
2897 }
2898 
2899 /// Returns relocation base for the given PIC jumptable.
2900 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2901                                                     SelectionDAG &DAG) const {
2902   if (!Subtarget.is64Bit())
2903     // This doesn't have SDLoc associated with it, but is not really the
2904     // same as a Register.
2905     return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2906                        getPointerTy(DAG.getDataLayout()));
2907   return Table;
2908 }
2909 
2910 /// This returns the relocation base for the given PIC jumptable,
2911 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2912 const MCExpr *X86TargetLowering::
2913 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2914                              MCContext &Ctx) const {
2915   // X86-64 uses RIP relative addressing based on the jump table label.
2916   if (Subtarget.isPICStyleRIPRel())
2917     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2918 
2919   // Otherwise, the reference is relative to the PIC base.
2920   return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2921 }
2922 
2923 std::pair<const TargetRegisterClass *, uint8_t>
2924 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2925                                            MVT VT) const {
2926   const TargetRegisterClass *RRC = nullptr;
2927   uint8_t Cost = 1;
2928   switch (VT.SimpleTy) {
2929   default:
2930     return TargetLowering::findRepresentativeClass(TRI, VT);
2931   case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2932     RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2933     break;
2934   case MVT::x86mmx:
2935     RRC = &X86::VR64RegClass;
2936     break;
2937   case MVT::f32: case MVT::f64:
2938   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2939   case MVT::v4f32: case MVT::v2f64:
2940   case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2941   case MVT::v8f32: case MVT::v4f64:
2942   case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2943   case MVT::v16f32: case MVT::v8f64:
2944     RRC = &X86::VR128XRegClass;
2945     break;
2946   }
2947   return std::make_pair(RRC, Cost);
2948 }
2949 
2950 unsigned X86TargetLowering::getAddressSpace() const {
2951   if (Subtarget.is64Bit())
2952     return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2953   return 256;
2954 }
2955 
2956 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2957   return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2958          (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2959 }
2960 
2961 static Constant* SegmentOffset(IRBuilderBase &IRB,
2962                                int Offset, unsigned AddressSpace) {
2963   return ConstantExpr::getIntToPtr(
2964       ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2965       Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2966 }
2967 
2968 Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
2969   // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2970   // tcbhead_t; use it instead of the usual global variable (see
2971   // sysdeps/{i386,x86_64}/nptl/tls.h)
2972   if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2973     if (Subtarget.isTargetFuchsia()) {
2974       // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2975       return SegmentOffset(IRB, 0x10, getAddressSpace());
2976     } else {
2977       unsigned AddressSpace = getAddressSpace();
2978       Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2979       // Note that some users may customize the base reg and offset.
2980       int Offset = M->getStackProtectorGuardOffset();
2981       // If no -stack-protector-guard-offset value was set, the default is
2982       // %fs:0x28, unless we're using a Kernel code model, in which case
2983       // it's %gs:0x28 (%gs:0x14 on i386).
2984       if (Offset == INT_MAX)
2985         Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2986 
2987       StringRef GuardReg = M->getStackProtectorGuardReg();
2988       if (GuardReg == "fs")
2989         AddressSpace = X86AS::FS;
2990       else if (GuardReg == "gs")
2991         AddressSpace = X86AS::GS;
2992 
2993       // Use the guard symbol if the user specified one.
2994       StringRef GuardSymb = M->getStackProtectorGuardSymbol();
2995       if (!GuardSymb.empty()) {
2996         GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
2997         if (!GV) {
2998           Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext())
2999                                          : Type::getInt32Ty(M->getContext());
3000           GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
3001                                   nullptr, GuardSymb, nullptr,
3002                                   GlobalValue::NotThreadLocal, AddressSpace);
3003         }
3004         return GV;
3005       }
3006 
3007       return SegmentOffset(IRB, Offset, AddressSpace);
3008     }
3009   }
3010   return TargetLowering::getIRStackGuard(IRB);
3011 }
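
// Net effect on common targets: with glibc/bionic TLS layouts the guard
// load becomes %fs:0x28 on x86-64 (%gs:0x14 on i386), unless the module
// overrides the segment register, offset, or guard symbol via the
// stack-protector-guard settings queried above; Fuchsia instead uses the
// fixed ZX_TLS_STACK_GUARD_OFFSET slot.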
3012 
3013 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
3014   // MSVC CRT provides functionalities for stack protection.
3015   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3016       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3017     // MSVC CRT has a global variable holding security cookie.
3018     M.getOrInsertGlobal("__security_cookie",
3019                         Type::getInt8PtrTy(M.getContext()));
3020 
3021     // MSVC CRT has a function to validate security cookie.
3022     FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
3023         "__security_check_cookie", Type::getVoidTy(M.getContext()),
3024         Type::getInt8PtrTy(M.getContext()));
3025     if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
3026       F->setCallingConv(CallingConv::X86_FastCall);
3027       F->addParamAttr(0, Attribute::AttrKind::InReg);
3028     }
3029     return;
3030   }
3031 
3032   StringRef GuardMode = M.getStackProtectorGuard();
3033 
3034   // glibc, bionic, and Fuchsia have a special slot for the stack guard.
3035   if ((GuardMode == "tls" || GuardMode.empty()) &&
3036       hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
3037     return;
3038   TargetLowering::insertSSPDeclarations(M);
3039 }
3040 
3041 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
3042   // MSVC CRT has a global variable holding security cookie.
3043   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3044       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3045     return M.getGlobalVariable("__security_cookie");
3046   }
3047   return TargetLowering::getSDagStackGuard(M);
3048 }
3049 
3050 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
3051   // MSVC CRT has a function to validate security cookie.
3052   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3053       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3054     return M.getFunction("__security_check_cookie");
3055   }
3056   return TargetLowering::getSSPStackGuardCheck(M);
3057 }
3058 
3059 Value *
3060 X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
3061   if (Subtarget.getTargetTriple().isOSContiki())
3062     return getDefaultSafeStackPointerLocation(IRB, false);
3063 
3064   // Android provides a fixed TLS slot for the SafeStack pointer. See the
3065   // definition of TLS_SLOT_SAFESTACK in
3066   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
3067   if (Subtarget.isTargetAndroid()) {
3068     // %fs:0x48, unless we're using a Kernel code model, in which case the
3069     // %gs segment is used instead; %gs:0x24 on i386.
3070     int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
3071     return SegmentOffset(IRB, Offset, getAddressSpace());
3072   }
3073 
3074   // Fuchsia is similar.
3075   if (Subtarget.isTargetFuchsia()) {
3076     // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
3077     return SegmentOffset(IRB, 0x18, getAddressSpace());
3078   }
3079 
3080   return TargetLowering::getSafeStackPointerLocation(IRB);
3081 }
3082 
3083 //===----------------------------------------------------------------------===//
3084 //               Return Value Calling Convention Implementation
3085 //===----------------------------------------------------------------------===//
3086 
3087 bool X86TargetLowering::CanLowerReturn(
3088     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
3089     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
3090   SmallVector<CCValAssign, 16> RVLocs;
3091   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
3092   return CCInfo.CheckReturn(Outs, RetCC_X86);
3093 }
3094 
3095 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
3096   static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
3097   return ScratchRegs;
3098 }
3099 
3100 /// Lowers mask values (v*i1) to the local register values.
3101 /// \returns DAG node after lowering to register type
3102 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
3103                                const SDLoc &Dl, SelectionDAG &DAG) {
3104   EVT ValVT = ValArg.getValueType();
3105 
3106   if (ValVT == MVT::v1i1)
3107     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
3108                        DAG.getIntPtrConstant(0, Dl));
3109 
3110   if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
3111       (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
3112     // Two stage lowering might be required
3113     // bitcast:   v8i1 -> i8 / v16i1 -> i16
3114     // anyextend: i8   -> i32 / i16   -> i32
3115     EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
3116     SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
3117     if (ValLoc == MVT::i32)
3118       ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
3119     return ValToCopy;
3120   }
3121 
3122   if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
3123       (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
3124     // One stage lowering is required
3125     // bitcast:   v32i1 -> i32 / v64i1 -> i64
3126     return DAG.getBitcast(ValLoc, ValArg);
3127   }
3128 
3129   return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
3130 }
3131 
3132 /// Breaks v64i1 value into two registers and adds the new node to the DAG
3133 static void Passv64i1ArgInRegs(
3134     const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
3135     SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
3136     CCValAssign &NextVA, const X86Subtarget &Subtarget) {
3137   assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
3138   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3139   assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
3140   assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3141          "The value should reside in two registers");
3142 
3143   // Before splitting the value we cast it to i64
3144   Arg = DAG.getBitcast(MVT::i64, Arg);
3145 
3146   // Splitting the value into two i32 types
3147   SDValue Lo, Hi;
3148   Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3149                    DAG.getConstant(0, Dl, MVT::i32));
3150   Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3151                    DAG.getConstant(1, Dl, MVT::i32));
3152 
3153   // Attach the two i32 halves to the corresponding registers.
3154   RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
3155   RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
3156 }
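
// Example of the split performed above: on a 32-bit AVX512BW target a v64i1
// mask is bitcast to i64 and handed over as two i32 halves, each in the
// register chosen by its CCValAssign (VA and NextVA).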
3157 
3158 SDValue
3159 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3160                                bool isVarArg,
3161                                const SmallVectorImpl<ISD::OutputArg> &Outs,
3162                                const SmallVectorImpl<SDValue> &OutVals,
3163                                const SDLoc &dl, SelectionDAG &DAG) const {
3164   MachineFunction &MF = DAG.getMachineFunction();
3165   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3166 
3167   // In some cases we need to disable registers from the default CSR list.
3168   // For example, when they are used for argument passing.
3169   bool ShouldDisableCalleeSavedRegister =
3170       CallConv == CallingConv::X86_RegCall ||
3171       MF.getFunction().hasFnAttribute("no_caller_saved_registers");
3172 
3173   if (CallConv == CallingConv::X86_INTR && !Outs.empty())
3174     report_fatal_error("X86 interrupts may not return any value");
3175 
3176   SmallVector<CCValAssign, 16> RVLocs;
3177   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
3178   CCInfo.AnalyzeReturn(Outs, RetCC_X86);
3179 
3180   SmallVector<std::pair<Register, SDValue>, 4> RetVals;
3181   for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
3182        ++I, ++OutsIndex) {
3183     CCValAssign &VA = RVLocs[I];
3184     assert(VA.isRegLoc() && "Can only return in registers!");
3185 
3186     // Add the register to the CalleeSaveDisableRegs list.
3187     if (ShouldDisableCalleeSavedRegister)
3188       MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
3189 
3190     SDValue ValToCopy = OutVals[OutsIndex];
3191     EVT ValVT = ValToCopy.getValueType();
3192 
3193     // Promote values to the appropriate types.
3194     if (VA.getLocInfo() == CCValAssign::SExt)
3195       ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
3196     else if (VA.getLocInfo() == CCValAssign::ZExt)
3197       ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
3198     else if (VA.getLocInfo() == CCValAssign::AExt) {
3199       if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
3200         ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
3201       else
3202         ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
3203     }
3204     else if (VA.getLocInfo() == CCValAssign::BCvt)
3205       ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
3206 
3207     assert(VA.getLocInfo() != CCValAssign::FPExt &&
3208            "Unexpected FP-extend for return value.");
3209 
3210     // Report an error if we have attempted to return a value via an XMM
3211     // register and SSE was disabled.
3212     if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3213       errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3214       VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3215     } else if (!Subtarget.hasSSE2() &&
3216                X86::FR64XRegClass.contains(VA.getLocReg()) &&
3217                ValVT == MVT::f64) {
3218       // When returning a double via an XMM register, report an error if SSE2 is
3219       // not enabled.
3220       errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3221       VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3222     }
3223 
3224     // Returns in ST0/ST1 are handled specially: these are pushed as operands to
3225     // the RET instruction and handled by the FP Stackifier.
3226     if (VA.getLocReg() == X86::FP0 ||
3227         VA.getLocReg() == X86::FP1) {
3228       // If this is a copy from an xmm register to ST(0), use an FPExtend to
3229       // change the value to the FP stack register class.
3230       if (isScalarFPTypeInSSEReg(VA.getValVT()))
3231         ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
3232       RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3233       // Don't emit a copytoreg.
3234       continue;
3235     }
3236 
3237     // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
3238     // which is returned in RAX / RDX.
3239     if (Subtarget.is64Bit()) {
3240       if (ValVT == MVT::x86mmx) {
3241         if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
3242           ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
3243           ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
3244                                   ValToCopy);
3245           // If we don't have SSE2 available, convert to v4f32 so the generated
3246           // register is legal.
3247           if (!Subtarget.hasSSE2())
3248             ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
3249         }
3250       }
3251     }
3252 
3253     if (VA.needsCustom()) {
3254       assert(VA.getValVT() == MVT::v64i1 &&
3255              "Currently the only custom case is when we split v64i1 to 2 regs");
3256 
3257       Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
3258                          Subtarget);
3259 
3260       // Add the second register to the CalleeSaveDisableRegs list.
3261       if (ShouldDisableCalleeSavedRegister)
3262         MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
3263     } else {
3264       RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3265     }
3266   }
3267 
3268   SDValue Flag;
3269   SmallVector<SDValue, 6> RetOps;
3270   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
3271   // Operand #1 = Bytes To Pop
3272   RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
3273                    MVT::i32));
3274 
3275   // Copy the result values into the output registers.
3276   for (auto &RetVal : RetVals) {
3277     if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
3278       RetOps.push_back(RetVal.second);
3279       continue; // Don't emit a copytoreg.
3280     }
3281 
3282     Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Flag);
3283     Flag = Chain.getValue(1);
3284     RetOps.push_back(
3285         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
3286   }
3287 
3288   // Swift calling convention does not require we copy the sret argument
3289   // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
3290 
3291   // All x86 ABIs require that for returning structs by value we copy
3292   // the sret argument into %rax/%eax (depending on ABI) for the return.
3293   // We saved the argument into a virtual register in the entry block,
3294   // so now we copy the value out and into %rax/%eax.
3295   //
3296   // Checking Function.hasStructRetAttr() here is insufficient because the IR
3297   // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
3298   // false, then an sret argument may be implicitly inserted in the SelDAG. In
3299   // either case FuncInfo->setSRetReturnReg() will have been called.
3300   if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
3301     // When we have both sret and another return value, we should use the
3302     // original Chain stored in RetOps[0], instead of the current Chain updated
3303     // in the above loop. If we only have sret, RetOps[0] equals to Chain.
3304 
3305     // For the case of sret and another return value, we have
3306     //   Chain_0 at the function entry
3307     //   Chain_1 = getCopyToReg(Chain_0) in the above loop
3308     // If we use Chain_1 in getCopyFromReg, we will have
3309     //   Val = getCopyFromReg(Chain_1)
3310     //   Chain_2 = getCopyToReg(Chain_1, Val) from below
3311 
3312     // getCopyToReg(Chain_0) will be glued together with
3313     // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
3314     // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
3315     //   Data dependency from Unit B to Unit A due to usage of Val in
3316     //     getCopyToReg(Chain_1, Val)
3317     //   Chain dependency from Unit A to Unit B
3318 
3319     // So here, we use RetOps[0] (i.e., Chain_0) for getCopyFromReg.
3320     SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
3321                                      getPointerTy(MF.getDataLayout()));
3322 
3323     Register RetValReg
3324         = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
3325           X86::RAX : X86::EAX;
3326     Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
3327     Flag = Chain.getValue(1);
3328 
3329     // RAX/EAX now acts like a return value.
3330     RetOps.push_back(
3331         DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
3332 
3333     // Add the returned register to the CalleeSaveDisableRegs list.
3334     if (ShouldDisableCalleeSavedRegister)
3335       MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
3336   }
3337 
3338   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
3339   const MCPhysReg *I =
3340       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3341   if (I) {
3342     for (; *I; ++I) {
3343       if (X86::GR64RegClass.contains(*I))
3344         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
3345       else
3346         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3347     }
3348   }
3349 
3350   RetOps[0] = Chain;  // Update chain.
3351 
3352   // Add the flag if we have it.
3353   if (Flag.getNode())
3354     RetOps.push_back(Flag);
3355 
3356   X86ISD::NodeType opcode = X86ISD::RET_FLAG;
3357   if (CallConv == CallingConv::X86_INTR)
3358     opcode = X86ISD::IRET;
3359   return DAG.getNode(opcode, dl, MVT::Other, RetOps);
3360 }
3361 
3362 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3363   if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
3364     return false;
3365 
3366   SDValue TCChain = Chain;
3367   SDNode *Copy = *N->use_begin();
3368   if (Copy->getOpcode() == ISD::CopyToReg) {
3369     // If the copy has a glue operand, we conservatively assume it isn't safe to
3370     // perform a tail call.
3371     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3372       return false;
3373     TCChain = Copy->getOperand(0);
3374   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
3375     return false;
3376 
3377   bool HasRet = false;
3378   for (const SDNode *U : Copy->uses()) {
3379     if (U->getOpcode() != X86ISD::RET_FLAG)
3380       return false;
3381     // If we are returning more than one value, we can definitely
3382     // not make a tail call; see PR19530.
3383     if (U->getNumOperands() > 4)
3384       return false;
3385     if (U->getNumOperands() == 4 &&
3386         U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
3387       return false;
3388     HasRet = true;
3389   }
3390 
3391   if (!HasRet)
3392     return false;
3393 
3394   Chain = TCChain;
3395   return true;
3396 }
3397 
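// An informal sketch of the DAG shape this predicate accepts (illustrative
// only; the node names below are made up for the example):
//
//   t0 = <candidate node N>
//   t1 = CopyToReg Chain, %reg, t0     (or t1 = fp_extend t0)
//   X86ISD::RET_FLAG t1, ...           (every user of t1 is such a return)
//
// i.e. N has a single use, that use is a glue-free CopyToReg or an FP_EXTEND,
// and every user of that copy is a RET_FLAG returning at most one value; only
// then may the node be folded into a tail call and Chain rewritten to TCChain.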
3398 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
3399                                            ISD::NodeType ExtendKind) const {
3400   MVT ReturnMVT = MVT::i32;
3401 
3402   bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
3403   if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
3404     // The ABI does not require i1, i8 or i16 to be extended.
3405     //
3406     // On Darwin, there is code in the wild relying on Clang's old behaviour of
3407     // always extending i8/i16 return values, so keep doing that for now.
3408     // (PR26665).
3409     ReturnMVT = MVT::i8;
3410   }
3411 
3412   EVT MinVT = getRegisterType(Context, ReturnMVT);
3413   return VT.bitsLT(MinVT) ? MinVT : VT;
3414 }
3415 
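// Informal worked example: returning i8 from a non-Darwin function leaves
// ReturnMVT at i8, so MinVT == i8 and the value is returned as i8 with no
// extension; on Darwin the same i8 falls through with ReturnMVT == i32, so the
// return value is widened to i32 to preserve Clang's historical behaviour
// (see PR26665 above).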
3416 /// Reads two 32-bit registers and creates a 64-bit mask value.
3417 /// \param VA The current 32-bit value that needs to be assigned.
3418 /// \param NextVA The next 32-bit value that needs to be assigned.
3419 /// \param Root The parent DAG node.
3420 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
3421 ///                        for glue purposes. In case the DAG is already using
3422 ///                        a physical register instead of a virtual one, we
3423 ///                        should glue our new SDValue to the InFlag SDValue.
3424 /// \return a new SDValue that is 64 bits wide.
3425 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
3426                                 SDValue &Root, SelectionDAG &DAG,
3427                                 const SDLoc &Dl, const X86Subtarget &Subtarget,
3428                                 SDValue *InFlag = nullptr) {
3429   assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
3430   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3431   assert(VA.getValVT() == MVT::v64i1 &&
3432          "Expecting first location of 64 bit width type");
3433   assert(NextVA.getValVT() == VA.getValVT() &&
3434          "The locations should have the same type");
3435   assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3436          "The values should reside in two registers");
3437 
3438   SDValue Lo, Hi;
3439   SDValue ArgValueLo, ArgValueHi;
3440 
3441   MachineFunction &MF = DAG.getMachineFunction();
3442   const TargetRegisterClass *RC = &X86::GR32RegClass;
3443 
3444   // Read a 32 bit value from the registers.
3445   if (nullptr == InFlag) {
3446     // When no physical register is present,
3447     // create an intermediate virtual register.
3448     Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
3449     ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3450     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3451     ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3452   } else {
3453     // When a physical register is available read the value from it and glue
3454     // the reads together.
3455     ArgValueLo =
3456       DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
3457     *InFlag = ArgValueLo.getValue(2);
3458     ArgValueHi =
3459       DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
3460     *InFlag = ArgValueHi.getValue(2);
3461   }
3462 
3463   // Convert the i32 type into v32i1 type.
3464   Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
3465 
3466   // Convert the i32 type into v32i1 type.
3467   Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
3468 
3469   // Concatenate the two values together.
3470   return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
3471 }
3472 
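// Illustrative example (the concrete registers are hypothetical; the real ones
// come from the RegCall assignment): a v64i1 argument whose halves land in two
// GR32 locations, say EAX (low 32 bits) and ECX (high 32 bits), is rebuilt as
//
//   Lo  = bitcast EAX (i32) to v32i1
//   Hi  = bitcast ECX (i32) to v32i1
//   Arg = concat_vectors Lo, Hi        ; v64i1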
3473 /// The function will lower a register of various sizes (8/16/32/64 bits)
3474 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
3475 /// \returns a DAG node containing the operand after lowering to a mask type.
3476 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
3477                                const EVT &ValLoc, const SDLoc &Dl,
3478                                SelectionDAG &DAG) {
3479   SDValue ValReturned = ValArg;
3480 
3481   if (ValVT == MVT::v1i1)
3482     return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
3483 
3484   if (ValVT == MVT::v64i1) {
3485     // On a 32-bit machine, this case is handled by getv64i1Argument.
3486     assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
3487     // On a 64-bit machine, there is no need to truncate the value, only bitcast.
3488   } else {
3489     MVT maskLen;
3490     switch (ValVT.getSimpleVT().SimpleTy) {
3491     case MVT::v8i1:
3492       maskLen = MVT::i8;
3493       break;
3494     case MVT::v16i1:
3495       maskLen = MVT::i16;
3496       break;
3497     case MVT::v32i1:
3498       maskLen = MVT::i32;
3499       break;
3500     default:
3501       llvm_unreachable("Expecting a vector of i1 types");
3502     }
3503 
3504     ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
3505   }
3506   return DAG.getBitcast(ValVT, ValReturned);
3507 }
3508 
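// Informal example: a v16i1 value that was promoted into an i32 location is
// first truncated to i16 and then bitcast to v16i1, while a v64i1 value that
// already sits in an i64 location only needs the final bitcast.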
3509 /// Lower the result values of a call into the
3510 /// appropriate copies out of appropriate physical registers.
3511 ///
3512 SDValue X86TargetLowering::LowerCallResult(
3513     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
3514     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3515     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3516     uint32_t *RegMask) const {
3517 
3518   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3519   // Assign locations to each value returned by this call.
3520   SmallVector<CCValAssign, 16> RVLocs;
3521   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3522                  *DAG.getContext());
3523   CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3524 
3525   // Copy all of the result registers out of their specified physreg.
3526   for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3527        ++I, ++InsIndex) {
3528     CCValAssign &VA = RVLocs[I];
3529     EVT CopyVT = VA.getLocVT();
3530 
3531     // In some calling conventions we need to remove the used registers
3532     // from the register mask.
3533     if (RegMask) {
3534       for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3535            SubRegs.isValid(); ++SubRegs)
3536         RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3537     }
3538 
3539     // Report an error if there was an attempt to return FP values via XMM
3540     // registers.
3541     if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3542       errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3543       if (VA.getLocReg() == X86::XMM1)
3544         VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3545       else
3546         VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3547     } else if (!Subtarget.hasSSE2() &&
3548                X86::FR64XRegClass.contains(VA.getLocReg()) &&
3549                CopyVT == MVT::f64) {
3550       errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3551       if (VA.getLocReg() == X86::XMM1)
3552         VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3553       else
3554         VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3555     }
3556 
3557     // If we prefer to use the value in xmm registers, copy it out as f80 and
3558     // use a truncate to move it from fp stack reg to xmm reg.
3559     bool RoundAfterCopy = false;
3560     if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3561         isScalarFPTypeInSSEReg(VA.getValVT())) {
3562       if (!Subtarget.hasX87())
3563         report_fatal_error("X87 register return with X87 disabled");
3564       CopyVT = MVT::f80;
3565       RoundAfterCopy = (CopyVT != VA.getLocVT());
3566     }
3567 
3568     SDValue Val;
3569     if (VA.needsCustom()) {
3570       assert(VA.getValVT() == MVT::v64i1 &&
3571              "Currently the only custom case is when we split v64i1 to 2 regs");
3572       Val =
3573           getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3574     } else {
3575       Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3576                   .getValue(1);
3577       Val = Chain.getValue(0);
3578       InFlag = Chain.getValue(2);
3579     }
3580 
3581     if (RoundAfterCopy)
3582       Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3583                         // This truncation won't change the value.
3584                         DAG.getIntPtrConstant(1, dl, /*isTarget=*/true));
3585 
3586     if (VA.isExtInLoc()) {
3587       if (VA.getValVT().isVector() &&
3588           VA.getValVT().getScalarType() == MVT::i1 &&
3589           ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3590            (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3591         // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3592         Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3593       } else
3594         Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3595     }
3596 
3597     if (VA.getLocInfo() == CCValAssign::BCvt)
3598       Val = DAG.getBitcast(VA.getValVT(), Val);
3599 
3600     InVals.push_back(Val);
3601   }
3602 
3603   return Chain;
3604 }
3605 
3606 //===----------------------------------------------------------------------===//
3607 //                C & StdCall & Fast Calling Convention implementation
3608 //===----------------------------------------------------------------------===//
3609 //  The StdCall calling convention seems to be standard for many Windows API
3610 //  routines. It differs from the C calling convention just a little: the
3611 //  callee should clean up the stack, not the caller. Symbols should also be
3612 //  decorated in some fancy way :) It doesn't support any vector arguments.
3613 //  For info on the fast calling convention see the Fast Calling Convention
3614 //  (tail call) implementation, LowerX86_32FastCCCallTo.
3615 
3616 /// Determines whether Args, either a set of outgoing arguments to a call or a
3617 /// set of incoming args of a call, contains an sret pointer that the callee
3618 /// pops.
3619 template <typename T>
3620 static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
3621                              const X86Subtarget &Subtarget) {
3622   // Not C++20 (yet), so no concepts available.
3623   static_assert(std::is_same_v<T, ISD::OutputArg> ||
3624                     std::is_same_v<T, ISD::InputArg>,
3625                 "requires ISD::OutputArg or ISD::InputArg");
3626 
3627   // Only 32-bit targets pop the sret.  It's a 64-bit world these days, so
3628   // early-out for most compilations.
3629   if (!Subtarget.is32Bit())
3630     return false;
3631 
3632   if (Args.empty())
3633     return false;
3634 
3635   // Most calls do not have an sret argument; check the arg next.
3636   const ISD::ArgFlagsTy &Flags = Args[0].Flags;
3637   if (!Flags.isSRet() || Flags.isInReg())
3638     return false;
3639 
3640   // The MSVC ABI does not pop the sret.
3641   if (Subtarget.getTargetTriple().isOSMSVCRT())
3642     return false;
3643 
3644   // MCUs don't pop the sret
3645   if (Subtarget.isTargetMCU())
3646     return false;
3647 
3648   // Callee pops argument
3649   return true;
3650 }
3651 
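// Illustrative source-level example (hypothetical code, 32-bit SysV-style
// target assumed):
//
//   struct Big { int v[8]; };
//   struct Big makeBig(void);   // hidden sret pointer passed on the stack
//
// Here the callee pops the hidden sret pointer (it returns with `ret $4`), so
// the caller must not adjust the stack for it again. MSVC and MCU targets do
// not use this callee-pop behaviour, which is what the checks above encode.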
3652 /// Make a copy of an aggregate at address specified by "Src" to address
3653 /// "Dst" with size and alignment information specified by the specific
3654 /// parameter attribute. The copy will be passed as a byval function parameter.
3655 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3656                                          SDValue Chain, ISD::ArgFlagsTy Flags,
3657                                          SelectionDAG &DAG, const SDLoc &dl) {
3658   SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);
3659 
3660   return DAG.getMemcpy(
3661       Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
3662       /*isVolatile*/ false, /*AlwaysInline=*/true,
3663       /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
3664 }
3665 
3666 /// Return true if the calling convention is one that we can guarantee TCO for.
3667 static bool canGuaranteeTCO(CallingConv::ID CC) {
3668   return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3669           CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3670           CC == CallingConv::HHVM || CC == CallingConv::Tail ||
3671           CC == CallingConv::SwiftTail);
3672 }
3673 
3674 /// Return true if we might ever do TCO for calls with this calling convention.
3675 static bool mayTailCallThisCC(CallingConv::ID CC) {
3676   switch (CC) {
3677   // C calling conventions:
3678   case CallingConv::C:
3679   case CallingConv::Win64:
3680   case CallingConv::X86_64_SysV:
3681   // Callee pop conventions:
3682   case CallingConv::X86_ThisCall:
3683   case CallingConv::X86_StdCall:
3684   case CallingConv::X86_VectorCall:
3685   case CallingConv::X86_FastCall:
3686   // Swift:
3687   case CallingConv::Swift:
3688     return true;
3689   default:
3690     return canGuaranteeTCO(CC);
3691   }
3692 }
3693 
3694 /// Return true if the function is being made into a tailcall target by
3695 /// changing its ABI.
3696 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3697   return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) ||
3698          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
3699 }
3700 
3701 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3702   if (!CI->isTailCall())
3703     return false;
3704 
3705   CallingConv::ID CalleeCC = CI->getCallingConv();
3706   if (!mayTailCallThisCC(CalleeCC))
3707     return false;
3708 
3709   return true;
3710 }
3711 
3712 SDValue
3713 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3714                                     const SmallVectorImpl<ISD::InputArg> &Ins,
3715                                     const SDLoc &dl, SelectionDAG &DAG,
3716                                     const CCValAssign &VA,
3717                                     MachineFrameInfo &MFI, unsigned i) const {
3718   // Create the nodes corresponding to a load from this parameter slot.
3719   ISD::ArgFlagsTy Flags = Ins[i].Flags;
3720   bool AlwaysUseMutable = shouldGuaranteeTCO(
3721       CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3722   bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3723   EVT ValVT;
3724   MVT PtrVT = getPointerTy(DAG.getDataLayout());
3725 
3726   // If the value is passed by pointer, the address is passed instead of the
3727   // value itself. No need to extend if the mask value and location share the
3728   // same absolute size.
3729   bool ExtendedInMem =
3730       VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3731       VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3732 
3733   if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3734     ValVT = VA.getLocVT();
3735   else
3736     ValVT = VA.getValVT();
3737 
3738   // FIXME: For now, all byval parameter objects are marked mutable. This can be
3739   // changed with more analysis.
3740   // In the case of tail call optimization, mark all arguments mutable, since
3741   // they could be overwritten by the lowering of arguments for a tail call.
3742   if (Flags.isByVal()) {
3743     unsigned Bytes = Flags.getByValSize();
3744     if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3745 
3746     // FIXME: For now, all byval parameter objects are marked as aliasing. This
3747     // can be improved with deeper analysis.
3748     int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3749                                    /*isAliased=*/true);
3750     return DAG.getFrameIndex(FI, PtrVT);
3751   }
3752 
3753   EVT ArgVT = Ins[i].ArgVT;
3754 
3755   // If this is a vector that has been split into multiple parts, and the
3756   // scalar size of the parts doesn't match the vector element size, then we can't
3757   // elide the copy. The parts will have padding between them instead of being
3758   // packed like a vector.
3759   bool ScalarizedAndExtendedVector =
3760       ArgVT.isVector() && !VA.getLocVT().isVector() &&
3761       VA.getLocVT().getSizeInBits() != ArgVT.getScalarSizeInBits();
3762 
3763   // This is an argument in memory. We might be able to perform copy elision.
3764   // If the argument is passed directly in memory without any extension, then we
3765   // can perform copy elision. Large vector types, for example, may be passed
3766   // indirectly by pointer.
3767   if (Flags.isCopyElisionCandidate() &&
3768       VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
3769       !ScalarizedAndExtendedVector) {
3770     SDValue PartAddr;
3771     if (Ins[i].PartOffset == 0) {
3772       // If this is a one-part value or the first part of a multi-part value,
3773       // create a stack object for the entire argument value type and return a
3774       // load from our portion of it. This assumes that if the first part of an
3775       // argument is in memory, the rest will also be in memory.
3776       int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3777                                      /*IsImmutable=*/false);
3778       PartAddr = DAG.getFrameIndex(FI, PtrVT);
3779       return DAG.getLoad(
3780           ValVT, dl, Chain, PartAddr,
3781           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3782     } else {
3783       // This is not the first piece of an argument in memory. See if there is
3784       // already a fixed stack object including this offset. If so, assume it
3785       // was created by the PartOffset == 0 branch above and create a load from
3786       // the appropriate offset into it.
3787       int64_t PartBegin = VA.getLocMemOffset();
3788       int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3789       int FI = MFI.getObjectIndexBegin();
3790       for (; MFI.isFixedObjectIndex(FI); ++FI) {
3791         int64_t ObjBegin = MFI.getObjectOffset(FI);
3792         int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3793         if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3794           break;
3795       }
3796       if (MFI.isFixedObjectIndex(FI)) {
3797         SDValue Addr =
3798             DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3799                         DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3800         return DAG.getLoad(
3801             ValVT, dl, Chain, Addr,
3802             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3803                                               Ins[i].PartOffset));
3804       }
3805     }
3806   }
3807 
3808   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3809                                  VA.getLocMemOffset(), isImmutable);
3810 
3811   // Set SExt or ZExt flag.
3812   if (VA.getLocInfo() == CCValAssign::ZExt) {
3813     MFI.setObjectZExt(FI, true);
3814   } else if (VA.getLocInfo() == CCValAssign::SExt) {
3815     MFI.setObjectSExt(FI, true);
3816   }
3817 
3818   MaybeAlign Alignment;
3819   if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
3820       ValVT != MVT::f80)
3821     Alignment = MaybeAlign(4);
3822   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3823   SDValue Val = DAG.getLoad(
3824       ValVT, dl, Chain, FIN,
3825       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
3826       Alignment);
3827   return ExtendedInMem
3828              ? (VA.getValVT().isVector()
3829                     ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3830                     : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3831              : Val;
3832 }
3833 
3834 // FIXME: Get this from tablegen.
3835 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3836                                                 const X86Subtarget &Subtarget) {
3837   assert(Subtarget.is64Bit());
3838 
3839   if (Subtarget.isCallingConvWin64(CallConv)) {
3840     static const MCPhysReg GPR64ArgRegsWin64[] = {
3841       X86::RCX, X86::RDX, X86::R8,  X86::R9
3842     };
3843     return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3844   }
3845 
3846   static const MCPhysReg GPR64ArgRegs64Bit[] = {
3847     X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3848   };
3849   return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3850 }
3851 
3852 // FIXME: Get this from tablegen.
3853 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3854                                                 CallingConv::ID CallConv,
3855                                                 const X86Subtarget &Subtarget) {
3856   assert(Subtarget.is64Bit());
3857   if (Subtarget.isCallingConvWin64(CallConv)) {
3858     // The XMM registers which might contain var arg parameters are shadowed
3859     // in their paired GPRs, so we only need to save the GPRs to their home
3860     // slots.
3861     // TODO: __vectorcall will change this.
3862     return std::nullopt;
3863   }
3864 
3865   bool isSoftFloat = Subtarget.useSoftFloat();
3866   if (isSoftFloat || !Subtarget.hasSSE1())
3867     // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3868     // registers.
3869     return std::nullopt;
3870 
3871   static const MCPhysReg XMMArgRegs64Bit[] = {
3872     X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3873     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3874   };
3875   return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3876 }
3877 
3878 #ifndef NDEBUG
3879 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3880   return llvm::is_sorted(
3881       ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
3882         return A.getValNo() < B.getValNo();
3883       });
3884 }
3885 #endif
3886 
3887 namespace {
3888 /// This is a helper class for lowering variable argument (vararg) parameters.
3889 class VarArgsLoweringHelper {
3890 public:
3891   VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
3892                         SelectionDAG &DAG, const X86Subtarget &Subtarget,
3893                         CallingConv::ID CallConv, CCState &CCInfo)
3894       : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
3895         TheMachineFunction(DAG.getMachineFunction()),
3896         TheFunction(TheMachineFunction.getFunction()),
3897         FrameInfo(TheMachineFunction.getFrameInfo()),
3898         FrameLowering(*Subtarget.getFrameLowering()),
3899         TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
3900         CCInfo(CCInfo) {}
3901 
3902   // Lower variable arguments parameters.
3903   // Lower variable argument (vararg) parameters.
3904 
3905 private:
3906   void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
3907 
3908   void forwardMustTailParameters(SDValue &Chain);
3909 
3910   bool is64Bit() const { return Subtarget.is64Bit(); }
3911   bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
3912 
3913   X86MachineFunctionInfo *FuncInfo;
3914   const SDLoc &DL;
3915   SelectionDAG &DAG;
3916   const X86Subtarget &Subtarget;
3917   MachineFunction &TheMachineFunction;
3918   const Function &TheFunction;
3919   MachineFrameInfo &FrameInfo;
3920   const TargetFrameLowering &FrameLowering;
3921   const TargetLowering &TargLowering;
3922   CallingConv::ID CallConv;
3923   CCState &CCInfo;
3924 };
3925 } // namespace
3926 
3927 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
3928     SDValue &Chain, unsigned StackSize) {
3929   // If the function takes a variable number of arguments, make a frame index
3930   // for the start of the first vararg value... for expansion of llvm.va_start.
3931   // We can skip this if there are no va_start calls.
3932   if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
3933                     CallConv != CallingConv::X86_ThisCall)) {
3934     FuncInfo->setVarArgsFrameIndex(
3935         FrameInfo.CreateFixedObject(1, StackSize, true));
3936   }
3937 
3938   // 64-bit calling conventions support varargs and register parameters, so we
3939   // have to do extra work to spill them in the prologue.
3940   if (is64Bit()) {
3941     // Find the first unallocated argument registers.
3942     ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3943     ArrayRef<MCPhysReg> ArgXMMs =
3944         get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
3945     unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3946     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3947 
3948     assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3949            "SSE register cannot be used when SSE is disabled!");
3950 
3951     if (isWin64()) {
3952       // Get to the caller-allocated home save location.  Add 8 to account
3953       // for the return address.
3954       int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
3955       FuncInfo->setRegSaveFrameIndex(
3956           FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3957       // Fixup to set vararg frame on shadow area (4 x i64).
3958       if (NumIntRegs < 4)
3959         FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3960     } else {
3961       // For X86-64, if there are vararg parameters that are passed via
3962       // registers, then we must store them to their spots on the stack so
3963       // they may be loaded by dereferencing the result of va_next.
3964       FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3965       FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3966       FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
3967           ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
3968     }
3969 
3970     SmallVector<SDValue, 6>
3971         LiveGPRs; // list of SDValues for GPR registers holding live-in values
3972     SmallVector<SDValue, 8> LiveXMMRegs; // list of SDValues for XMM registers
3973                                          // holding live-in values
3974     SDValue ALVal; // if applicable, holds the SDValue for the %al register
3975 
3976     // Gather all the live in physical registers.
3977     for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3978       Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
3979       LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
3980     }
3981     const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
3982     if (!AvailableXmms.empty()) {
3983       Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3984       ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
3985       for (MCPhysReg Reg : AvailableXmms) {
3986         // FastRegisterAllocator spills virtual registers at basic
3987         // block boundaries. That leads to uses of xmm registers
3988         // outside of the check for %al. Pass physical registers to
3989         // VASTART_SAVE_XMM_REGS to avoid unnecessary spilling.
3990         TheMachineFunction.getRegInfo().addLiveIn(Reg);
3991         LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
3992       }
3993     }
3994 
3995     // Store the integer parameter registers.
3996     SmallVector<SDValue, 8> MemOps;
3997     SDValue RSFIN =
3998         DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3999                           TargLowering.getPointerTy(DAG.getDataLayout()));
4000     unsigned Offset = FuncInfo->getVarArgsGPOffset();
4001     for (SDValue Val : LiveGPRs) {
4002       SDValue FIN = DAG.getNode(ISD::ADD, DL,
4003                                 TargLowering.getPointerTy(DAG.getDataLayout()),
4004                                 RSFIN, DAG.getIntPtrConstant(Offset, DL));
4005       SDValue Store =
4006           DAG.getStore(Val.getValue(1), DL, Val, FIN,
4007                        MachinePointerInfo::getFixedStack(
4008                            DAG.getMachineFunction(),
4009                            FuncInfo->getRegSaveFrameIndex(), Offset));
4010       MemOps.push_back(Store);
4011       Offset += 8;
4012     }
4013 
4014     // Now store the XMM (fp + vector) parameter registers.
4015     if (!LiveXMMRegs.empty()) {
4016       SmallVector<SDValue, 12> SaveXMMOps;
4017       SaveXMMOps.push_back(Chain);
4018       SaveXMMOps.push_back(ALVal);
4019       SaveXMMOps.push_back(RSFIN);
4020       SaveXMMOps.push_back(
4021           DAG.getTargetConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32));
4022       llvm::append_range(SaveXMMOps, LiveXMMRegs);
4023       MachineMemOperand *StoreMMO =
4024           DAG.getMachineFunction().getMachineMemOperand(
4025               MachinePointerInfo::getFixedStack(
4026                   DAG.getMachineFunction(), FuncInfo->getRegSaveFrameIndex(),
4027                   Offset),
4028               MachineMemOperand::MOStore, 128, Align(16));
4029       MemOps.push_back(DAG.getMemIntrinsicNode(X86ISD::VASTART_SAVE_XMM_REGS,
4030                                                DL, DAG.getVTList(MVT::Other),
4031                                                SaveXMMOps, MVT::i8, StoreMMO));
4032     }
4033 
4034     if (!MemOps.empty())
4035       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
4036   }
4037 }
4038 
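// Informal sketch of the SysV x86-64 register save area created above (the
// sizes follow directly from the code: 6 GPR slots + 8 XMM slots):
//
//   bytes   0 ..  47 : RDI, RSI, RDX, RCX, R8, R9   (8 bytes each)
//   bytes  48 .. 175 : XMM0 .. XMM7                 (16 bytes each)
//
// VarArgsGPOffset / VarArgsFPOffset record how far into this area the first
// unnamed GPR / XMM argument lands, which is what va_arg consumes later.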
4039 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
4040   // Find the largest legal vector type.
4041   MVT VecVT = MVT::Other;
4042   // FIXME: Only some x86_32 calling conventions support AVX512.
4043   if (Subtarget.useAVX512Regs() &&
4044       (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
4045                      CallConv == CallingConv::Intel_OCL_BI)))
4046     VecVT = MVT::v16f32;
4047   else if (Subtarget.hasAVX())
4048     VecVT = MVT::v8f32;
4049   else if (Subtarget.hasSSE2())
4050     VecVT = MVT::v4f32;
4051 
4052   // We forward some GPRs and some vector types.
4053   SmallVector<MVT, 2> RegParmTypes;
4054   MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
4055   RegParmTypes.push_back(IntVT);
4056   if (VecVT != MVT::Other)
4057     RegParmTypes.push_back(VecVT);
4058 
4059   // Compute the set of forwarded registers. The rest are scratch.
4060   SmallVectorImpl<ForwardedRegister> &Forwards =
4061       FuncInfo->getForwardedMustTailRegParms();
4062   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
4063 
4064   // Forward AL for SysV x86_64 targets, since it is used for varargs.
4065   if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
4066     Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
4067     Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
4068   }
4069 
4070   // Copy all forwards from physical to virtual registers.
4071   for (ForwardedRegister &FR : Forwards) {
4072     // FIXME: Can we use a less constrained schedule?
4073     SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
4074     FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
4075         TargLowering.getRegClassFor(FR.VT));
4076     Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
4077   }
4078 }
4079 
4080 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
4081                                                    unsigned StackSize) {
4082   // Set FrameIndex to the 0xAAAAAAA value to mark the unset state.
4083   // If necessary, it will be set to the correct value later.
4084   FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
4085   FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4086 
4087   if (FrameInfo.hasVAStart())
4088     createVarArgAreaAndStoreRegisters(Chain, StackSize);
4089 
4090   if (FrameInfo.hasMustTailInVarArgFunc())
4091     forwardMustTailParameters(Chain);
4092 }
4093 
4094 SDValue X86TargetLowering::LowerFormalArguments(
4095     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
4096     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4097     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4098   MachineFunction &MF = DAG.getMachineFunction();
4099   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4100 
4101   const Function &F = MF.getFunction();
4102   if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
4103       F.getName() == "main")
4104     FuncInfo->setForceFramePointer(true);
4105 
4106   MachineFrameInfo &MFI = MF.getFrameInfo();
4107   bool Is64Bit = Subtarget.is64Bit();
4108   bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
4109 
4110   assert(
4111       !(IsVarArg && canGuaranteeTCO(CallConv)) &&
4112       "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
4113 
4114   // Assign locations to all of the incoming arguments.
4115   SmallVector<CCValAssign, 16> ArgLocs;
4116   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
4117 
4118   // Allocate shadow area for Win64.
4119   if (IsWin64)
4120     CCInfo.AllocateStack(32, Align(8));
4121 
4122   CCInfo.AnalyzeArguments(Ins, CC_X86);
4123 
4124   // In vectorcall calling convention a second pass is required for the HVA
4125   // types.
4126   if (CallingConv::X86_VectorCall == CallConv) {
4127     CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
4128   }
4129 
4130   // The next loop assumes that the locations are in the same order as the
4131   // input arguments.
4132   assert(isSortedByValueNo(ArgLocs) &&
4133          "Argument Location list must be sorted before lowering");
4134 
4135   SDValue ArgValue;
4136   for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
4137        ++I, ++InsIndex) {
4138     assert(InsIndex < Ins.size() && "Invalid Ins index");
4139     CCValAssign &VA = ArgLocs[I];
4140 
4141     if (VA.isRegLoc()) {
4142       EVT RegVT = VA.getLocVT();
4143       if (VA.needsCustom()) {
4144         assert(
4145             VA.getValVT() == MVT::v64i1 &&
4146             "Currently the only custom case is when we split v64i1 to 2 regs");
4147 
4148         // In the regcall calling convention on a 32-bit arch, v64i1 values
4149         // are split up into two registers.
4150         ArgValue =
4151             getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
4152       } else {
4153         const TargetRegisterClass *RC;
4154         if (RegVT == MVT::i8)
4155           RC = &X86::GR8RegClass;
4156         else if (RegVT == MVT::i16)
4157           RC = &X86::GR16RegClass;
4158         else if (RegVT == MVT::i32)
4159           RC = &X86::GR32RegClass;
4160         else if (Is64Bit && RegVT == MVT::i64)
4161           RC = &X86::GR64RegClass;
4162         else if (RegVT == MVT::f16)
4163           RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
4164         else if (RegVT == MVT::f32)
4165           RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
4166         else if (RegVT == MVT::f64)
4167           RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
4168         else if (RegVT == MVT::f80)
4169           RC = &X86::RFP80RegClass;
4170         else if (RegVT == MVT::f128)
4171           RC = &X86::VR128RegClass;
4172         else if (RegVT.is512BitVector())
4173           RC = &X86::VR512RegClass;
4174         else if (RegVT.is256BitVector())
4175           RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
4176         else if (RegVT.is128BitVector())
4177           RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
4178         else if (RegVT == MVT::x86mmx)
4179           RC = &X86::VR64RegClass;
4180         else if (RegVT == MVT::v1i1)
4181           RC = &X86::VK1RegClass;
4182         else if (RegVT == MVT::v8i1)
4183           RC = &X86::VK8RegClass;
4184         else if (RegVT == MVT::v16i1)
4185           RC = &X86::VK16RegClass;
4186         else if (RegVT == MVT::v32i1)
4187           RC = &X86::VK32RegClass;
4188         else if (RegVT == MVT::v64i1)
4189           RC = &X86::VK64RegClass;
4190         else
4191           llvm_unreachable("Unknown argument type!");
4192 
4193         Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4194         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4195       }
4196 
4197       // If this is an 8 or 16-bit value, it is really passed promoted to 32
4198       // bits.  Insert an assert[sz]ext to capture this, then truncate to the
4199       // right size.
4200       if (VA.getLocInfo() == CCValAssign::SExt)
4201         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4202                                DAG.getValueType(VA.getValVT()));
4203       else if (VA.getLocInfo() == CCValAssign::ZExt)
4204         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4205                                DAG.getValueType(VA.getValVT()));
4206       else if (VA.getLocInfo() == CCValAssign::BCvt)
4207         ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
4208 
4209       if (VA.isExtInLoc()) {
4210         // Handle MMX values passed in XMM regs.
4211         if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
4212           ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
4213         else if (VA.getValVT().isVector() &&
4214                  VA.getValVT().getScalarType() == MVT::i1 &&
4215                  ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
4216                   (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
4217           // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
4218           ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
4219         } else
4220           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4221       }
4222     } else {
4223       assert(VA.isMemLoc());
4224       ArgValue =
4225           LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
4226     }
4227 
4228     // If the value is passed via a pointer, do a load.
4229     if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
4230       ArgValue =
4231           DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
4232 
4233     InVals.push_back(ArgValue);
4234   }
4235 
4236   for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
4237     if (Ins[I].Flags.isSwiftAsync()) {
4238       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
4239       if (Subtarget.is64Bit())
4240         X86FI->setHasSwiftAsyncContext(true);
4241       else {
4242         int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
4243         X86FI->setSwiftAsyncContextFrameIdx(FI);
4244         SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
4245                                   DAG.getFrameIndex(FI, MVT::i32),
4246                                   MachinePointerInfo::getFixedStack(MF, FI));
4247         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
4248       }
4249     }
4250 
4251     // Swift calling convention does not require we copy the sret argument
4252     // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
4253     if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail)
4254       continue;
4255 
4256     // All x86 ABIs require that for returning structs by value we copy the
4257     // sret argument into %rax/%eax (depending on ABI) for the return. Save
4258     // the argument into a virtual register so that we can access it from the
4259     // return points.
4260     if (Ins[I].Flags.isSRet()) {
4261       assert(!FuncInfo->getSRetReturnReg() &&
4262              "SRet return has already been set");
4263       MVT PtrTy = getPointerTy(DAG.getDataLayout());
4264       Register Reg =
4265           MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
4266       FuncInfo->setSRetReturnReg(Reg);
4267       SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
4268       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
4269       break;
4270     }
4271   }
4272 
4273   unsigned StackSize = CCInfo.getNextStackOffset();
4274   // Align stack specially for tail calls.
4275   if (shouldGuaranteeTCO(CallConv,
4276                          MF.getTarget().Options.GuaranteedTailCallOpt))
4277     StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
4278 
4279   if (IsVarArg)
4280     VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
4281         .lowerVarArgsParameters(Chain, StackSize);
4282 
4283   // Some CCs need callee pop.
4284   if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
4285                        MF.getTarget().Options.GuaranteedTailCallOpt)) {
4286     FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
4287   } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
4288     // X86 interrupts must pop the error code (and the alignment padding) if
4289     // present.
4290     FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
4291   } else {
4292     FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
4293     // If this is an sret function, the return should pop the hidden pointer.
4294     if (!canGuaranteeTCO(CallConv) && hasCalleePopSRet(Ins, Subtarget))
4295       FuncInfo->setBytesToPopOnReturn(4);
4296   }
4297 
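// Informal example: a 32-bit `void __stdcall f(int, int)` has 8 bytes of
// callee-popped arguments, so BytesToPopOnReturn is 8 and the return is
// emitted as `ret $8`; a plain cdecl function sets it to 0, or to 4 if it
// returns a struct through a callee-popped sret pointer.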
4298   if (!Is64Bit) {
4299     // RegSaveFrameIndex is X86-64 only.
4300     FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4301   }
4302 
4303   FuncInfo->setArgumentStackSize(StackSize);
4304 
4305   if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
4306     EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
4307     if (Personality == EHPersonality::CoreCLR) {
4308       assert(Is64Bit);
4309       // TODO: Add a mechanism to frame lowering that will allow us to indicate
4310       // that we'd prefer this slot be allocated towards the bottom of the frame
4311       // (i.e. near the stack pointer after allocating the frame).  Every
4312       // funclet needs a copy of this slot in its (mostly empty) frame, and the
4313       // offset from the bottom of this and each funclet's frame must be the
4314       // same, so the size of funclets' (mostly empty) frames is dictated by
4315       // how far this slot is from the bottom (since they allocate just enough
4316       // space to accommodate holding this slot at the correct offset).
4317       int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);
4318       EHInfo->PSPSymFrameIdx = PSPSymFI;
4319     }
4320   }
4321 
4322   if (CallConv == CallingConv::X86_RegCall ||
4323       F.hasFnAttribute("no_caller_saved_registers")) {
4324     MachineRegisterInfo &MRI = MF.getRegInfo();
4325     for (std::pair<Register, Register> Pair : MRI.liveins())
4326       MRI.disableCalleeSavedRegister(Pair.first);
4327   }
4328 
4329   return Chain;
4330 }
4331 
4332 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
4333                                             SDValue Arg, const SDLoc &dl,
4334                                             SelectionDAG &DAG,
4335                                             const CCValAssign &VA,
4336                                             ISD::ArgFlagsTy Flags,
4337                                             bool isByVal) const {
4338   unsigned LocMemOffset = VA.getLocMemOffset();
4339   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4340   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4341                        StackPtr, PtrOff);
4342   if (isByVal)
4343     return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
4344 
4345   MaybeAlign Alignment;
4346   if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
4347       Arg.getSimpleValueType() != MVT::f80)
4348     Alignment = MaybeAlign(4);
4349   return DAG.getStore(
4350       Chain, dl, Arg, PtrOff,
4351       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
4352       Alignment);
4353 }
4354 
4355 /// Emit a load of the return address if tail call
4356 /// optimization is performed and it is required.
4357 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
4358     SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
4359     bool Is64Bit, int FPDiff, const SDLoc &dl) const {
4360   // Adjust the Return address stack slot.
4361   EVT VT = getPointerTy(DAG.getDataLayout());
4362   OutRetAddr = getReturnAddressFrameIndex(DAG);
4363 
4364   // Load the "old" Return address.
4365   OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
4366   return SDValue(OutRetAddr.getNode(), 1);
4367 }
4368 
4369 /// Emit a store of the return address if tail call
4370 /// optimization is performed and it is required (FPDiff!=0).
4371 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
4372                                         SDValue Chain, SDValue RetAddrFrIdx,
4373                                         EVT PtrVT, unsigned SlotSize,
4374                                         int FPDiff, const SDLoc &dl) {
4375   // Store the return address to the appropriate stack slot.
4376   if (!FPDiff) return Chain;
4377   // Calculate the new stack slot for the return address.
4378   int NewReturnAddrFI =
4379     MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
4380                                          false);
4381   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
4382   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
4383                        MachinePointerInfo::getFixedStack(
4384                            DAG.getMachineFunction(), NewReturnAddrFI));
4385   return Chain;
4386 }
4387 
4388 /// Returns a vector_shuffle mask for a movs{s|d} or movd
4389 /// operation of the specified width.
4390 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
4391                        SDValue V2) {
4392   unsigned NumElems = VT.getVectorNumElements();
4393   SmallVector<int, 8> Mask;
4394   Mask.push_back(NumElems);
4395   for (unsigned i = 1; i != NumElems; ++i)
4396     Mask.push_back(i);
4397   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4398 }
4399 
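// Worked example: for VT = v4f32 the mask built above is <4, 1, 2, 3>, i.e.
// result element 0 comes from V2 and elements 1..3 come from V1 (indices >=
// NumElems select V2), which is the movss-style "replace low element" pattern.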
4400 SDValue
4401 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4402                              SmallVectorImpl<SDValue> &InVals) const {
4403   SelectionDAG &DAG                     = CLI.DAG;
4404   SDLoc &dl                             = CLI.DL;
4405   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4406   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
4407   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
4408   SDValue Chain                         = CLI.Chain;
4409   SDValue Callee                        = CLI.Callee;
4410   CallingConv::ID CallConv              = CLI.CallConv;
4411   bool &isTailCall                      = CLI.IsTailCall;
4412   bool isVarArg                         = CLI.IsVarArg;
4413   const auto *CB                        = CLI.CB;
4414 
4415   MachineFunction &MF = DAG.getMachineFunction();
4416   bool Is64Bit        = Subtarget.is64Bit();
4417   bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
4418   bool IsSibcall      = false;
4419   bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
4420       CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
4421   bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
4422   X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
4423   bool HasNCSR = (CB && isa<CallInst>(CB) &&
4424                   CB->hasFnAttr("no_caller_saved_registers"));
4425   bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
4426   bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
4427   bool IsCFICall = IsIndirectCall && CLI.CFIType;
4428   const Module *M = MF.getMMI().getModule();
4429   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
4430 
4431   MachineFunction::CallSiteInfo CSInfo;
4432   if (CallConv == CallingConv::X86_INTR)
4433     report_fatal_error("X86 interrupts may not be called directly");
4434 
4435   bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
4436   if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
4437     // If we are using a GOT, disable tail calls to external symbols with
4438     // default visibility. Tail calling such a symbol requires using a GOT
4439     // relocation, which forces early binding of the symbol. This breaks code
4440     // that require lazy function symbol resolution. Using musttail or
4441     // that requires lazy function symbol resolution. Using musttail or
4442     GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4443     if (!G || (!G->getGlobal()->hasLocalLinkage() &&
4444                G->getGlobal()->hasDefaultVisibility()))
4445       isTailCall = false;
4446   }
4447 
4448   if (isTailCall && !IsMustTail) {
4449     // Check if it's really possible to do a tail call.
4450     isTailCall = IsEligibleForTailCallOptimization(
4451         Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
4452         Ins, DAG);
4453 
4454     // Sibcalls are automatically detected tailcalls which do not require
4455     // ABI changes.
4456     if (!IsGuaranteeTCO && isTailCall)
4457       IsSibcall = true;
4458 
4459     if (isTailCall)
4460       ++NumTailCalls;
4461   }
4462 
4463   if (IsMustTail && !isTailCall)
4464     report_fatal_error("failed to perform tail call elimination on a call "
4465                        "site marked musttail");
4466 
4467   assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
4468          "Var args not supported with calling convention fastcc, ghc or hipe");
4469 
4470   // Analyze operands of the call, assigning locations to each operand.
4471   SmallVector<CCValAssign, 16> ArgLocs;
4472   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
4473 
4474   // Allocate shadow area for Win64.
4475   if (IsWin64)
4476     CCInfo.AllocateStack(32, Align(8));
4477 
4478   CCInfo.AnalyzeArguments(Outs, CC_X86);
4479 
4480   // In vectorcall calling convention a second pass is required for the HVA
4481   // types.
4482   if (CallingConv::X86_VectorCall == CallConv) {
4483     CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
4484   }
4485 
4486   // Get a count of how many bytes are to be pushed on the stack.
4487   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
4488   if (IsSibcall)
4489     // This is a sibcall. The memory operands are already available in the
4490     // caller's incoming argument area on the stack.
4491     NumBytes = 0;
4492   else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
4493     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
4494 
4495   int FPDiff = 0;
4496   if (isTailCall &&
4497       shouldGuaranteeTCO(CallConv,
4498                          MF.getTarget().Options.GuaranteedTailCallOpt)) {
4499     // Lower arguments at fp - stackoffset + fpdiff.
4500     unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
4501 
4502     FPDiff = NumBytesCallerPushed - NumBytes;
4503 
4504     // Set the delta of movement of the return address stack slot, but only
4505     // if this delta is larger (i.e. more negative) than the previous delta.
4506     if (FPDiff < X86Info->getTCReturnAddrDelta())
4507       X86Info->setTCReturnAddrDelta(FPDiff);
4508   }
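  // Worked example (illustrative, not from the original source): if the caller
  // was entered with 16 bytes of stack arguments (BytesToPopOnReturn == 16)
  // while the callee needs NumBytes == 48, then FPDiff == 16 - 48 == -32, so
  // the return address slot must move 32 bytes further down to make room for
  // the larger outgoing argument area.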
4509 
4510   unsigned NumBytesToPush = NumBytes;
4511   unsigned NumBytesToPop = NumBytes;
4512 
4513   // If we have an inalloca argument, all stack space has already been allocated
4514   // for us and will be right at the top of the stack.  We don't support multiple
4515   // arguments passed in memory when using inalloca.
4516   if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
4517     NumBytesToPush = 0;
4518     if (!ArgLocs.back().isMemLoc())
4519       report_fatal_error("cannot use inalloca attribute on a register "
4520                          "parameter");
4521     if (ArgLocs.back().getLocMemOffset() != 0)
4522       report_fatal_error("any parameter with the inalloca attribute must be "
4523                          "the only memory argument");
4524   } else if (CLI.IsPreallocated) {
4525     assert(ArgLocs.back().isMemLoc() &&
4526            "cannot use preallocated attribute on a register "
4527            "parameter");
4528     SmallVector<size_t, 4> PreallocatedOffsets;
4529     for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
4530       if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
4531         PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
4532       }
4533     }
4534     auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
4535     size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
4536     MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
4537     MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
4538     NumBytesToPush = 0;
4539   }
4540 
4541   if (!IsSibcall && !IsMustTail)
4542     Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
4543                                  NumBytes - NumBytesToPush, dl);
4544 
4545   SDValue RetAddrFrIdx;
4546   // Load return address for tail calls.
4547   if (isTailCall && FPDiff)
4548     Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
4549                                     Is64Bit, FPDiff, dl);
4550 
4551   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
4552   SmallVector<SDValue, 8> MemOpChains;
4553   SDValue StackPtr;
4554 
4555   // The next loop assumes that the locations are in the same order as the
4556   // input arguments.
4557   assert(isSortedByValueNo(ArgLocs) &&
4558          "Argument Location list must be sorted before lowering");
4559 
4560   // Walk the register/memloc assignments, inserting copies/loads.  In the case
4561   // of tail call optimization, arguments are handled later.
4562   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4563   for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
4564        ++I, ++OutIndex) {
4565     assert(OutIndex < Outs.size() && "Invalid Out index");
4566     // Skip inalloca/preallocated arguments, they have already been written.
4567     ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
4568     if (Flags.isInAlloca() || Flags.isPreallocated())
4569       continue;
4570 
4571     CCValAssign &VA = ArgLocs[I];
4572     EVT RegVT = VA.getLocVT();
4573     SDValue Arg = OutVals[OutIndex];
4574     bool isByVal = Flags.isByVal();
4575 
4576     // Promote the value if needed.
4577     switch (VA.getLocInfo()) {
4578     default: llvm_unreachable("Unknown loc info!");
4579     case CCValAssign::Full: break;
4580     case CCValAssign::SExt:
4581       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
4582       break;
4583     case CCValAssign::ZExt:
4584       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
4585       break;
4586     case CCValAssign::AExt:
4587       if (Arg.getValueType().isVector() &&
4588           Arg.getValueType().getVectorElementType() == MVT::i1)
4589         Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4590       else if (RegVT.is128BitVector()) {
4591         // Special case: passing MMX values in XMM registers.
4592         Arg = DAG.getBitcast(MVT::i64, Arg);
4593         Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4594         Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
4595       } else
4596         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
4597       break;
4598     case CCValAssign::BCvt:
4599       Arg = DAG.getBitcast(RegVT, Arg);
4600       break;
4601     case CCValAssign::Indirect: {
4602       if (isByVal) {
4603         // Memcpy the argument to a temporary stack slot to prevent
4604         // the caller from seeing any modifications the callee may make
4605         // as guaranteed by the `byval` attribute.
4606         int FrameIdx = MF.getFrameInfo().CreateStackObject(
4607             Flags.getByValSize(),
4608             std::max(Align(16), Flags.getNonZeroByValAlign()), false);
4609         SDValue StackSlot =
4610             DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
4611         Chain =
4612             CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4613         // From now on treat this as a regular pointer
4614         Arg = StackSlot;
4615         isByVal = false;
4616       } else {
4617         // Store the argument.
4618         SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4619         int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4620         Chain = DAG.getStore(
4621             Chain, dl, Arg, SpillSlot,
4622             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4623         Arg = SpillSlot;
4624       }
4625       break;
4626     }
4627     }
4628 
4629     if (VA.needsCustom()) {
4630       assert(VA.getValVT() == MVT::v64i1 &&
4631              "Currently the only custom case is when we split v64i1 to 2 regs");
4632       // Split v64i1 value into two registers
4633       Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4634     } else if (VA.isRegLoc()) {
4635       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4636       const TargetOptions &Options = DAG.getTarget().Options;
4637       if (Options.EmitCallSiteInfo)
4638         CSInfo.emplace_back(VA.getLocReg(), I);
4639       if (isVarArg && IsWin64) {
4640         // The Win64 ABI requires an argument XMM reg to be copied to the
4641         // corresponding shadow reg if the callee is a varargs function.
4642         Register ShadowReg;
4643         switch (VA.getLocReg()) {
4644         case X86::XMM0: ShadowReg = X86::RCX; break;
4645         case X86::XMM1: ShadowReg = X86::RDX; break;
4646         case X86::XMM2: ShadowReg = X86::R8; break;
4647         case X86::XMM3: ShadowReg = X86::R9; break;
4648         }
4649         if (ShadowReg)
4650           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4651       }
4652     } else if (!IsSibcall && (!isTailCall || isByVal)) {
4653       assert(VA.isMemLoc());
4654       if (!StackPtr.getNode())
4655         StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4656                                       getPointerTy(DAG.getDataLayout()));
4657       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4658                                              dl, DAG, VA, Flags, isByVal));
4659     }
4660   }
4661 
4662   if (!MemOpChains.empty())
4663     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4664 
4665   if (Subtarget.isPICStyleGOT()) {
4666     // ELF / PIC requires the GOT pointer to be in the EBX register before
4667     // function calls made via the PLT (except for regcall).
4668     if (!isTailCall) {
4669       // An indirect call with the RegCall calling convention may use up all the
4670       // general-purpose registers, so it is not suitable to bind EBX as the
4671       // GOT address register; just let the register allocator handle it.
4672       if (CallConv != CallingConv::X86_RegCall)
4673         RegsToPass.push_back(std::make_pair(
4674           Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4675                                           getPointerTy(DAG.getDataLayout()))));
4676     } else {
4677       // If we are tail calling and generating PIC/GOT style code load the
4678       // address of the callee into ECX. The value in ecx is used as target of
4679       // the tail jump. This is done to circumvent the ebx/callee-saved problem
4680       // for tail calls on PIC/GOT architectures. Normally we would just put the
4681       // address of GOT into ebx and then call target@PLT. But for tail calls
4682       // ebx would be restored (since ebx is callee saved) before jumping to the
4683       // target@PLT.
4684 
4685       // Note: The actual moving to ECX is done further down.
4686       GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4687       if (G && !G->getGlobal()->hasLocalLinkage() &&
4688           G->getGlobal()->hasDefaultVisibility())
4689         Callee = LowerGlobalAddress(Callee, DAG);
4690       else if (isa<ExternalSymbolSDNode>(Callee))
4691         Callee = LowerExternalSymbol(Callee, DAG);
4692     }
4693   }
4694 
4695   if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
4696       (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
4697     // From AMD64 ABI document:
4698     // For calls that may call functions that use varargs or stdargs
4699     // (prototype-less calls or calls to functions containing ellipsis (...) in
4700     // the declaration) %al is used as a hidden argument to specify the number
4701     // of SSE registers used. The contents of %al do not need to match exactly
4702     // the number of registers, but must be an upper bound on the number of SSE
4703     // registers used and must be in the range 0 - 8 inclusive.
4704 
4705     // Count the number of XMM registers allocated.
4706     static const MCPhysReg XMMArgRegs[] = {
4707       X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4708       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4709     };
4710     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4711     assert((Subtarget.hasSSE1() || !NumXMMRegs)
4712            && "SSE registers cannot be used when SSE is disabled");
4713     RegsToPass.push_back(std::make_pair(Register(X86::AL),
4714                                         DAG.getConstant(NumXMMRegs, dl,
4715                                                         MVT::i8)));
4716   }
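  // Illustrative example (not part of the original source): for a C call such
  // as printf("%f\n", x) with one double passed in %xmm0, the code above emits
  // the equivalent of
  //   movb $1, %al
  //   call printf
  // so the varargs callee knows an upper bound on the SSE registers to save.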
4717 
4718   if (isVarArg && IsMustTail) {
4719     const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4720     for (const auto &F : Forwards) {
4721       SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4722       RegsToPass.push_back(std::make_pair(F.PReg, Val));
4723     }
4724   }
4725 
4726   // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
4727   // don't need this because the eligibility check rejects calls that require
4728   // shuffling arguments passed in memory.
4729   if (!IsSibcall && isTailCall) {
4730     // Force all the incoming stack arguments to be loaded from the stack
4731     // before any new outgoing arguments are stored to the stack, because the
4732     // outgoing stack slots may alias the incoming argument stack slots, and
4733     // the alias isn't otherwise explicit. This is slightly more conservative
4734     // than necessary, because it means that each store effectively depends
4735     // on every argument instead of just those arguments it would clobber.
4736     SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4737 
4738     SmallVector<SDValue, 8> MemOpChains2;
4739     SDValue FIN;
4740     int FI = 0;
4741     for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4742          ++I, ++OutsIndex) {
4743       CCValAssign &VA = ArgLocs[I];
4744 
4745       if (VA.isRegLoc()) {
4746         if (VA.needsCustom()) {
4747           assert((CallConv == CallingConv::X86_RegCall) &&
4748                  "Expecting custom case only in regcall calling convention");
4749           // This means that we are in the special case where one argument was
4750           // passed through two register locations; skip the next location.
4751           ++I;
4752         }
4753 
4754         continue;
4755       }
4756 
4757       assert(VA.isMemLoc());
4758       SDValue Arg = OutVals[OutsIndex];
4759       ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4760       // Skip inalloca/preallocated arguments.  They don't require any work.
4761       if (Flags.isInAlloca() || Flags.isPreallocated())
4762         continue;
4763       // Create frame index.
4764       int32_t Offset = VA.getLocMemOffset()+FPDiff;
4765       uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4766       FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4767       FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4768 
4769       if (Flags.isByVal()) {
4770         // Copy relative to framepointer.
4771         SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4772         if (!StackPtr.getNode())
4773           StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4774                                         getPointerTy(DAG.getDataLayout()));
4775         Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4776                              StackPtr, Source);
4777 
4778         MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4779                                                          ArgChain,
4780                                                          Flags, DAG, dl));
4781       } else {
4782         // Store relative to framepointer.
4783         MemOpChains2.push_back(DAG.getStore(
4784             ArgChain, dl, Arg, FIN,
4785             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4786       }
4787     }
4788 
4789     if (!MemOpChains2.empty())
4790       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4791 
4792     // Store the return address to the appropriate stack slot.
4793     Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4794                                      getPointerTy(DAG.getDataLayout()),
4795                                      RegInfo->getSlotSize(), FPDiff, dl);
4796   }
4797 
4798   // Build a sequence of copy-to-reg nodes chained together with token chain
4799   // and flag operands which copy the outgoing args into registers.
4800   SDValue InFlag;
4801   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4802     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4803                              RegsToPass[i].second, InFlag);
4804     InFlag = Chain.getValue(1);
4805   }
4806 
4807   if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4808     assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4809     // In the 64-bit large code model, we have to make all calls
4810     // through a register, since the call instruction's 32-bit
4811     // pc-relative offset may not be large enough to hold the whole
4812     // address.
4813   } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4814              Callee->getOpcode() == ISD::ExternalSymbol) {
4815     // Lower direct calls to global addresses and external symbols. Setting
4816     // ForCall to true here has the effect of removing WrapperRIP when possible
4817     // to allow direct calls to be selected without first materializing the
4818     // address into a register.
4819     Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4820   } else if (Subtarget.isTarget64BitILP32() &&
4821              Callee.getValueType() == MVT::i32) {
4822     // Zero-extend the 32-bit Callee address to 64 bits, per the x32 ABI.
4823     Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4824   }
4825 
4826   // Returns a chain & a flag for retval copy to use.
4827   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4828   SmallVector<SDValue, 8> Ops;
4829 
4830   if (!IsSibcall && isTailCall && !IsMustTail) {
4831     Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InFlag, dl);
4832     InFlag = Chain.getValue(1);
4833   }
4834 
4835   Ops.push_back(Chain);
4836   Ops.push_back(Callee);
4837 
4838   if (isTailCall)
4839     Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));
4840 
4841   // Add argument registers to the end of the list so that they are known live
4842   // into the call.
4843   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4844     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4845                                   RegsToPass[i].second.getValueType()));
4846 
4847   // Add a register mask operand representing the call-preserved registers.
4848   const uint32_t *Mask = [&]() {
4849     auto AdaptedCC = CallConv;
4850     // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists),
4851     // use X86_INTR calling convention because it has the same CSR mask
4852     // (same preserved registers).
4853     if (HasNCSR)
4854       AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
4855     // If NoCalleeSavedRegisters is requested, then use GHC since it happens
4856     // to use the CSR_NoRegs_RegMask.
4857     if (CB && CB->hasFnAttr("no_callee_saved_registers"))
4858       AdaptedCC = (CallingConv::ID)CallingConv::GHC;
4859     return RegInfo->getCallPreservedMask(MF, AdaptedCC);
4860   }();
4861   assert(Mask && "Missing call preserved mask for calling convention");
4862 
4863   // If this is an invoke in a 32-bit function using a funclet-based
4864   // personality, assume the function clobbers all registers. If an exception
4865   // is thrown, the runtime will not restore CSRs.
4866   // FIXME: Model this more precisely so that we can register allocate across
4867   // the normal edge and spill and fill across the exceptional edge.
4868   if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4869     const Function &CallerFn = MF.getFunction();
4870     EHPersonality Pers =
4871         CallerFn.hasPersonalityFn()
4872             ? classifyEHPersonality(CallerFn.getPersonalityFn())
4873             : EHPersonality::Unknown;
4874     if (isFuncletEHPersonality(Pers))
4875       Mask = RegInfo->getNoPreservedMask();
4876   }
4877 
4878   // Define a new register mask from the existing mask.
4879   uint32_t *RegMask = nullptr;
4880 
4881   // In some calling conventions we need to remove the used physical registers
4882   // from the reg mask.
4883   if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4884     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4885 
4886     // Allocate a new Reg Mask and copy Mask.
4887     RegMask = MF.allocateRegMask();
4888     unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4889     memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4890 
4891     // Make sure all sub registers of the argument registers are reset
4892     // in the RegMask.
4893     for (auto const &RegPair : RegsToPass)
4894       for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4895            SubRegs.isValid(); ++SubRegs)
4896         RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4897 
4898     // Create the RegMask Operand according to our updated mask.
4899     Ops.push_back(DAG.getRegisterMask(RegMask));
4900   } else {
4901     // Create the RegMask Operand according to the static mask.
4902     Ops.push_back(DAG.getRegisterMask(Mask));
4903   }
4904 
4905   if (InFlag.getNode())
4906     Ops.push_back(InFlag);
4907 
4908   if (isTailCall) {
4909     // We used to do:
4910     //// If this is the first return lowered for this function, add the regs
4911     //// to the liveout set for the function.
4912     // This isn't right, although it's probably harmless on x86; liveouts
4913     // should be computed from returns not tail calls.  Consider a void
4914     // function making a tail call to a function returning int.
4915     MF.getFrameInfo().setHasTailCall();
4916     SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4917 
4918     if (IsCFICall)
4919       Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
4920 
4921     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4922     return Ret;
4923   }
4924 
4925   if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
4926     Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4927   } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
4928     // Calls with a "clang.arc.attachedcall" bundle are special. They should be
4929     // expanded to the call, directly followed by a special marker sequence and
4930     // a call to an ObjC library function. Use the CALL_RVMARKER to do that.
4931     assert(!isTailCall &&
4932            "tail calls cannot be marked with clang.arc.attachedcall");
4933     assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
4934 
4935     // Add a target global address for the retainRV/claimRV runtime function
4936     // just before the call target.
4937     Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
4938     auto PtrVT = getPointerTy(DAG.getDataLayout());
4939     auto GA = DAG.getTargetGlobalAddress(ARCFn, dl, PtrVT);
4940     Ops.insert(Ops.begin() + 1, GA);
4941     Chain = DAG.getNode(X86ISD::CALL_RVMARKER, dl, NodeTys, Ops);
4942   } else {
4943     Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4944   }
4945 
4946   if (IsCFICall)
4947     Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
4948 
4949   InFlag = Chain.getValue(1);
4950   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
4951   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4952 
4953   // Save heapallocsite metadata.
4954   if (CLI.CB)
4955     if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
4956       DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4957 
4958   // Create the CALLSEQ_END node.
4959   unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
4960   if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4961                        DAG.getTarget().Options.GuaranteedTailCallOpt))
4962     NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
4963   else if (!canGuaranteeTCO(CallConv) && IsCalleePopSRet)
4964     // If this call passes a struct-return pointer, the callee
4965     // pops that struct pointer.
4966     NumBytesForCalleeToPop = 4;
4967 
4968   // Returns a flag for retval copy to use.
4969   if (!IsSibcall) {
4970     Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
4971                                InFlag, dl);
4972     InFlag = Chain.getValue(1);
4973   }
4974 
4975   // Handle result values, copying them out of physregs into vregs that we
4976   // return.
4977   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4978                          InVals, RegMask);
4979 }
4980 
4981 //===----------------------------------------------------------------------===//
4982 //                Fast Calling Convention (tail call) implementation
4983 //===----------------------------------------------------------------------===//
4984 
4985 //  Like stdcall, the callee cleans up the arguments, except that ECX is
4986 //  reserved for storing the tail-called function address. Only 2 registers are
4987 //  free for argument passing (inreg). Tail call optimization is performed
4988 //  provided:
4989 //                * tailcallopt is enabled
4990 //                * caller/callee are fastcc
4991 //  On X86_64 architecture with GOT-style position independent code only local
4992 //  (within module) calls are supported at the moment.
4993 //  To keep the stack aligned according to the platform ABI, the function
4994 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
4995 //  multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example)
4996 //  If a tail-called callee has more arguments than the caller, the caller
4997 //  needs to make sure that there is room to move the RETADDR to. This is
4998 //  achieved by reserving an area the size of the argument delta right after the
4999 //  original RETADDR, but before the saved framepointer or the spilled registers
5000 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
5001 //  stack layout:
5002 //    arg1
5003 //    arg2
5004 //    RETADDR
5005 //    [ new RETADDR
5006 //      move area ]
5007 //    (possible EBP)
5008 //    ESI
5009 //    EDI
5010 //    local1 ..
5011 
5012 /// Align the stack size so that it is, e.g., of the form 16n + 12 for a
5013 /// 16-byte alignment requirement.
5014 unsigned
5015 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
5016                                                SelectionDAG &DAG) const {
5017   const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
5018   const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
5019   assert(StackSize % SlotSize == 0 &&
5020          "StackSize must be a multiple of SlotSize");
5021   return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
5022 }
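// Worked example (illustrative, not from the original source): on 32-bit x86
// with SlotSize == 4, a 16-byte stack alignment and StackSize == 40, the
// result is alignTo(40 + 4, 16) - 4 == 44 == 16*2 + 12, i.e. the argument area
// plus the pushed return address stays 16-byte aligned.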
5023 
5024 /// Return true if the given stack call argument is already available in the
5025 /// same position (relatively) of the caller's incoming argument stack.
5026 static
5027 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
5028                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
5029                          const X86InstrInfo *TII, const CCValAssign &VA) {
5030   unsigned Bytes = Arg.getValueSizeInBits() / 8;
5031 
5032   for (;;) {
5033     // Look through nodes that don't alter the bits of the incoming value.
5034     unsigned Op = Arg.getOpcode();
5035     if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
5036       Arg = Arg.getOperand(0);
5037       continue;
5038     }
5039     if (Op == ISD::TRUNCATE) {
5040       const SDValue &TruncInput = Arg.getOperand(0);
5041       if (TruncInput.getOpcode() == ISD::AssertZext &&
5042           cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
5043               Arg.getValueType()) {
5044         Arg = TruncInput.getOperand(0);
5045         continue;
5046       }
5047     }
5048     break;
5049   }
5050 
5051   int FI = INT_MAX;
5052   if (Arg.getOpcode() == ISD::CopyFromReg) {
5053     Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
5054     if (!VR.isVirtual())
5055       return false;
5056     MachineInstr *Def = MRI->getVRegDef(VR);
5057     if (!Def)
5058       return false;
5059     if (!Flags.isByVal()) {
5060       if (!TII->isLoadFromStackSlot(*Def, FI))
5061         return false;
5062     } else {
5063       unsigned Opcode = Def->getOpcode();
5064       if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
5065            Opcode == X86::LEA64_32r) &&
5066           Def->getOperand(1).isFI()) {
5067         FI = Def->getOperand(1).getIndex();
5068         Bytes = Flags.getByValSize();
5069       } else
5070         return false;
5071     }
5072   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
5073     if (Flags.isByVal())
5074       // ByVal argument is passed in as a pointer but it's now being
5075       // dereferenced. e.g.
5076       // define @foo(%struct.X* %A) {
5077       //   tail call @bar(%struct.X* byval %A)
5078       // }
5079       return false;
5080     SDValue Ptr = Ld->getBasePtr();
5081     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
5082     if (!FINode)
5083       return false;
5084     FI = FINode->getIndex();
5085   } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
5086     FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
5087     FI = FINode->getIndex();
5088     Bytes = Flags.getByValSize();
5089   } else
5090     return false;
5091 
5092   assert(FI != INT_MAX);
5093   if (!MFI.isFixedObjectIndex(FI))
5094     return false;
5095 
5096   if (Offset != MFI.getObjectOffset(FI))
5097     return false;
5098 
5099   // If this is not byval, check that the argument stack object is immutable.
5100   // inalloca and argument copy elision can create mutable argument stack
5101   // objects. Byval objects can be mutated, but a byval call intends to pass the
5102   // mutated memory.
5103   if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
5104     return false;
5105 
5106   if (VA.getLocVT().getFixedSizeInBits() >
5107       Arg.getValueSizeInBits().getFixedValue()) {
5108     // If the argument location is wider than the argument type, check that any
5109     // extension flags match.
5110     if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
5111         Flags.isSExt() != MFI.isObjectSExt(FI)) {
5112       return false;
5113     }
5114   }
5115 
5116   return Bytes == MFI.getObjectSize(FI);
5117 }
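// Illustrative example (not part of the original source): if a caller receives
// an i32 on the stack at offset 0 and forwards it unchanged to a tail-called
// function that also expects an i32 at offset 0, the checks above succeed and
// the existing fixed stack slot is reused instead of copying the argument.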
5118 
5119 /// Check whether the call is eligible for tail call optimization. Targets
5120 /// that want to do tail call optimization should implement this function.
5121 bool X86TargetLowering::IsEligibleForTailCallOptimization(
5122     SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
5123     bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
5124     const SmallVectorImpl<SDValue> &OutVals,
5125     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
5126   if (!mayTailCallThisCC(CalleeCC))
5127     return false;
5128 
5129   // If -tailcallopt is specified, make fastcc functions tail-callable.
5130   MachineFunction &MF = DAG.getMachineFunction();
5131   const Function &CallerF = MF.getFunction();
5132 
5133   // If the function return type is x86_fp80 and the callee return type is not,
5134   // then the FP_EXTEND of the call result is not a nop. It's not safe to
5135   // perform a tailcall optimization here.
5136   if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
5137     return false;
5138 
5139   CallingConv::ID CallerCC = CallerF.getCallingConv();
5140   bool CCMatch = CallerCC == CalleeCC;
5141   bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
5142   bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
5143   bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
5144       CalleeCC == CallingConv::Tail || CalleeCC == CallingConv::SwiftTail;
5145 
5146   // Win64 functions have extra shadow space for argument homing. Don't do the
5147   // sibcall if the caller and callee have mismatched expectations for this
5148   // space.
5149   if (IsCalleeWin64 != IsCallerWin64)
5150     return false;
5151 
5152   if (IsGuaranteeTCO) {
5153     if (canGuaranteeTCO(CalleeCC) && CCMatch)
5154       return true;
5155     return false;
5156   }
5157 
5158   // Look for obvious safe cases to perform tail call optimization that do not
5159   // require ABI changes. This is what gcc calls sibcall.
5160 
5161   // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
5162   // emit a special epilogue.
5163   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5164   if (RegInfo->hasStackRealignment(MF))
5165     return false;
5166 
5167   // Also avoid sibcall optimization if we're an sret return fn and the callee
5168   // is incompatible. See comment in LowerReturn about why hasStructRetAttr is
5169   // insufficient.
5170   if (MF.getInfo<X86MachineFunctionInfo>()->getSRetReturnReg()) {
5171     // For a compatible tail call the callee must return our sret pointer. So it
5172     // needs to be (a) an sret function itself and (b) we pass our sret as its
5173     // sret. Condition #b is harder to determine.
5174     return false;
5175   } else if (IsCalleePopSRet)
5176     // The callee pops an sret, so we cannot tail-call, as our caller doesn't
5177     // expect that.
5178     return false;
5179 
5180   // Do not sibcall optimize vararg calls unless all arguments are passed via
5181   // registers.
5182   LLVMContext &C = *DAG.getContext();
5183   if (isVarArg && !Outs.empty()) {
5184     // Optimizing for varargs on Win64 is unlikely to be safe without
5185     // additional testing.
5186     if (IsCalleeWin64 || IsCallerWin64)
5187       return false;
5188 
5189     SmallVector<CCValAssign, 16> ArgLocs;
5190     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5191 
5192     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5193     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
5194       if (!ArgLocs[i].isRegLoc())
5195         return false;
5196   }
5197 
5198   // If the call result is in ST0 / ST1, it needs to be popped off the x87
5199   // stack.  Therefore, if it's not used by the call it is not safe to optimize
5200   // this into a sibcall.
5201   bool Unused = false;
5202   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5203     if (!Ins[i].Used) {
5204       Unused = true;
5205       break;
5206     }
5207   }
5208   if (Unused) {
5209     SmallVector<CCValAssign, 16> RVLocs;
5210     CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
5211     CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
5212     for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5213       CCValAssign &VA = RVLocs[i];
5214       if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
5215         return false;
5216     }
5217   }
5218 
5219   // Check that the call results are passed in the same way.
5220   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
5221                                   RetCC_X86, RetCC_X86))
5222     return false;
5223   // The callee has to preserve all registers the caller needs to preserve.
5224   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
5225   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5226   if (!CCMatch) {
5227     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5228     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5229       return false;
5230   }
5231 
5232   unsigned StackArgsSize = 0;
5233 
5234   // If the callee takes no arguments then go on to check the results of the
5235   // call.
5236   if (!Outs.empty()) {
5237     // Check if stack adjustment is needed. For now, do not do this if any
5238     // argument is passed on the stack.
5239     SmallVector<CCValAssign, 16> ArgLocs;
5240     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5241 
5242     // Allocate shadow area for Win64
5243     if (IsCalleeWin64)
5244       CCInfo.AllocateStack(32, Align(8));
5245 
5246     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5247     StackArgsSize = CCInfo.getNextStackOffset();
5248 
5249     if (CCInfo.getNextStackOffset()) {
5250       // Check if the arguments are already laid out in the right way as
5251       // the caller's fixed stack objects.
5252       MachineFrameInfo &MFI = MF.getFrameInfo();
5253       const MachineRegisterInfo *MRI = &MF.getRegInfo();
5254       const X86InstrInfo *TII = Subtarget.getInstrInfo();
5255       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5256         CCValAssign &VA = ArgLocs[i];
5257         SDValue Arg = OutVals[i];
5258         ISD::ArgFlagsTy Flags = Outs[i].Flags;
5259         if (VA.getLocInfo() == CCValAssign::Indirect)
5260           return false;
5261         if (!VA.isRegLoc()) {
5262           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
5263                                    MFI, MRI, TII, VA))
5264             return false;
5265         }
5266       }
5267     }
5268 
5269     bool PositionIndependent = isPositionIndependent();
5270     // If the tailcall address may be in a register, then make sure it's
5271     // possible to register allocate for it. In 32-bit, the call address can
5272     // only target EAX, EDX, or ECX since the tail call must be scheduled after
5273     // callee-saved registers are restored. These happen to be the same
5274     // registers used to pass 'inreg' arguments so watch out for those.
5275     if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
5276                                   !isa<ExternalSymbolSDNode>(Callee)) ||
5277                                  PositionIndependent)) {
5278       unsigned NumInRegs = 0;
5279       // In PIC we need an extra register to formulate the address computation
5280       // for the callee.
5281       unsigned MaxInRegs = PositionIndependent ? 2 : 3;
5282 
5283       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5284         CCValAssign &VA = ArgLocs[i];
5285         if (!VA.isRegLoc())
5286           continue;
5287         Register Reg = VA.getLocReg();
5288         switch (Reg) {
5289         default: break;
5290         case X86::EAX: case X86::EDX: case X86::ECX:
5291           if (++NumInRegs == MaxInRegs)
5292             return false;
5293           break;
5294         }
5295       }
5296     }
5297 
5298     const MachineRegisterInfo &MRI = MF.getRegInfo();
5299     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
5300       return false;
5301   }
5302 
5303   bool CalleeWillPop =
5304       X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
5305                        MF.getTarget().Options.GuaranteedTailCallOpt);
5306 
5307   if (unsigned BytesToPop =
5308           MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
5309     // If we have bytes to pop, the callee must pop them.
5310     bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
5311     if (!CalleePopMatches)
5312       return false;
5313   } else if (CalleeWillPop && StackArgsSize > 0) {
5314     // If we don't have bytes to pop, make sure the callee doesn't pop any.
5315     return false;
5316   }
5317 
5318   return true;
5319 }
5320 
5321 FastISel *
5322 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
5323                                   const TargetLibraryInfo *libInfo) const {
5324   return X86::createFastISel(funcInfo, libInfo);
5325 }
5326 
5327 //===----------------------------------------------------------------------===//
5328 //                           Other Lowering Hooks
5329 //===----------------------------------------------------------------------===//
5330 
5331 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
5332                       bool AssumeSingleUse) {
5333   if (!AssumeSingleUse && !Op.hasOneUse())
5334     return false;
5335   if (!ISD::isNormalLoad(Op.getNode()))
5336     return false;
5337 
5338   // If this is an unaligned vector, make sure the target supports folding it.
5339   auto *Ld = cast<LoadSDNode>(Op.getNode());
5340   if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
5341       Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
5342     return false;
5343 
5344   // TODO: If this is a non-temporal load and the target has an instruction
5345   //       for it, it should not be folded. See "useNonTemporalLoad()".
5346 
5347   return true;
5348 }
5349 
5350 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
5351                                           const X86Subtarget &Subtarget,
5352                                           bool AssumeSingleUse) {
5353   assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
5354   if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
5355     return false;
5356 
5357   // We cannot replace a wide volatile load with a broadcast-from-memory,
5358   // because that would narrow the load, which isn't legal for volatiles.
5359   auto *Ld = cast<LoadSDNode>(Op.getNode());
5360   return !Ld->isVolatile() ||
5361          Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
5362 }
5363 
5364 bool X86::mayFoldIntoStore(SDValue Op) {
5365   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
5366 }
5367 
5368 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
5369   if (Op.hasOneUse()) {
5370     unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
5371     return (ISD::ZERO_EXTEND == Opcode);
5372   }
5373   return false;
5374 }
5375 
5376 static bool isTargetShuffle(unsigned Opcode) {
5377   switch(Opcode) {
5378   default: return false;
5379   case X86ISD::BLENDI:
5380   case X86ISD::PSHUFB:
5381   case X86ISD::PSHUFD:
5382   case X86ISD::PSHUFHW:
5383   case X86ISD::PSHUFLW:
5384   case X86ISD::SHUFP:
5385   case X86ISD::INSERTPS:
5386   case X86ISD::EXTRQI:
5387   case X86ISD::INSERTQI:
5388   case X86ISD::VALIGN:
5389   case X86ISD::PALIGNR:
5390   case X86ISD::VSHLDQ:
5391   case X86ISD::VSRLDQ:
5392   case X86ISD::MOVLHPS:
5393   case X86ISD::MOVHLPS:
5394   case X86ISD::MOVSHDUP:
5395   case X86ISD::MOVSLDUP:
5396   case X86ISD::MOVDDUP:
5397   case X86ISD::MOVSS:
5398   case X86ISD::MOVSD:
5399   case X86ISD::MOVSH:
5400   case X86ISD::UNPCKL:
5401   case X86ISD::UNPCKH:
5402   case X86ISD::VBROADCAST:
5403   case X86ISD::VPERMILPI:
5404   case X86ISD::VPERMILPV:
5405   case X86ISD::VPERM2X128:
5406   case X86ISD::SHUF128:
5407   case X86ISD::VPERMIL2:
5408   case X86ISD::VPERMI:
5409   case X86ISD::VPPERM:
5410   case X86ISD::VPERMV:
5411   case X86ISD::VPERMV3:
5412   case X86ISD::VZEXT_MOVL:
5413     return true;
5414   }
5415 }
5416 
5417 static bool isTargetShuffleVariableMask(unsigned Opcode) {
5418   switch (Opcode) {
5419   default: return false;
5420   // Target Shuffles.
5421   case X86ISD::PSHUFB:
5422   case X86ISD::VPERMILPV:
5423   case X86ISD::VPERMIL2:
5424   case X86ISD::VPPERM:
5425   case X86ISD::VPERMV:
5426   case X86ISD::VPERMV3:
5427     return true;
5428   // 'Faux' Target Shuffles.
5429   case ISD::OR:
5430   case ISD::AND:
5431   case X86ISD::ANDNP:
5432     return true;
5433   }
5434 }
5435 
5436 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
5437   MachineFunction &MF = DAG.getMachineFunction();
5438   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5439   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
5440   int ReturnAddrIndex = FuncInfo->getRAIndex();
5441 
5442   if (ReturnAddrIndex == 0) {
5443     // Set up a frame object for the return address.
5444     unsigned SlotSize = RegInfo->getSlotSize();
5445     ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
5446                                                           -(int64_t)SlotSize,
5447                                                           false);
5448     FuncInfo->setRAIndex(ReturnAddrIndex);
5449   }
5450 
5451   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
5452 }
5453 
5454 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
5455                                        bool hasSymbolicDisplacement) {
5456   // Offset should fit into 32 bit immediate field.
5457   if (!isInt<32>(Offset))
5458     return false;
5459 
5460   // If we don't have a symbolic displacement - we don't have any extra
5461   // restrictions.
5462   if (!hasSymbolicDisplacement)
5463     return true;
5464 
5465   // FIXME: Some tweaks might be needed for medium code model.
5466   if (M != CodeModel::Small && M != CodeModel::Kernel)
5467     return false;
5468 
5469   // For the small code model we assume that the last object is within 16MB of
5470   // the 31-bit boundary. We may also accept pretty large negative constants,
5471   // knowing that all objects are in the positive half of the address space.
5472   if (M == CodeModel::Small && Offset < 16*1024*1024)
5473     return true;
5474 
5475   // For the kernel code model we know that all objects reside in the negative
5476   // half of the 32-bit address space. We may not accept negative offsets, since
5477   // they may fall just below an object, but we may accept pretty large positive ones.
5478   if (M == CodeModel::Kernel && Offset >= 0)
5479     return true;
5480 
5481   return false;
5482 }
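// Illustrative example (not part of the original source): under the kernel
// code model, globals live in the top 2GB of the address space, so a
// non-negative offset such as GV+64 still fits the sign-extended 32-bit
// displacement, while a negative offset could fall below the mapped objects.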
5483 
5484 /// Determines whether the callee is required to pop its own arguments.
5485 /// Callee pop is necessary to support tail calls.
5486 bool X86::isCalleePop(CallingConv::ID CallingConv,
5487                       bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
5488   // If GuaranteeTCO is true, we force some calls to be callee pop so that we
5489   // can guarantee TCO.
5490   if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
5491     return true;
5492 
5493   switch (CallingConv) {
5494   default:
5495     return false;
5496   case CallingConv::X86_StdCall:
5497   case CallingConv::X86_FastCall:
5498   case CallingConv::X86_ThisCall:
5499   case CallingConv::X86_VectorCall:
5500     return !is64Bit;
5501   }
5502 }
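// Illustrative example (not part of the original source): a 32-bit stdcall
// function taking two i32 arguments returns with "ret 8", popping its own
// stack arguments; on x86-64 these conventions fall back to caller cleanup,
// which is why the switch above returns !is64Bit.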
5503 
5504 /// Return true if the condition is a signed comparison operation.
5505 static bool isX86CCSigned(unsigned X86CC) {
5506   switch (X86CC) {
5507   default:
5508     llvm_unreachable("Invalid integer condition!");
5509   case X86::COND_E:
5510   case X86::COND_NE:
5511   case X86::COND_B:
5512   case X86::COND_A:
5513   case X86::COND_BE:
5514   case X86::COND_AE:
5515     return false;
5516   case X86::COND_G:
5517   case X86::COND_GE:
5518   case X86::COND_L:
5519   case X86::COND_LE:
5520     return true;
5521   }
5522 }
5523 
5524 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
5525   switch (SetCCOpcode) {
5526   default: llvm_unreachable("Invalid integer condition!");
5527   case ISD::SETEQ:  return X86::COND_E;
5528   case ISD::SETGT:  return X86::COND_G;
5529   case ISD::SETGE:  return X86::COND_GE;
5530   case ISD::SETLT:  return X86::COND_L;
5531   case ISD::SETLE:  return X86::COND_LE;
5532   case ISD::SETNE:  return X86::COND_NE;
5533   case ISD::SETULT: return X86::COND_B;
5534   case ISD::SETUGT: return X86::COND_A;
5535   case ISD::SETULE: return X86::COND_BE;
5536   case ISD::SETUGE: return X86::COND_AE;
5537   }
5538 }
5539 
5540 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
5541 /// condition code, returning the condition code and the LHS/RHS of the
5542 /// comparison to make.
5543 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
5544                                     bool isFP, SDValue &LHS, SDValue &RHS,
5545                                     SelectionDAG &DAG) {
5546   if (!isFP) {
5547     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5548       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
5549         // X > -1   -> X == 0, jump !sign.
5550         RHS = DAG.getConstant(0, DL, RHS.getValueType());
5551         return X86::COND_NS;
5552       }
5553       if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
5554         // X < 0   -> X == 0, jump on sign.
5555         return X86::COND_S;
5556       }
5557       if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
5558         // X >= 0   -> X == 0, jump on !sign.
5559         return X86::COND_NS;
5560       }
5561       if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
5562         // X < 1   -> X <= 0
5563         RHS = DAG.getConstant(0, DL, RHS.getValueType());
5564         return X86::COND_LE;
5565       }
5566     }
5567 
5568     return TranslateIntegerX86CC(SetCCOpcode);
5569   }
5570 
5571   // First determine if it is required or is profitable to flip the operands.
5572 
5573   // If LHS is a foldable load, but RHS is not, flip the condition.
5574   if (ISD::isNON_EXTLoad(LHS.getNode()) &&
5575       !ISD::isNON_EXTLoad(RHS.getNode())) {
5576     SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
5577     std::swap(LHS, RHS);
5578   }
5579 
5580   switch (SetCCOpcode) {
5581   default: break;
5582   case ISD::SETOLT:
5583   case ISD::SETOLE:
5584   case ISD::SETUGT:
5585   case ISD::SETUGE:
5586     std::swap(LHS, RHS);
5587     break;
5588   }
5589 
5590   // On a floating point condition, the flags are set as follows:
5591   // ZF  PF  CF   op
5592   //  0 | 0 | 0 | X > Y
5593   //  0 | 0 | 1 | X < Y
5594   //  1 | 0 | 0 | X == Y
5595   //  1 | 1 | 1 | unordered
5596   switch (SetCCOpcode) {
5597   default: llvm_unreachable("Condcode should be pre-legalized away");
5598   case ISD::SETUEQ:
5599   case ISD::SETEQ:   return X86::COND_E;
5600   case ISD::SETOLT:              // flipped
5601   case ISD::SETOGT:
5602   case ISD::SETGT:   return X86::COND_A;
5603   case ISD::SETOLE:              // flipped
5604   case ISD::SETOGE:
5605   case ISD::SETGE:   return X86::COND_AE;
5606   case ISD::SETUGT:              // flipped
5607   case ISD::SETULT:
5608   case ISD::SETLT:   return X86::COND_B;
5609   case ISD::SETUGE:              // flipped
5610   case ISD::SETULE:
5611   case ISD::SETLE:   return X86::COND_BE;
5612   case ISD::SETONE:
5613   case ISD::SETNE:   return X86::COND_NE;
5614   case ISD::SETUO:   return X86::COND_P;
5615   case ISD::SETO:    return X86::COND_NP;
5616   case ISD::SETOEQ:
5617   case ISD::SETUNE:  return X86::COND_INVALID;
5618   }
5619 }
5620 
5621 /// Is there a floating point cmov for the specific X86 condition code?
5622 /// Current x86 isa includes the following FP cmov instructions:
5623 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
5624 static bool hasFPCMov(unsigned X86CC) {
5625   switch (X86CC) {
5626   default:
5627     return false;
5628   case X86::COND_B:
5629   case X86::COND_BE:
5630   case X86::COND_E:
5631   case X86::COND_P:
5632   case X86::COND_A:
5633   case X86::COND_AE:
5634   case X86::COND_NE:
5635   case X86::COND_NP:
5636     return true;
5637   }
5638 }
5639 
5640 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
5641   return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
5642          VT.is512BitVector();
5643 }
5644 
5645 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5646                                            const CallInst &I,
5647                                            MachineFunction &MF,
5648                                            unsigned Intrinsic) const {
5649   Info.flags = MachineMemOperand::MONone;
5650   Info.offset = 0;
5651 
5652   const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
5653   if (!IntrData) {
5654     switch (Intrinsic) {
5655     case Intrinsic::x86_aesenc128kl:
5656     case Intrinsic::x86_aesdec128kl:
5657       Info.opc = ISD::INTRINSIC_W_CHAIN;
5658       Info.ptrVal = I.getArgOperand(1);
5659       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5660       Info.align = Align(1);
5661       Info.flags |= MachineMemOperand::MOLoad;
5662       return true;
5663     case Intrinsic::x86_aesenc256kl:
5664     case Intrinsic::x86_aesdec256kl:
5665       Info.opc = ISD::INTRINSIC_W_CHAIN;
5666       Info.ptrVal = I.getArgOperand(1);
5667       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5668       Info.align = Align(1);
5669       Info.flags |= MachineMemOperand::MOLoad;
5670       return true;
5671     case Intrinsic::x86_aesencwide128kl:
5672     case Intrinsic::x86_aesdecwide128kl:
5673       Info.opc = ISD::INTRINSIC_W_CHAIN;
5674       Info.ptrVal = I.getArgOperand(0);
5675       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5676       Info.align = Align(1);
5677       Info.flags |= MachineMemOperand::MOLoad;
5678       return true;
5679     case Intrinsic::x86_aesencwide256kl:
5680     case Intrinsic::x86_aesdecwide256kl:
5681       Info.opc = ISD::INTRINSIC_W_CHAIN;
5682       Info.ptrVal = I.getArgOperand(0);
5683       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5684       Info.align = Align(1);
5685       Info.flags |= MachineMemOperand::MOLoad;
5686       return true;
5687     case Intrinsic::x86_cmpccxadd32:
5688     case Intrinsic::x86_cmpccxadd64:
5689     case Intrinsic::x86_atomic_bts:
5690     case Intrinsic::x86_atomic_btc:
5691     case Intrinsic::x86_atomic_btr: {
5692       Info.opc = ISD::INTRINSIC_W_CHAIN;
5693       Info.ptrVal = I.getArgOperand(0);
5694       unsigned Size = I.getType()->getScalarSizeInBits();
5695       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5696       Info.align = Align(Size);
5697       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5698                     MachineMemOperand::MOVolatile;
5699       return true;
5700     }
5701     case Intrinsic::x86_atomic_bts_rm:
5702     case Intrinsic::x86_atomic_btc_rm:
5703     case Intrinsic::x86_atomic_btr_rm: {
5704       Info.opc = ISD::INTRINSIC_W_CHAIN;
5705       Info.ptrVal = I.getArgOperand(0);
5706       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5707       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5708       Info.align = Align(Size);
5709       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5710                     MachineMemOperand::MOVolatile;
5711       return true;
5712     }
5713     case Intrinsic::x86_aadd32:
5714     case Intrinsic::x86_aadd64:
5715     case Intrinsic::x86_aand32:
5716     case Intrinsic::x86_aand64:
5717     case Intrinsic::x86_aor32:
5718     case Intrinsic::x86_aor64:
5719     case Intrinsic::x86_axor32:
5720     case Intrinsic::x86_axor64:
5721     case Intrinsic::x86_atomic_add_cc:
5722     case Intrinsic::x86_atomic_sub_cc:
5723     case Intrinsic::x86_atomic_or_cc:
5724     case Intrinsic::x86_atomic_and_cc:
5725     case Intrinsic::x86_atomic_xor_cc: {
5726       Info.opc = ISD::INTRINSIC_W_CHAIN;
5727       Info.ptrVal = I.getArgOperand(0);
5728       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5729       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5730       Info.align = Align(Size);
5731       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5732                     MachineMemOperand::MOVolatile;
5733       return true;
5734     }
5735     }
5736     return false;
5737   }
5738 
5739   switch (IntrData->Type) {
5740   case TRUNCATE_TO_MEM_VI8:
5741   case TRUNCATE_TO_MEM_VI16:
5742   case TRUNCATE_TO_MEM_VI32: {
5743     Info.opc = ISD::INTRINSIC_VOID;
5744     Info.ptrVal = I.getArgOperand(0);
5745     MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
5746     MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5747     if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
5748       ScalarVT = MVT::i8;
5749     else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5750       ScalarVT = MVT::i16;
5751     else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5752       ScalarVT = MVT::i32;
5753 
5754     Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5755     Info.align = Align(1);
5756     Info.flags |= MachineMemOperand::MOStore;
5757     break;
5758   }
5759   case GATHER:
5760   case GATHER_AVX2: {
5761     Info.opc = ISD::INTRINSIC_W_CHAIN;
5762     Info.ptrVal = nullptr;
5763     MVT DataVT = MVT::getVT(I.getType());
5764     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5765     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5766                                 IndexVT.getVectorNumElements());
5767     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5768     Info.align = Align(1);
5769     Info.flags |= MachineMemOperand::MOLoad;
5770     break;
5771   }
5772   case SCATTER: {
5773     Info.opc = ISD::INTRINSIC_VOID;
5774     Info.ptrVal = nullptr;
5775     MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5776     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5777     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5778                                 IndexVT.getVectorNumElements());
5779     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5780     Info.align = Align(1);
5781     Info.flags |= MachineMemOperand::MOStore;
5782     break;
5783   }
5784   default:
5785     return false;
5786   }
5787 
5788   return true;
5789 }
5790 
5791 /// Returns true if the target can instruction select the
5792 /// specified FP immediate natively. If false, the legalizer will
5793 /// materialize the FP immediate as a load from a constant pool.
5794 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5795                                      bool ForCodeSize) const {
5796   for (const APFloat &FPImm : LegalFPImmediates)
5797     if (Imm.bitwiseIsEqual(FPImm))
5798       return true;
5799   return false;
5800 }
5801 
5802 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5803                                               ISD::LoadExtType ExtTy,
5804                                               EVT NewVT) const {
5805   assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5806 
5807   // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5808   // relocation target a movq or addq instruction: don't let the load shrink.
5809   // relocation must target a movq or addq instruction: don't let the load shrink.
5810   if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5811     if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5812       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5813 
5814   // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
5815   // those uses are extracted directly into a store, then the extract + store
5816   // can be store-folded. Therefore, it's probably not worth splitting the load.
5817   EVT VT = Load->getValueType(0);
5818   if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5819     for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5820       // Skip uses of the chain value. Result 0 of the node is the load value.
5821       if (UI.getUse().getResNo() != 0)
5822         continue;
5823 
5824       // If this use is not an extract + store, it's probably worth splitting.
5825       if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5826           UI->use_begin()->getOpcode() != ISD::STORE)
5827         return true;
5828     }
5829     // All non-chain uses are extract + store.
5830     return false;
5831   }
5832 
5833   return true;
5834 }
5835 
5836 /// Returns true if it is beneficial to convert a load of a constant
5837 /// to just the constant itself.
5838 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5839                                                           Type *Ty) const {
5840   assert(Ty->isIntegerTy());
5841 
5842   unsigned BitSize = Ty->getPrimitiveSizeInBits();
5843   if (BitSize == 0 || BitSize > 64)
5844     return false;
5845   return true;
5846 }
5847 
5848 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5849   // If we are using XMM registers in the ABI and the condition of the select is
5850   // a floating-point compare and we have blendv or conditional move, then it is
5851   // cheaper to select instead of doing a cross-register move and creating a
5852   // load that depends on the compare result.
5853   bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5854   return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5855 }
5856 
5857 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5858   // TODO: It might be a win to ease or lift this restriction, but the generic
5859   // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5860   if (VT.isVector() && Subtarget.hasAVX512())
5861     return false;
5862 
5863   return true;
5864 }
5865 
5866 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5867                                                SDValue C) const {
5868   // TODO: We handle scalars using custom code, but generic combining could make
5869   // that unnecessary.
5870   APInt MulC;
5871   if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5872     return false;
5873 
5874   // Find the type this will be legalized to. Otherwise we might prematurely
5875   // convert this to shl+add/sub and then still have to type legalize those ops.
5876   // Another choice would be to defer the decision for illegal types until
5877   // after type legalization. But constant splat vectors of i64 can't make it
5878   // through type legalization on 32-bit targets so we would need to special
5879   // case vXi64.
5880   while (getTypeAction(Context, VT) != TypeLegal)
5881     VT = getTypeToTransformTo(Context, VT);
5882 
5883   // If vector multiply is legal, assume that's faster than shl + add/sub.
5884   // Multiply is a complex op with higher latency and lower throughput in
5885   // most implementations, but sub-vXi32 vector multiplies are always fast,
5886   // vXi32 must not have a SlowMULLD implementation, and anything larger (vXi64)
5887   // is always going to be slow.
5888   unsigned EltSizeInBits = VT.getScalarSizeInBits();
5889   if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
5890       (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
5891     return false;
5892 
5893   // shl+add, shl+sub, shl+add+neg
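  // i.e. MulC is one of 2^N - 1 (shl+sub), 2^N + 1 (shl+add),
  // 1 - 2^N (sub of a shl), or -(2^N + 1) (shl+add then negate).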
5894   return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5895          (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
5896 }
5897 
5898 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5899                                                 unsigned Index) const {
5900   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5901     return false;
5902 
5903   // Mask vectors support all subregister combinations and operations that
5904   // extract half of a vector.
5905   if (ResVT.getVectorElementType() == MVT::i1)
5906     return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5907                           (Index == ResVT.getVectorNumElements()));
5908 
5909   return (Index % ResVT.getVectorNumElements()) == 0;
5910 }
5911 
5912 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5913   unsigned Opc = VecOp.getOpcode();
5914 
5915   // Assume target opcodes can't be scalarized.
5916   // TODO - do we have any exceptions?
5917   if (Opc >= ISD::BUILTIN_OP_END)
5918     return false;
5919 
5920   // If the vector op is not supported, try to convert to scalar.
5921   EVT VecVT = VecOp.getValueType();
5922   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5923     return true;
5924 
5925   // If the vector op is supported, but the scalar op is not, the transform may
5926   // not be worthwhile.
5927   EVT ScalarVT = VecVT.getScalarType();
5928   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5929 }
5930 
5931 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
5932                                              bool) const {
5933   // TODO: Allow vectors?
5934   if (VT.isVector())
5935     return false;
5936   return VT.isSimple() || !isOperationExpand(Opcode, VT);
5937 }
5938 
5939 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
5940   // Speculate cttz only if we can directly use TZCNT or can promote to i32.
5941   return Subtarget.hasBMI() ||
5942          (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
5943 }
5944 
5945 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
5946   // Speculate ctlz only if we can directly use LZCNT.
5947   return Subtarget.hasLZCNT();
5948 }
5949 
5950 bool X86TargetLowering::hasBitPreservingFPLogic(EVT VT) const {
5951   return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
5952 }
5953 
5954 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
5955   // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
5956   // expensive than a straight movsd. On the other hand, it's important to
5957   // shrink long double fp constant since fldt is very slow.
5958   return !Subtarget.hasSSE2() || VT == MVT::f80;
5959 }
5960 
5961 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
5962   return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
5963          (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
5964 }
5965 
5966 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5967                                                 const SelectionDAG &DAG,
5968                                                 const MachineMemOperand &MMO) const {
5969   if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5970       BitcastVT.getVectorElementType() == MVT::i1)
5971     return false;
5972 
5973   if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5974     return false;
5975 
5976   // If both types are legal vectors, it's always ok to convert them.
5977   if (LoadVT.isVector() && BitcastVT.isVector() &&
5978       isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5979     return true;
5980 
5981   return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5982 }
5983 
5984 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5985                                          const MachineFunction &MF) const {
5986   // Do not merge to float value size (128 bytes) if no implicit
5987   // Do not merge to float value size (128 bits) if no implicit
5988   bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
5989 
5990   if (NoFloat) {
5991     unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5992     return (MemVT.getSizeInBits() <= MaxIntSize);
5993   }
5994   // Make sure we don't merge greater than our preferred vector
5995   // width.
5996   if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5997     return false;
5998 
5999   return true;
6000 }
6001 
6002 bool X86TargetLowering::isCtlzFast() const {
6003   return Subtarget.hasFastLZCNT();
6004 }
6005 
6006 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
6007     const Instruction &AndI) const {
6008   return true;
6009 }
6010 
6011 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
6012   EVT VT = Y.getValueType();
6013 
6014   if (VT.isVector())
6015     return false;
6016 
6017   if (!Subtarget.hasBMI())
6018     return false;
6019 
6020   // There are only 32-bit and 64-bit forms for 'andn'.
6021   if (VT != MVT::i32 && VT != MVT::i64)
6022     return false;
6023 
6024   return !isa<ConstantSDNode>(Y);
6025 }
6026 
6027 bool X86TargetLowering::hasAndNot(SDValue Y) const {
6028   EVT VT = Y.getValueType();
6029 
6030   if (!VT.isVector())
6031     return hasAndNotCompare(Y);
6032 
6033   // Vector.
6034 
6035   if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
6036     return false;
6037 
6038   if (VT == MVT::v4i32)
6039     return true;
6040 
6041   return Subtarget.hasSSE2();
6042 }
6043 
6044 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
6045   return X.getValueType().isScalarInteger(); // 'bt'
6046 }
6047 
6048 bool X86TargetLowering::
6049     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6050         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
6051         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
6052         SelectionDAG &DAG) const {
6053   // Does the baseline recommend against performing the fold by default?
6054   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6055           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
6056     return false;
6057   // For scalars this transform is always beneficial.
6058   if (X.getValueType().isScalarInteger())
6059     return true;
6060   // If all the shift amounts are identical, then transform is beneficial even
6061   // If all the shift amounts are identical, then the transform is beneficial
6062   // even with rudimentary SSE2 shifts.
6063     return true;
6064   // If we have AVX2 with it's powerful shift operations, then it's also good.
6065   // If we have AVX2 with its powerful shift operations, then it's also good.
6066     return true;
6067   // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
6068   // Pre-AVX2 vector codegen for this pattern is best for the variant with 'shl'.
6069 }
6070 
6071 bool X86TargetLowering::preferScalarizeSplat(unsigned Opc) const {
6072   return Opc != ISD::FP_EXTEND;
6073 }
6074 
6075 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
6076     const SDNode *N, CombineLevel Level) const {
6077   assert(((N->getOpcode() == ISD::SHL &&
6078            N->getOperand(0).getOpcode() == ISD::SRL) ||
6079           (N->getOpcode() == ISD::SRL &&
6080            N->getOperand(0).getOpcode() == ISD::SHL)) &&
6081          "Expected shift-shift mask");
6082   // TODO: Should we always create i64 masks? Or only folded immediates?
6083   EVT VT = N->getValueType(0);
6084   if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
6085       (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
6086     // Only fold if the shift values are equal - so it folds to AND.
6087     // TODO - we should fold if either is a non-uniform vector but we don't do
6088     // the fold for non-splats yet.
6089     return N->getOperand(1) == N->getOperand(0).getOperand(1);
6090   }
6091   return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
6092 }
6093 
6094 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
6095   EVT VT = Y.getValueType();
6096 
6097   // For vectors, we don't have a preference, but we probably want a mask.
6098   if (VT.isVector())
6099     return false;
6100 
6101   // 64-bit shifts on 32-bit targets produce really bad bloated code.
6102   if (VT == MVT::i64 && !Subtarget.is64Bit())
6103     return false;
6104 
6105   return true;
6106 }
6107 
6108 TargetLowering::ShiftLegalizationStrategy
6109 X86TargetLowering::preferredShiftLegalizationStrategy(
6110     SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
6111   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
6112       !Subtarget.isOSWindows())
6113     return ShiftLegalizationStrategy::LowerToLibcall;
6114   return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
6115                                                             ExpansionFactor);
6116 }
6117 
6118 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
6119   // Any legal vector type can be splatted more efficiently than
6120   // loading/spilling from memory.
6121   return isTypeLegal(VT);
6122 }
6123 
6124 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
6125   MVT VT = MVT::getIntegerVT(NumBits);
6126   if (isTypeLegal(VT))
6127     return VT;
6128 
6129   // PMOVMSKB can handle this.
6130   if (NumBits == 128 && isTypeLegal(MVT::v16i8))
6131     return MVT::v16i8;
6132 
6133   // VPMOVMSKB can handle this.
6134   if (NumBits == 256 && isTypeLegal(MVT::v32i8))
6135     return MVT::v32i8;
6136 
6137   // TODO: Allow 64-bit type for 32-bit target.
6138   // TODO: 512-bit types should be allowed, but make sure that those
6139   // cases are handled in combineVectorSizedSetCCEquality().
6140 
6141   return MVT::INVALID_SIMPLE_VALUE_TYPE;
6142 }
6143 
6144 /// Val is the undef sentinel value or equal to the specified value.
6145 static bool isUndefOrEqual(int Val, int CmpVal) {
6146   return ((Val == SM_SentinelUndef) || (Val == CmpVal));
6147 }
6148 
6149 /// Return true if every element in Mask is the undef sentinel value or equal to
6150 /// the specified value.
6151 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
6152   return llvm::all_of(Mask, [CmpVal](int M) {
6153     return (M == SM_SentinelUndef) || (M == CmpVal);
6154   });
6155 }
6156 
6157 /// Val is either the undef or zero sentinel value.
6158 static bool isUndefOrZero(int Val) {
6159   return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
6160 }
6161 
6162 /// Return true if every element in Mask, beginning from position Pos and ending
6163 /// in Pos+Size is the undef sentinel value.
6164 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
6165   return llvm::all_of(Mask.slice(Pos, Size),
6166                       [](int M) { return M == SM_SentinelUndef; });
6167 }
6168 
6169 /// Return true if the mask creates a vector whose lower half is undefined.
6170 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
6171   unsigned NumElts = Mask.size();
6172   return isUndefInRange(Mask, 0, NumElts / 2);
6173 }
6174 
6175 /// Return true if the mask creates a vector whose upper half is undefined.
6176 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
6177   unsigned NumElts = Mask.size();
6178   return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
6179 }
6180 
6181 /// Return true if Val falls within the specified range [Low, Hi).
6182 static bool isInRange(int Val, int Low, int Hi) {
6183   return (Val >= Low && Val < Hi);
6184 }
6185 
6186 /// Return true if the value of any element in Mask falls within the specified
6187 /// range [Low, Hi).
6188 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
6189   return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
6190 }
6191 
6192 /// Return true if the value of any element in Mask is the zero sentinel value.
6193 static bool isAnyZero(ArrayRef<int> Mask) {
6194   return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
6195 }
6196 
6197 /// Return true if the value of any element in Mask is the zero or undef
6198 /// sentinel values.
6199 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
6200   return llvm::any_of(Mask, [](int M) {
6201     return M == SM_SentinelZero || M == SM_SentinelUndef;
6202   });
6203 }
6204 
6205 /// Return true if Val is undef or if its value falls within the
6206 /// specified range [Low, Hi).
6207 static bool isUndefOrInRange(int Val, int Low, int Hi) {
6208   return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
6209 }
6210 
6211 /// Return true if every element in Mask is undef or if its value
6212 /// falls within the specified range [Low, Hi).
6213 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6214   return llvm::all_of(
6215       Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
6216 }
6217 
6218 /// Return true if Val is undef, zero or if its value falls within the
6219 /// specified range [Low, Hi).
6220 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
6221   return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
6222 }
6223 
6224 /// Return true if every element in Mask is undef, zero or if its value
6225 /// falls within the specified range [Low, Hi).
6226 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6227   return llvm::all_of(
6228       Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
6229 }
6230 
6231 /// Return true if every element in Mask, beginning
6232 /// from position Pos and ending in Pos + Size, falls within the specified
6233 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
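/// e.g. Mask = <4, -1, 6, -1> with Pos = 0, Size = 4, Low = 4, Step = 1
/// returns true.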
6234 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
6235                                        unsigned Size, int Low, int Step = 1) {
6236   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6237     if (!isUndefOrEqual(Mask[i], Low))
6238       return false;
6239   return true;
6240 }
6241 
6242 /// Return true if every element in Mask, beginning
6243 /// from position Pos and ending in Pos+Size, falls within the specified
6244 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or zero.
6245 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
6246                                              unsigned Size, int Low,
6247                                              int Step = 1) {
6248   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6249     if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
6250       return false;
6251   return true;
6252 }
6253 
6254 /// Return true if every element in Mask, beginning
6255 /// from position Pos and ending in Pos+Size is undef or is zero.
6256 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
6257                                  unsigned Size) {
6258   return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
6259 }
6260 
6261 /// Helper function to test whether a shuffle mask could be
6262 /// simplified by widening the elements being shuffled.
6263 ///
6264 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
6265 /// leaves it in an unspecified state.
6266 ///
6267 /// NOTE: This must handle normal vector shuffle masks and *target* vector
6268 /// shuffle masks. The latter have the special property of a '-2' representing
6269 /// a zero-ed lane of a vector.
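/// e.g. Mask <0, 1, -1, 3> widens to <0, 1>, while Mask <0, 2, 1, 3> cannot be
/// widened because neither pair maps onto a single wider element.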
6270 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6271                                     SmallVectorImpl<int> &WidenedMask) {
6272   WidenedMask.assign(Mask.size() / 2, 0);
6273   for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
6274     int M0 = Mask[i];
6275     int M1 = Mask[i + 1];
6276 
6277     // If both elements are undef, it's trivial.
6278     if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
6279       WidenedMask[i / 2] = SM_SentinelUndef;
6280       continue;
6281     }
6282 
6283     // Check for an undef mask and a mask value properly aligned to fit with
6284     // a pair of values. If we find such a case, use the non-undef mask's value.
6285     if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
6286       WidenedMask[i / 2] = M1 / 2;
6287       continue;
6288     }
6289     if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
6290       WidenedMask[i / 2] = M0 / 2;
6291       continue;
6292     }
6293 
6294     // When zeroing, we need to spread the zeroing across both lanes to widen.
6295     if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
6296       if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
6297           (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
6298         WidenedMask[i / 2] = SM_SentinelZero;
6299         continue;
6300       }
6301       return false;
6302     }
6303 
6304     // Finally check if the two mask values are adjacent and aligned with
6305     // a pair.
6306     if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
6307       WidenedMask[i / 2] = M0 / 2;
6308       continue;
6309     }
6310 
6311     // Otherwise we can't safely widen the elements used in this shuffle.
6312     return false;
6313   }
6314   assert(WidenedMask.size() == Mask.size() / 2 &&
6315          "Incorrect size of mask after widening the elements!");
6316 
6317   return true;
6318 }
6319 
6320 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6321                                     const APInt &Zeroable,
6322                                     bool V2IsZero,
6323                                     SmallVectorImpl<int> &WidenedMask) {
6324   // Create an alternative mask with info about zeroable elements.
6325   // Here we do not set undef elements as zeroable.
6326   SmallVector<int, 64> ZeroableMask(Mask);
6327   if (V2IsZero) {
6328     assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
6329     for (int i = 0, Size = Mask.size(); i != Size; ++i)
6330       if (Mask[i] != SM_SentinelUndef && Zeroable[i])
6331         ZeroableMask[i] = SM_SentinelZero;
6332   }
6333   return canWidenShuffleElements(ZeroableMask, WidenedMask);
6334 }
6335 
6336 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
6337   SmallVector<int, 32> WidenedMask;
6338   return canWidenShuffleElements(Mask, WidenedMask);
6339 }
6340 
6341 // Attempt to narrow/widen shuffle mask until it matches the target number of
6342 // elements.
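// e.g. the v2 mask <1, 0> scales to the v4 mask <2, 3, 0, 1>, and the v4 mask
// <0, 1, 6, 7> widens to the v2 mask <0, 3>.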
6343 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
6344                                  SmallVectorImpl<int> &ScaledMask) {
6345   unsigned NumSrcElts = Mask.size();
6346   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
6347          "Illegal shuffle scale factor");
6348 
6349   // Narrowing is guaranteed to work.
6350   if (NumDstElts >= NumSrcElts) {
6351     int Scale = NumDstElts / NumSrcElts;
6352     llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
6353     return true;
6354   }
6355 
6356   // We have to repeat the widening until we reach the target size, but we can
6357   // split out the first widening as it sets up ScaledMask for us.
6358   if (canWidenShuffleElements(Mask, ScaledMask)) {
6359     while (ScaledMask.size() > NumDstElts) {
6360       SmallVector<int, 16> WidenedMask;
6361       if (!canWidenShuffleElements(ScaledMask, WidenedMask))
6362         return false;
6363       ScaledMask = std::move(WidenedMask);
6364     }
6365     return true;
6366   }
6367 
6368   return false;
6369 }
6370 
6371 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
6372 bool X86::isZeroNode(SDValue Elt) {
6373   return isNullConstant(Elt) || isNullFPConstant(Elt);
6374 }
6375 
6376 // Build a vector of constants.
6377 // Use an UNDEF node if MaskElt == -1.
6378 // Split 64-bit constants in the 32-bit mode.
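// e.g. when i64 is not legal (32-bit mode), a v2i64 constant <1, 2> is built
// as the v4i32 build_vector <1, 0, 2, 0> and bitcast back to v2i64.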
6379 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
6380                               const SDLoc &dl, bool IsMask = false) {
6381 
6382   SmallVector<SDValue, 32>  Ops;
6383   bool Split = false;
6384 
6385   MVT ConstVecVT = VT;
6386   unsigned NumElts = VT.getVectorNumElements();
6387   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6388   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6389     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
6390     Split = true;
6391   }
6392 
6393   MVT EltVT = ConstVecVT.getVectorElementType();
6394   for (unsigned i = 0; i < NumElts; ++i) {
6395     bool IsUndef = Values[i] < 0 && IsMask;
6396     SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
6397       DAG.getConstant(Values[i], dl, EltVT);
6398     Ops.push_back(OpNode);
6399     if (Split)
6400       Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
6401                     DAG.getConstant(0, dl, EltVT));
6402   }
6403   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6404   if (Split)
6405     ConstsNode = DAG.getBitcast(VT, ConstsNode);
6406   return ConstsNode;
6407 }
6408 
6409 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
6410                               MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6411   assert(Bits.size() == Undefs.getBitWidth() &&
6412          "Unequal constant and undef arrays");
6413   SmallVector<SDValue, 32> Ops;
6414   bool Split = false;
6415 
6416   MVT ConstVecVT = VT;
6417   unsigned NumElts = VT.getVectorNumElements();
6418   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6419   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6420     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
6421     Split = true;
6422   }
6423 
6424   MVT EltVT = ConstVecVT.getVectorElementType();
6425   for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
6426     if (Undefs[i]) {
6427       Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
6428       continue;
6429     }
6430     const APInt &V = Bits[i];
6431     assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
6432     if (Split) {
6433       Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
6434       Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
6435     } else if (EltVT == MVT::f32) {
6436       APFloat FV(APFloat::IEEEsingle(), V);
6437       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6438     } else if (EltVT == MVT::f64) {
6439       APFloat FV(APFloat::IEEEdouble(), V);
6440       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6441     } else {
6442       Ops.push_back(DAG.getConstant(V, dl, EltVT));
6443     }
6444   }
6445 
6446   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6447   return DAG.getBitcast(VT, ConstsNode);
6448 }
6449 
6450 /// Returns a vector of specified type with all zero elements.
6451 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
6452                              SelectionDAG &DAG, const SDLoc &dl) {
6453   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
6454           VT.getVectorElementType() == MVT::i1) &&
6455          "Unexpected vector type");
6456 
6457   // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
6458   // type. This ensures they get CSE'd. But if the integer type is not
6459   // available, use a floating-point +0.0 instead.
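  // e.g. a v4i64 zero is emitted as a v8i32 zero build_vector and then bitcast
  // back to v4i64.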
6460   SDValue Vec;
6461   if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
6462     Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
6463   } else if (VT.isFloatingPoint()) {
6464     Vec = DAG.getConstantFP(+0.0, dl, VT);
6465   } else if (VT.getVectorElementType() == MVT::i1) {
6466     assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
6467            "Unexpected vector type");
6468     Vec = DAG.getConstant(0, dl, VT);
6469   } else {
6470     unsigned Num32BitElts = VT.getSizeInBits() / 32;
6471     Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
6472   }
6473   return DAG.getBitcast(VT, Vec);
6474 }
6475 
6476 // Helper to determine if the ops are all extracted subvectors that come from a
6477 // single source. If we allow commute they don't have to be in order (Lo/Hi).
6478 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
6479   if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6480       RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6481       LHS.getValueType() != RHS.getValueType() ||
6482       LHS.getOperand(0) != RHS.getOperand(0))
6483     return SDValue();
6484 
6485   SDValue Src = LHS.getOperand(0);
6486   if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
6487     return SDValue();
6488 
6489   unsigned NumElts = LHS.getValueType().getVectorNumElements();
6490   if ((LHS.getConstantOperandAPInt(1) == 0 &&
6491        RHS.getConstantOperandAPInt(1) == NumElts) ||
6492       (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
6493        LHS.getConstantOperandAPInt(1) == NumElts))
6494     return Src;
6495 
6496   return SDValue();
6497 }
6498 
6499 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
6500                                 const SDLoc &dl, unsigned vectorWidth) {
6501   EVT VT = Vec.getValueType();
6502   EVT ElVT = VT.getVectorElementType();
6503   unsigned Factor = VT.getSizeInBits() / vectorWidth;
6504   EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
6505                                   VT.getVectorNumElements() / Factor);
6506 
6507   // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
6508   unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
6509   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6510 
6511   // This is the index of the first element of the vectorWidth-bit chunk
6512   // we want. Since ElemsPerChunk is a power of 2, just clear the low bits.
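  // e.g. for a v8i32 source with vectorWidth == 128, ElemsPerChunk == 4, so an
  // IdxVal of 5 is rounded down to 4 (the start of the upper 128-bit chunk).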
6513   IdxVal &= ~(ElemsPerChunk - 1);
6514 
6515   // If the input is a buildvector just emit a smaller one.
6516   if (Vec.getOpcode() == ISD::BUILD_VECTOR)
6517     return DAG.getBuildVector(ResultVT, dl,
6518                               Vec->ops().slice(IdxVal, ElemsPerChunk));
6519 
6520   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6521   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
6522 }
6523 
6524 /// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
6525 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
6526 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
6527 /// instructions or a simple subregister reference. Idx is an index in the
6528 /// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
6529 /// lowering EXTRACT_VECTOR_ELT operations easier.
6530 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
6531                                    SelectionDAG &DAG, const SDLoc &dl) {
6532   assert((Vec.getValueType().is256BitVector() ||
6533           Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
6534   return extractSubVector(Vec, IdxVal, DAG, dl, 128);
6535 }
6536 
6537 /// Generate a DAG to grab 256-bits from a 512-bit vector.
6538 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
6539                                    SelectionDAG &DAG, const SDLoc &dl) {
6540   assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
6541   return extractSubVector(Vec, IdxVal, DAG, dl, 256);
6542 }
6543 
6544 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6545                                SelectionDAG &DAG, const SDLoc &dl,
6546                                unsigned vectorWidth) {
6547   assert((vectorWidth == 128 || vectorWidth == 256) &&
6548          "Unsupported vector width");
6549   // Inserting UNDEF is Result
6550   // Inserting an UNDEF subvector just returns Result.
6551     return Result;
6552   EVT VT = Vec.getValueType();
6553   EVT ElVT = VT.getVectorElementType();
6554   EVT ResultVT = Result.getValueType();
6555 
6556   // Insert the relevant vectorWidth bits.
6557   unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
6558   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6559 
6560   // This is the index of the first element of the vectorWidth-bit chunk
6561   // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
6562   // we want. Since ElemsPerChunk is a power of 2, just clear the low bits.
6563 
6564   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6565   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
6566 }
6567 
6568 /// Generate a DAG to put 128-bits into a vector > 128 bits.  This
6569 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
6570 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
6571 /// simple superregister reference.  Idx is an index in the 128 bits
6572 /// we want.  It need not be aligned to a 128-bit boundary.  That makes
6573 /// lowering INSERT_VECTOR_ELT operations easier.
6574 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6575                                   SelectionDAG &DAG, const SDLoc &dl) {
6576   assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
6577   return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
6578 }
6579 
6580 /// Widen a vector to a larger size with the same scalar type, with the new
6581 /// elements either zero or undef.
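/// e.g. widening a v4i32 value to v8i32 inserts it at index 0 of a zero or
/// undef v8i32 vector.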
6582 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
6583                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
6584                               const SDLoc &dl) {
6585   assert(Vec.getValueSizeInBits().getFixedValue() < VT.getFixedSizeInBits() &&
6586          Vec.getValueType().getScalarType() == VT.getScalarType() &&
6587          "Unsupported vector widening type");
6588   SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
6589                                 : DAG.getUNDEF(VT);
6590   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
6591                      DAG.getIntPtrConstant(0, dl));
6592 }
6593 
6594 /// Widen a vector to a larger size with the same scalar type, with the new
6595 /// elements either zero or undef.
6596 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
6597                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
6598                               const SDLoc &dl, unsigned WideSizeInBits) {
6599   assert(Vec.getValueSizeInBits() < WideSizeInBits &&
6600          (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
6601          "Unsupported vector widening type");
6602   unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
6603   MVT SVT = Vec.getSimpleValueType().getScalarType();
6604   MVT VT = MVT::getVectorVT(SVT, WideNumElts);
6605   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
6606 }
6607 
6608 // Helper function to collect subvector ops that are concatenated together,
6609 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
6610 // The subvectors in Ops are guaranteed to be the same type.
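// e.g. concat_vectors(x, y) collects {x, y}, and
// insert_subvector(insert_subvector(undef, x, 0), y, NumElts/2) also collects
// {x, y}.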
6611 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
6612                              SelectionDAG &DAG) {
6613   assert(Ops.empty() && "Expected an empty ops vector");
6614 
6615   if (N->getOpcode() == ISD::CONCAT_VECTORS) {
6616     Ops.append(N->op_begin(), N->op_end());
6617     return true;
6618   }
6619 
6620   if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
6621     SDValue Src = N->getOperand(0);
6622     SDValue Sub = N->getOperand(1);
6623     const APInt &Idx = N->getConstantOperandAPInt(2);
6624     EVT VT = Src.getValueType();
6625     EVT SubVT = Sub.getValueType();
6626 
6627     // TODO - Handle more general insert_subvector chains.
6628     if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
6629       // insert_subvector(undef, x, lo)
6630       if (Idx == 0 && Src.isUndef()) {
6631         Ops.push_back(Sub);
6632         Ops.push_back(DAG.getUNDEF(SubVT));
6633         return true;
6634       }
6635       if (Idx == (VT.getVectorNumElements() / 2)) {
6636         // insert_subvector(insert_subvector(undef, x, lo), y, hi)
6637         if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
6638             Src.getOperand(1).getValueType() == SubVT &&
6639             isNullConstant(Src.getOperand(2))) {
6640           Ops.push_back(Src.getOperand(1));
6641           Ops.push_back(Sub);
6642           return true;
6643         }
6644         // insert_subvector(x, extract_subvector(x, lo), hi)
6645         if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6646             Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
6647           Ops.append(2, Sub);
6648           return true;
6649         }
6650         // insert_subvector(undef, x, hi)
6651         if (Src.isUndef()) {
6652           Ops.push_back(DAG.getUNDEF(SubVT));
6653           Ops.push_back(Sub);
6654           return true;
6655         }
6656       }
6657     }
6658   }
6659 
6660   return false;
6661 }
6662 
6663 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
6664                                                const SDLoc &dl) {
6665   EVT VT = Op.getValueType();
6666   unsigned NumElems = VT.getVectorNumElements();
6667   unsigned SizeInBits = VT.getSizeInBits();
6668   assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
6669          "Can't split odd sized vector");
6670 
6671   // If this is a splat value (with no undefs) then use the lower subvector,
6672   // which should be a free extraction.
6673   SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
6674   if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
6675     return std::make_pair(Lo, Lo);
6676 
6677   SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
6678   return std::make_pair(Lo, Hi);
6679 }
6680 
6681 /// Break an operation into 2 half sized ops and then concatenate the results.
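/// e.g. a v8i32 add of A and B becomes
/// concat_vectors(add(lo(A), lo(B)), add(hi(A), hi(B))).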
6682 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
6683   unsigned NumOps = Op.getNumOperands();
6684   EVT VT = Op.getValueType();
6685   SDLoc dl(Op);
6686 
6687   // Split each vector operand into Lo/Hi halves; scalar operands are reused.
6688   SmallVector<SDValue> LoOps(NumOps, SDValue());
6689   SmallVector<SDValue> HiOps(NumOps, SDValue());
6690   for (unsigned I = 0; I != NumOps; ++I) {
6691     SDValue SrcOp = Op.getOperand(I);
6692     if (!SrcOp.getValueType().isVector()) {
6693       LoOps[I] = HiOps[I] = SrcOp;
6694       continue;
6695     }
6696     std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
6697   }
6698 
6699   EVT LoVT, HiVT;
6700   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
6701   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
6702                      DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
6703                      DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
6704 }
6705 
6706 /// Break an unary integer operation into 2 half sized ops and then
6707 /// Break a unary integer operation into 2 half sized ops and then
6708 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
6709   // Make sure we only try to split 256/512-bit types to avoid creating
6710   // narrow vectors.
6711   EVT VT = Op.getValueType();
6712   (void)VT;
6713   assert((Op.getOperand(0).getValueType().is256BitVector() ||
6714           Op.getOperand(0).getValueType().is512BitVector()) &&
6715          (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6716   assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
6717              VT.getVectorNumElements() &&
6718          "Unexpected VTs!");
6719   return splitVectorOp(Op, DAG);
6720 }
6721 
6722 /// Break a binary integer operation into 2 half sized ops and then
6723 /// concatenate the result back.
6724 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
6725   // Assert that all the types match.
6726   EVT VT = Op.getValueType();
6727   (void)VT;
6728   assert(Op.getOperand(0).getValueType() == VT &&
6729          Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
6730   assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6731   return splitVectorOp(Op, DAG);
6732 }
6733 
6734 // Helper for splitting operands of an operation to legal target size and
6735 // apply a function on each part.
6736 // applying a function on each part.
6737 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
6738 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
6739 // The argument Builder is a function that will be applied on each split part:
6740 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
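// e.g. with CheckBWI == true on a target with AVX2 but no BWI, a 1024-bit VT
// is split into four 256-bit parts, Builder is applied to each part's
// operands, and the results are concatenated back together.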
6741 template <typename F>
6742 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
6743                          const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
6744                          F Builder, bool CheckBWI = true) {
6745   assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
6746   unsigned NumSubs = 1;
6747   if ((CheckBWI && Subtarget.useBWIRegs()) ||
6748       (!CheckBWI && Subtarget.useAVX512Regs())) {
6749     if (VT.getSizeInBits() > 512) {
6750       NumSubs = VT.getSizeInBits() / 512;
6751       assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
6752     }
6753   } else if (Subtarget.hasAVX2()) {
6754     if (VT.getSizeInBits() > 256) {
6755       NumSubs = VT.getSizeInBits() / 256;
6756       assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
6757     }
6758   } else {
6759     if (VT.getSizeInBits() > 128) {
6760       NumSubs = VT.getSizeInBits() / 128;
6761       assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
6762     }
6763   }
6764 
6765   if (NumSubs == 1)
6766     return Builder(DAG, DL, Ops);
6767 
6768   SmallVector<SDValue, 4> Subs;
6769   for (unsigned i = 0; i != NumSubs; ++i) {
6770     SmallVector<SDValue, 2> SubOps;
6771     for (SDValue Op : Ops) {
6772       EVT OpVT = Op.getValueType();
6773       unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
6774       unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
6775       SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
6776     }
6777     Subs.push_back(Builder(DAG, DL, SubOps));
6778   }
6779   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
6780 }
6781 
6782 // Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
6783 // targets.
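// e.g. on AVX512F without VLX, a v8i32 op has its vector operands widened to
// v16i32 (inserted into undef 512-bit vectors), the node is built at 512 bits,
// and the low v8i32 subvector of the result is extracted.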
6784 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
6785                              ArrayRef<SDValue> Ops, SelectionDAG &DAG,
6786                              const X86Subtarget &Subtarget) {
6787   assert(Subtarget.hasAVX512() && "AVX512 target expected");
6788   MVT SVT = VT.getScalarType();
6789 
6790   // If we have a 32/64 splatted constant, splat it to DstTy to
6791   // encourage a foldable broadcast'd operand.
6792   auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
6793     unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
6794     // AVX512 broadcasts 32/64-bit operands.
6795     // TODO: Support float once getAVX512Node is used by fp-ops.
6796     if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
6797         !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
6798       return SDValue();
6799     // If we're not widening, don't bother if we're not bitcasting.
6800     if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
6801       return SDValue();
6802     if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
6803       APInt SplatValue, SplatUndef;
6804       unsigned SplatBitSize;
6805       bool HasAnyUndefs;
6806       if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
6807                               HasAnyUndefs, OpEltSizeInBits) &&
6808           !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
6809         return DAG.getConstant(SplatValue, DL, DstVT);
6810     }
6811     return SDValue();
6812   };
6813 
6814   bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
6815 
6816   MVT DstVT = VT;
6817   if (Widen)
6818     DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
6819 
6820   // Canonicalize src operands.
6821   SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
6822   for (SDValue &Op : SrcOps) {
6823     MVT OpVT = Op.getSimpleValueType();
6824     // Just pass through scalar operands.
6825     if (!OpVT.isVector())
6826       continue;
6827     assert(OpVT == VT && "Vector type mismatch");
6828 
6829     if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
6830       Op = BroadcastOp;
6831       continue;
6832     }
6833 
6834     // Just widen the subvector by inserting into an undef wide vector.
6835     if (Widen)
6836       Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
6837   }
6838 
6839   SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
6840 
6841   // Perform the 512-bit op then extract the bottom subvector.
6842   if (Widen)
6843     Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
6844   return Res;
6845 }
6846 
6847 /// Insert an i1 subvector into an i1 vector.
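/// In the general case the subvector is widened to a legal kshift type,
/// shifted into position with KSHIFTL/KSHIFTR, and merged into the destination
/// vector with OR; several simpler cases are handled directly below.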
6848 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
6849                                 const X86Subtarget &Subtarget) {
6850 
6851   SDLoc dl(Op);
6852   SDValue Vec = Op.getOperand(0);
6853   SDValue SubVec = Op.getOperand(1);
6854   SDValue Idx = Op.getOperand(2);
6855   unsigned IdxVal = Op.getConstantOperandVal(2);
6856 
6857   // Inserting undef is a nop. We can just return the original vector.
6858   if (SubVec.isUndef())
6859     return Vec;
6860 
6861   if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
6862     return Op;
6863 
6864   MVT OpVT = Op.getSimpleValueType();
6865   unsigned NumElems = OpVT.getVectorNumElements();
6866   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6867 
6868   // Extend to natively supported kshift.
6869   MVT WideOpVT = OpVT;
6870   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
6871     WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
6872 
6873   // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
6874   // if necessary.
6875   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
6876     // May need to promote to a legal type.
6877     Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6878                      DAG.getConstant(0, dl, WideOpVT),
6879                      SubVec, Idx);
6880     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6881   }
6882 
6883   MVT SubVecVT = SubVec.getSimpleValueType();
6884   unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
6885   assert(IdxVal + SubVecNumElems <= NumElems &&
6886          IdxVal % SubVecVT.getSizeInBits() == 0 &&
6887          "Unexpected index value in INSERT_SUBVECTOR");
6888 
6889   SDValue Undef = DAG.getUNDEF(WideOpVT);
6890 
6891   if (IdxVal == 0) {
6892     // Zero lower bits of the Vec
6893     SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
6894     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
6895                       ZeroIdx);
6896     Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6897     Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6898     // Merge them together, SubVec should be zero extended.
6899     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6900                          DAG.getConstant(0, dl, WideOpVT),
6901                          SubVec, ZeroIdx);
6902     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6903     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6904   }
6905 
6906   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6907                        Undef, SubVec, ZeroIdx);
6908 
6909   if (Vec.isUndef()) {
6910     assert(IdxVal != 0 && "Unexpected index");
6911     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6912                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6913     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6914   }
6915 
6916   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
6917     assert(IdxVal != 0 && "Unexpected index");
6918     // If upper elements of Vec are known undef, then just shift into place.
6919     if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
6920                      [](SDValue V) { return V.isUndef(); })) {
6921       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6922                            DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6923     } else {
6924       NumElems = WideOpVT.getVectorNumElements();
6925       unsigned ShiftLeft = NumElems - SubVecNumElems;
6926       unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6927       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6928                            DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6929       if (ShiftRight != 0)
6930         SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6931                              DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6932     }
6933     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6934   }
6935 
6936   // Simple case when we put subvector in the upper part
6937   // Simple case when we put the subvector in the upper part.
6938     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6939                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6940     if (SubVecNumElems * 2 == NumElems) {
6941       // Special case, use legal zero extending insert_subvector. This allows
6942       // isel to optimize when bits are known zero.
6943       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
6944       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6945                         DAG.getConstant(0, dl, WideOpVT),
6946                         Vec, ZeroIdx);
6947     } else {
6948       // Otherwise use explicit shifts to zero the bits.
6949       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6950                         Undef, Vec, ZeroIdx);
6951       NumElems = WideOpVT.getVectorNumElements();
6952       SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
6953       Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6954       Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6955     }
6956     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6957     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6958   }
6959 
6960   // Inserting into the middle is more complicated.
6961 
6962   NumElems = WideOpVT.getVectorNumElements();
6963 
6964   // Widen the vector if needed.
6965   Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
6966 
6967   unsigned ShiftLeft = NumElems - SubVecNumElems;
6968   unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6969 
6970   // Do an optimization for the most frequently used types.
6971   if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
6972     APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
6973     Mask0.flipAllBits();
6974     SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
6975     SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
6976     Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
6977     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6978                          DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6979     SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6980                          DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6981     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6982 
6983     // Reduce to original width if needed.
6984     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6985   }
6986 
6987   // Clear the upper bits of the subvector and move it to its insert position.
6988   SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6989                        DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6990   SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6991                        DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6992 
6993   // Isolate the bits below the insertion point.
6994   unsigned LowShift = NumElems - IdxVal;
6995   SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
6996                             DAG.getTargetConstant(LowShift, dl, MVT::i8));
6997   Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
6998                     DAG.getTargetConstant(LowShift, dl, MVT::i8));
6999 
7000   // Isolate the bits after the last inserted bit.
7001   unsigned HighShift = IdxVal + SubVecNumElems;
7002   SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
7003                             DAG.getTargetConstant(HighShift, dl, MVT::i8));
7004   High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
7005                     DAG.getTargetConstant(HighShift, dl, MVT::i8));
7006 
7007   // Now OR all 3 pieces together.
7008   Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
7009   SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
7010 
7011   // Reduce to original width if needed.
7012   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
7013 }
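// Editorial example (illustrative only, not from the upstream source): a
// worked trace of the masked-OR path above, assuming a hypothetical WideOpVT
// of v8i1 with IdxVal == 2 and SubVecNumElems == 2:
//   Mask0      = ~getBitsSet(8, 2, 4) = 0b11110011  (keeps Vec outside bits 2..3)
//   ShiftLeft  = 8 - 2     = 6
//   ShiftRight = 8 - 2 - 2 = 4
//   SubVec: KSHIFTL by 6 then KSHIFTR by 4 leaves its two payload bits at
//   positions 2..3 with everything else zero, so the final OR drops them into
//   the hole cleared in Vec.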
7014 
7015 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
7016                                 const SDLoc &dl) {
7017   assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
7018   EVT SubVT = V1.getValueType();
7019   EVT SubSVT = SubVT.getScalarType();
7020   unsigned SubNumElts = SubVT.getVectorNumElements();
7021   unsigned SubVectorWidth = SubVT.getSizeInBits();
7022   EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
7023   SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
7024   return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
7025 }
7026 
7027 /// Returns a vector of specified type with all bits set.
7028 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
7029 /// Then bitcast to their original type, ensuring they get CSE'd.
7030 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
7031   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7032          "Expected a 128/256/512-bit vector type");
7033 
7034   APInt Ones = APInt::getAllOnes(32);
7035   unsigned NumElts = VT.getSizeInBits() / 32;
7036   SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
7037   return DAG.getBitcast(VT, Vec);
7038 }
7039 
7040 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
7041                                       SDValue In, SelectionDAG &DAG) {
7042   EVT InVT = In.getValueType();
7043   assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
7044   assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
7045           ISD::ZERO_EXTEND == Opcode) &&
7046          "Unknown extension opcode");
7047 
7048   // For 256-bit vectors, we only need the lower (128-bit) input half.
7049   // For 512-bit vectors, we only need the lower input half or quarter.
7050   if (InVT.getSizeInBits() > 128) {
7051     assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
7052            "Expected VTs to be the same size!");
7053     unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
7054     In = extractSubVector(In, 0, DAG, DL,
7055                           std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
7056     InVT = In.getValueType();
7057   }
7058 
7059   if (VT.getVectorNumElements() != InVT.getVectorNumElements())
7060     Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
7061 
7062   return DAG.getNode(Opcode, DL, VT, In);
7063 }
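// Editorial example (illustrative only, not from the upstream source): two
// hypothetical uses of the helper above.
//   getEXTEND_VECTOR_INREG(ISD::ZERO_EXTEND, DL, MVT::v8i16, /*v16i8*/ In, DAG)
//     -> element counts differ (8 vs 16), so ZERO_EXTEND_VECTOR_INREG is used.
//   getEXTEND_VECTOR_INREG(ISD::ZERO_EXTEND, DL, MVT::v16i32, /*v64i8*/ In, DAG)
//     -> the 512-bit input is first narrowed to its low 128 bits (v16i8), after
//        which the element counts match and a plain ZERO_EXTEND is emitted.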
7064 
7065 // Match (xor X, -1) -> X.
7066 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
7067 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
7068 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
7069   V = peekThroughBitcasts(V);
7070   if (V.getOpcode() == ISD::XOR &&
7071       (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
7072        isAllOnesConstant(V.getOperand(1))))
7073     return V.getOperand(0);
7074   if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7075       (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
7076     if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
7077       Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
7078       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
7079                          Not, V.getOperand(1));
7080     }
7081   }
7082   SmallVector<SDValue, 2> CatOps;
7083   if (collectConcatOps(V.getNode(), CatOps, DAG)) {
7084     for (SDValue &CatOp : CatOps) {
7085       SDValue NotCat = IsNOT(CatOp, DAG);
7086       if (!NotCat) return SDValue();
7087       CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
7088     }
7089     return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
7090   }
7091   return SDValue();
7092 }
7093 
7094 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
7095                                    bool Lo, bool Unary) {
7096   assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
7097          "Illegal vector type to unpack");
7098   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7099   int NumElts = VT.getVectorNumElements();
7100   int NumEltsInLane = 128 / VT.getScalarSizeInBits();
7101   for (int i = 0; i < NumElts; ++i) {
7102     unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
7103     int Pos = (i % NumEltsInLane) / 2 + LaneStart;
7104     Pos += (Unary ? 0 : NumElts * (i % 2));
7105     Pos += (Lo ? 0 : NumEltsInLane / 2);
7106     Mask.push_back(Pos);
7107   }
7108 }
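// Editorial example (illustrative only, not from the upstream source): tracing
// the loop above for a hypothetical 256-bit v8i32 reproduces the per-128-bit-lane
// unpack layout:
//   createUnpackShuffleMask(v8i32, Mask, /*Lo=*/true,  /*Unary=*/false)
//     -> <0, 8, 1, 9, 4, 12, 5, 13>   (vunpcklps pattern)
//   createUnpackShuffleMask(v8i32, Mask, /*Lo=*/false, /*Unary=*/false)
//     -> <2, 10, 3, 11, 6, 14, 7, 15> (vunpckhps pattern)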
7109 
7110 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
7111 /// imposed by AVX and specific to the unary pattern. Example:
7112 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
7113 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
7114 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7115                                    bool Lo) {
7116   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7117   int NumElts = VT.getVectorNumElements();
7118   for (int i = 0; i < NumElts; ++i) {
7119     int Pos = i / 2;
7120     Pos += (Lo ? 0 : NumElts / 2);
7121     Mask.push_back(Pos);
7122   }
7123 }
7124 
7125 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
7126 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
7127                                 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
7128   if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
7129       (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
7130     SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
7131     for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
7132       int M = Mask[I];
7133       if (M < 0)
7134         continue;
7135       SDValue V = (M < NumElts) ? V1 : V2;
7136       if (V.isUndef())
7137         continue;
7138       Ops[I] = V.getOperand(M % NumElts);
7139     }
7140     return DAG.getBuildVector(VT, dl, Ops);
7141   }
7142 
7143   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
7144 }
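// Editorial example (illustrative only, not from the upstream source): when
// both inputs are constant (or undef) build vectors, the helper above folds
// the shuffle directly. With hypothetical v2i32 inputs V1 = <a, b> and
// V2 = <c, d> and Mask = <0, 3>, it returns build_vector(a, d) instead of a
// VECTOR_SHUFFLE node.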
7145 
7146 /// Returns a vector_shuffle node for an unpackl operation.
7147 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7148                           SDValue V1, SDValue V2) {
7149   SmallVector<int, 8> Mask;
7150   createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
7151   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7152 }
7153 
7154 /// Returns a vector_shuffle node for an unpackh operation.
7155 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7156                           SDValue V1, SDValue V2) {
7157   SmallVector<int, 8> Mask;
7158   createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
7159   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7160 }
7161 
7162 /// Returns a node that packs the LHS + RHS nodes together at half width.
7163 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
7164 /// TODO: Add subvector splitting if/when we have a need for it.
7165 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
7166                        const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
7167                        bool PackHiHalf = false) {
7168   MVT OpVT = LHS.getSimpleValueType();
7169   unsigned EltSizeInBits = VT.getScalarSizeInBits();
7170   bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
7171   assert(OpVT == RHS.getSimpleValueType() &&
7172          VT.getSizeInBits() == OpVT.getSizeInBits() &&
7173          (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
7174          "Unexpected PACK operand types");
7175   assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
7176          "Unexpected PACK result type");
7177 
7178   // Rely on vector shuffles for vXi64 -> vXi32 packing.
7179   if (EltSizeInBits == 32) {
7180     SmallVector<int> PackMask;
7181     int Offset = PackHiHalf ? 1 : 0;
7182     int NumElts = VT.getVectorNumElements();
7183     for (int I = 0; I != NumElts; I += 4) {
7184       PackMask.push_back(I + Offset);
7185       PackMask.push_back(I + Offset + 2);
7186       PackMask.push_back(I + Offset + NumElts);
7187       PackMask.push_back(I + Offset + NumElts + 2);
7188     }
7189     return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
7190                                 DAG.getBitcast(VT, RHS), PackMask);
7191   }
7192 
7193   // See if we already have sufficient leading bits for PACKSS/PACKUS.
7194   if (!PackHiHalf) {
7195     if (UsePackUS &&
7196         DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
7197         DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
7198       return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7199 
7200     if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
7201         DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
7202       return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7203   }
7204 
7205   // Fall back to sign/zero extending the requested half and pack.
7206   SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
7207   if (UsePackUS) {
7208     if (PackHiHalf) {
7209       LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
7210       RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
7211     } else {
7212       SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
7213       LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
7214       RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
7215     }
7216     return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7217   }
7218 
7219   if (!PackHiHalf) {
7220     LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
7221     RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
7222   }
7223   LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
7224   RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
7225   return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7226 }
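// Editorial example (illustrative only, not from the upstream source): for the
// EltSizeInBits == 32 path above, packing two hypothetical v2i64 operands into
// v4i32 bitcasts both sides to v4i32 and shuffles with mask <0, 2, 4, 6>
// (or <1, 3, 5, 7> for PackHiHalf), i.e. it keeps the low (or high) 32-bit half
// of every 64-bit element.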
7227 
7228 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
7229 /// This produces a shuffle where the low element of V2 is swizzled into the
7230 /// zero/undef vector, landing at element Idx.
7231 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
7232 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
7233                                            bool IsZero,
7234                                            const X86Subtarget &Subtarget,
7235                                            SelectionDAG &DAG) {
7236   MVT VT = V2.getSimpleValueType();
7237   SDValue V1 = IsZero
7238     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
7239   int NumElems = VT.getVectorNumElements();
7240   SmallVector<int, 16> MaskVec(NumElems);
7241   for (int i = 0; i != NumElems; ++i)
7242     // If this is the insertion idx, put the low elt of V2 here.
7243     MaskVec[i] = (i == Idx) ? NumElems : i;
7244   return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
7245 }
7246 
7247 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
7248   if (Ptr.getOpcode() == X86ISD::Wrapper ||
7249       Ptr.getOpcode() == X86ISD::WrapperRIP)
7250     Ptr = Ptr.getOperand(0);
7251 
7252   auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
7253   if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
7254     return nullptr;
7255 
7256   return CNode->getConstVal();
7257 }
7258 
7259 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
7260   if (!Load || !ISD::isNormalLoad(Load))
7261     return nullptr;
7262   return getTargetConstantFromBasePtr(Load->getBasePtr());
7263 }
7264 
7265 static const Constant *getTargetConstantFromNode(SDValue Op) {
7266   Op = peekThroughBitcasts(Op);
7267   return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
7268 }
7269 
7270 const Constant *
7271 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
7272   assert(LD && "Unexpected null LoadSDNode");
7273   return getTargetConstantFromNode(LD);
7274 }
7275 
7276 // Extract raw constant bits from constant pools.
7277 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
7278                                           APInt &UndefElts,
7279                                           SmallVectorImpl<APInt> &EltBits,
7280                                           bool AllowWholeUndefs = true,
7281                                           bool AllowPartialUndefs = true) {
7282   assert(EltBits.empty() && "Expected an empty EltBits vector");
7283 
7284   Op = peekThroughBitcasts(Op);
7285 
7286   EVT VT = Op.getValueType();
7287   unsigned SizeInBits = VT.getSizeInBits();
7288   assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
7289   unsigned NumElts = SizeInBits / EltSizeInBits;
7290 
7291   // Bitcast a source array of element bits to the target size.
7292   auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
7293     unsigned NumSrcElts = UndefSrcElts.getBitWidth();
7294     unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
7295     assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
7296            "Constant bit sizes don't match");
7297 
7298     // Don't split if we don't allow undef bits.
7299     bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
7300     if (UndefSrcElts.getBoolValue() && !AllowUndefs)
7301       return false;
7302 
7303     // If we're already the right size, don't bother bitcasting.
7304     if (NumSrcElts == NumElts) {
7305       UndefElts = UndefSrcElts;
7306       EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
7307       return true;
7308     }
7309 
7310     // Extract all the undef/constant element data and pack into single bitsets.
7311     APInt UndefBits(SizeInBits, 0);
7312     APInt MaskBits(SizeInBits, 0);
7313 
7314     for (unsigned i = 0; i != NumSrcElts; ++i) {
7315       unsigned BitOffset = i * SrcEltSizeInBits;
7316       if (UndefSrcElts[i])
7317         UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
7318       MaskBits.insertBits(SrcEltBits[i], BitOffset);
7319     }
7320 
7321     // Split the undef/constant single bitset data into the target elements.
7322     UndefElts = APInt(NumElts, 0);
7323     EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
7324 
7325     for (unsigned i = 0; i != NumElts; ++i) {
7326       unsigned BitOffset = i * EltSizeInBits;
7327       APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
7328 
7329       // Only treat an element as UNDEF if all bits are UNDEF.
7330       if (UndefEltBits.isAllOnes()) {
7331         if (!AllowWholeUndefs)
7332           return false;
7333         UndefElts.setBit(i);
7334         continue;
7335       }
7336 
7337       // If only some bits are UNDEF then treat them as zero (or bail if not
7338       // supported).
7339       if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
7340         return false;
7341 
7342       EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
7343     }
7344     return true;
7345   };
7346 
7347   // Collect constant bits and insert into mask/undef bit masks.
7348   auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
7349                                 unsigned UndefBitIndex) {
7350     if (!Cst)
7351       return false;
7352     if (isa<UndefValue>(Cst)) {
7353       Undefs.setBit(UndefBitIndex);
7354       return true;
7355     }
7356     if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
7357       Mask = CInt->getValue();
7358       return true;
7359     }
7360     if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
7361       Mask = CFP->getValueAPF().bitcastToAPInt();
7362       return true;
7363     }
7364     return false;
7365   };
7366 
7367   // Handle UNDEFs.
7368   if (Op.isUndef()) {
7369     APInt UndefSrcElts = APInt::getAllOnes(NumElts);
7370     SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
7371     return CastBitData(UndefSrcElts, SrcEltBits);
7372   }
7373 
7374   // Extract scalar constant bits.
7375   if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
7376     APInt UndefSrcElts = APInt::getZero(1);
7377     SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
7378     return CastBitData(UndefSrcElts, SrcEltBits);
7379   }
7380   if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7381     APInt UndefSrcElts = APInt::getZero(1);
7382     APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
7383     SmallVector<APInt, 64> SrcEltBits(1, RawBits);
7384     return CastBitData(UndefSrcElts, SrcEltBits);
7385   }
7386 
7387   // Extract constant bits from build vector.
7388   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
7389     BitVector Undefs;
7390     SmallVector<APInt> SrcEltBits;
7391     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7392     if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
7393       APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
7394       for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
7395         if (Undefs[I])
7396           UndefSrcElts.setBit(I);
7397       return CastBitData(UndefSrcElts, SrcEltBits);
7398     }
7399   }
7400 
7401   // Extract constant bits from constant pool vector.
7402   if (auto *Cst = getTargetConstantFromNode(Op)) {
7403     Type *CstTy = Cst->getType();
7404     unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7405     if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
7406       return false;
7407 
7408     unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
7409     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7410 
7411     APInt UndefSrcElts(NumSrcElts, 0);
7412     SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
7413     for (unsigned i = 0; i != NumSrcElts; ++i)
7414       if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
7415                                UndefSrcElts, i))
7416         return false;
7417 
7418     return CastBitData(UndefSrcElts, SrcEltBits);
7419   }
7420 
7421   // Extract constant bits from a broadcasted constant pool scalar.
7422   if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
7423       EltSizeInBits <= VT.getScalarSizeInBits()) {
7424     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7425     if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
7426       return false;
7427 
7428     SDValue Ptr = MemIntr->getBasePtr();
7429     if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
7430       unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
7431       unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7432 
7433       APInt UndefSrcElts(NumSrcElts, 0);
7434       SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
7435       if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
7436         if (UndefSrcElts[0])
7437           UndefSrcElts.setBits(0, NumSrcElts);
7438         SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
7439         return CastBitData(UndefSrcElts, SrcEltBits);
7440       }
7441     }
7442   }
7443 
7444   // Extract constant bits from a subvector broadcast.
7445   if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
7446     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7447     SDValue Ptr = MemIntr->getBasePtr();
7448     // The source constant may be larger than the subvector broadcast, so
7449     // ensure we extract the correct subvector constants.
7450     if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
7451       Type *CstTy = Cst->getType();
7452       unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7453       unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
7454       if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
7455           (SizeInBits % SubVecSizeInBits) != 0)
7456         return false;
7457       unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
7458       unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
7459       unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
7460       APInt UndefSubElts(NumSubElts, 0);
7461       SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
7462                                         APInt(CstEltSizeInBits, 0));
7463       for (unsigned i = 0; i != NumSubElts; ++i) {
7464         if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
7465                                  UndefSubElts, i))
7466           return false;
7467         for (unsigned j = 1; j != NumSubVecs; ++j)
7468           SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
7469       }
7470       UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
7471                                      UndefSubElts);
7472       return CastBitData(UndefSubElts, SubEltBits);
7473     }
7474   }
7475 
7476   // Extract a rematerialized scalar constant insertion.
7477   if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
7478       Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
7479       isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
7480     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7481     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7482 
7483     APInt UndefSrcElts(NumSrcElts, 0);
7484     SmallVector<APInt, 64> SrcEltBits;
7485     auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
7486     SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
7487     SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
7488     return CastBitData(UndefSrcElts, SrcEltBits);
7489   }
7490 
7491   // Insert constant bits from a base and sub vector sources.
7492   if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
7493     // If we bitcast to larger elements we might lose track of undefs, so to
7494     // be safe don't allow any undefs.
7495     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7496     bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
7497 
7498     APInt UndefSrcElts, UndefSubElts;
7499     SmallVector<APInt, 32> EltSrcBits, EltSubBits;
7500     if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
7501                                       UndefSubElts, EltSubBits,
7502                                       AllowWholeUndefs && AllowUndefs,
7503                                       AllowPartialUndefs && AllowUndefs) &&
7504         getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
7505                                       UndefSrcElts, EltSrcBits,
7506                                       AllowWholeUndefs && AllowUndefs,
7507                                       AllowPartialUndefs && AllowUndefs)) {
7508       unsigned BaseIdx = Op.getConstantOperandVal(2);
7509       UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
7510       for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
7511         EltSrcBits[BaseIdx + i] = EltSubBits[i];
7512       return CastBitData(UndefSrcElts, EltSrcBits);
7513     }
7514   }
7515 
7516   // Extract constant bits from a subvector's source.
7517   if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
7518     // TODO - support extract_subvector through bitcasts.
7519     if (EltSizeInBits != VT.getScalarSizeInBits())
7520       return false;
7521 
7522     if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7523                                       UndefElts, EltBits, AllowWholeUndefs,
7524                                       AllowPartialUndefs)) {
7525       EVT SrcVT = Op.getOperand(0).getValueType();
7526       unsigned NumSrcElts = SrcVT.getVectorNumElements();
7527       unsigned NumSubElts = VT.getVectorNumElements();
7528       unsigned BaseIdx = Op.getConstantOperandVal(1);
7529       UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
7530       if ((BaseIdx + NumSubElts) != NumSrcElts)
7531         EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
7532       if (BaseIdx != 0)
7533         EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
7534       return true;
7535     }
7536   }
7537 
7538   // Extract constant bits from shuffle node sources.
7539   if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
7540     // TODO - support shuffle through bitcasts.
7541     if (EltSizeInBits != VT.getScalarSizeInBits())
7542       return false;
7543 
7544     ArrayRef<int> Mask = SVN->getMask();
7545     if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
7546         llvm::any_of(Mask, [](int M) { return M < 0; }))
7547       return false;
7548 
7549     APInt UndefElts0, UndefElts1;
7550     SmallVector<APInt, 32> EltBits0, EltBits1;
7551     if (isAnyInRange(Mask, 0, NumElts) &&
7552         !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7553                                        UndefElts0, EltBits0, AllowWholeUndefs,
7554                                        AllowPartialUndefs))
7555       return false;
7556     if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
7557         !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
7558                                        UndefElts1, EltBits1, AllowWholeUndefs,
7559                                        AllowPartialUndefs))
7560       return false;
7561 
7562     UndefElts = APInt::getZero(NumElts);
7563     for (int i = 0; i != (int)NumElts; ++i) {
7564       int M = Mask[i];
7565       if (M < 0) {
7566         UndefElts.setBit(i);
7567         EltBits.push_back(APInt::getZero(EltSizeInBits));
7568       } else if (M < (int)NumElts) {
7569         if (UndefElts0[M])
7570           UndefElts.setBit(i);
7571         EltBits.push_back(EltBits0[M]);
7572       } else {
7573         if (UndefElts1[M - NumElts])
7574           UndefElts.setBit(i);
7575         EltBits.push_back(EltBits1[M - NumElts]);
7576       }
7577     }
7578     return true;
7579   }
7580 
7581   return false;
7582 }
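// Editorial example (illustrative only, not from the upstream source):
// CastBitData above re-slices whatever constant data was found into
// EltSizeInBits chunks. For instance, a constant v2i64 source queried with
// EltSizeInBits == 32 yields four 32-bit APInts; a target element whose bits
// are only partially undef is either treated as zero bits (when
// AllowPartialUndefs is set) or causes the whole query to fail.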
7583 
7584 namespace llvm {
7585 namespace X86 {
7586 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
7587   APInt UndefElts;
7588   SmallVector<APInt, 16> EltBits;
7589   if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
7590                                     UndefElts, EltBits, true,
7591                                     AllowPartialUndefs)) {
7592     int SplatIndex = -1;
7593     for (int i = 0, e = EltBits.size(); i != e; ++i) {
7594       if (UndefElts[i])
7595         continue;
7596       if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
7597         SplatIndex = -1;
7598         break;
7599       }
7600       SplatIndex = i;
7601     }
7602     if (0 <= SplatIndex) {
7603       SplatVal = EltBits[SplatIndex];
7604       return true;
7605     }
7606   }
7607 
7608   return false;
7609 }
7610 } // namespace X86
7611 } // namespace llvm
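// Editorial example (illustrative only, not from the upstream source):
// X86::isConstantSplat succeeds when every defined element carries the same
// bits, e.g. for a hypothetical build_vector <i32 7, undef, i32 7, i32 7> it
// sets SplatVal = 7, while <i32 7, i32 8, i32 7, i32 7> fails because the
// defined elements disagree.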
7612 
7613 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
7614                                         unsigned MaskEltSizeInBits,
7615                                         SmallVectorImpl<uint64_t> &RawMask,
7616                                         APInt &UndefElts) {
7617   // Extract the raw target constant bits.
7618   SmallVector<APInt, 64> EltBits;
7619   if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
7620                                      EltBits, /* AllowWholeUndefs */ true,
7621                                      /* AllowPartialUndefs */ false))
7622     return false;
7623 
7624   // Insert the extracted elements into the mask.
7625   for (const APInt &Elt : EltBits)
7626     RawMask.push_back(Elt.getZExtValue());
7627 
7628   return true;
7629 }
7630 
7631 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
7632 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
7633 /// Note: This ignores saturation, so inputs must be checked first.
7634 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7635                                   bool Unary, unsigned NumStages = 1) {
7636   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7637   unsigned NumElts = VT.getVectorNumElements();
7638   unsigned NumLanes = VT.getSizeInBits() / 128;
7639   unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
7640   unsigned Offset = Unary ? 0 : NumElts;
7641   unsigned Repetitions = 1u << (NumStages - 1);
7642   unsigned Increment = 1u << NumStages;
7643   assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
7644 
7645   for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
7646     for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
7647       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7648         Mask.push_back(Elt + (Lane * NumEltsPerLane));
7649       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7650         Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
7651     }
7652   }
7653 }
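// Editorial example (illustrative only, not from the upstream source): for a
// hypothetical 128-bit MVT::v16i8 with Unary == false and NumStages == 1, the
// loops above produce
//   <0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30>
// i.e. the even elements of the LHS followed by the even elements of the RHS,
// matching a single PACKSSWB/PACKUSWB of two v8i16 inputs.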
7654 
7655 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
7656 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
7657                                 APInt &DemandedLHS, APInt &DemandedRHS) {
7658   int NumLanes = VT.getSizeInBits() / 128;
7659   int NumElts = DemandedElts.getBitWidth();
7660   int NumInnerElts = NumElts / 2;
7661   int NumEltsPerLane = NumElts / NumLanes;
7662   int NumInnerEltsPerLane = NumInnerElts / NumLanes;
7663 
7664   DemandedLHS = APInt::getZero(NumInnerElts);
7665   DemandedRHS = APInt::getZero(NumInnerElts);
7666 
7667   // Map DemandedElts to the packed operands.
7668   for (int Lane = 0; Lane != NumLanes; ++Lane) {
7669     for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
7670       int OuterIdx = (Lane * NumEltsPerLane) + Elt;
7671       int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
7672       if (DemandedElts[OuterIdx])
7673         DemandedLHS.setBit(InnerIdx);
7674       if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
7675         DemandedRHS.setBit(InnerIdx);
7676     }
7677   }
7678 }
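// Editorial example (illustrative only, not from the upstream source): for a
// hypothetical 128-bit v16i8 PACK result (one lane, 8 inner elements per
// operand), demanding output element 3 sets bit 3 of DemandedLHS, while
// demanding output element 11 sets bit 3 of DemandedRHS.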
7679 
7680 // Split the demanded elts of a HADD/HSUB node between its operands.
7681 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
7682                                  APInt &DemandedLHS, APInt &DemandedRHS) {
7683   int NumLanes = VT.getSizeInBits() / 128;
7684   int NumElts = DemandedElts.getBitWidth();
7685   int NumEltsPerLane = NumElts / NumLanes;
7686   int HalfEltsPerLane = NumEltsPerLane / 2;
7687 
7688   DemandedLHS = APInt::getZero(NumElts);
7689   DemandedRHS = APInt::getZero(NumElts);
7690 
7691   // Map DemandedElts to the horizontal operands.
7692   for (int Idx = 0; Idx != NumElts; ++Idx) {
7693     if (!DemandedElts[Idx])
7694       continue;
7695     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
7696     int LocalIdx = Idx % NumEltsPerLane;
7697     if (LocalIdx < HalfEltsPerLane) {
7698       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7699       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7700     } else {
7701       LocalIdx -= HalfEltsPerLane;
7702       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7703       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7704     }
7705   }
7706 }
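// Editorial example (illustrative only, not from the upstream source): for a
// hypothetical v8i32 HADD (two 128-bit lanes of four elements), demanding
// output element 1 marks LHS elements 2 and 3, and demanding output element 6
// marks RHS elements 4 and 5, i.e. each output element consumes one adjacent
// pair from one operand within its lane.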
7707 
7708 /// Calculates the shuffle mask corresponding to the target-specific opcode.
7709 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
7710 /// operands in \p Ops, and returns true.
7711 /// Sets \p IsUnary to true if only one source is used. Note that this will set
7712 /// IsUnary for shuffles which use a single input multiple times, and in those
7713 /// cases it will adjust the mask to only have indices within that single input.
7714 /// It is an error to call this with non-empty Mask/Ops vectors.
7715 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
7716                                  SmallVectorImpl<SDValue> &Ops,
7717                                  SmallVectorImpl<int> &Mask, bool &IsUnary) {
7718   unsigned NumElems = VT.getVectorNumElements();
7719   unsigned MaskEltSize = VT.getScalarSizeInBits();
7720   SmallVector<uint64_t, 32> RawMask;
7721   APInt RawUndefs;
7722   uint64_t ImmN;
7723 
7724   assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
7725   assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
7726 
7727   IsUnary = false;
7728   bool IsFakeUnary = false;
7729   switch (N->getOpcode()) {
7730   case X86ISD::BLENDI:
7731     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7732     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7733     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7734     DecodeBLENDMask(NumElems, ImmN, Mask);
7735     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7736     break;
7737   case X86ISD::SHUFP:
7738     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7739     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7740     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7741     DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
7742     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7743     break;
7744   case X86ISD::INSERTPS:
7745     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7746     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7747     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7748     DecodeINSERTPSMask(ImmN, Mask);
7749     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7750     break;
7751   case X86ISD::EXTRQI:
7752     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7753     if (isa<ConstantSDNode>(N->getOperand(1)) &&
7754         isa<ConstantSDNode>(N->getOperand(2))) {
7755       int BitLen = N->getConstantOperandVal(1);
7756       int BitIdx = N->getConstantOperandVal(2);
7757       DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7758       IsUnary = true;
7759     }
7760     break;
7761   case X86ISD::INSERTQI:
7762     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7763     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7764     if (isa<ConstantSDNode>(N->getOperand(2)) &&
7765         isa<ConstantSDNode>(N->getOperand(3))) {
7766       int BitLen = N->getConstantOperandVal(2);
7767       int BitIdx = N->getConstantOperandVal(3);
7768       DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7769       IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7770     }
7771     break;
7772   case X86ISD::UNPCKH:
7773     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7774     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7775     DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
7776     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7777     break;
7778   case X86ISD::UNPCKL:
7779     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7780     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7781     DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
7782     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7783     break;
7784   case X86ISD::MOVHLPS:
7785     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7786     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7787     DecodeMOVHLPSMask(NumElems, Mask);
7788     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7789     break;
7790   case X86ISD::MOVLHPS:
7791     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7792     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7793     DecodeMOVLHPSMask(NumElems, Mask);
7794     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7795     break;
7796   case X86ISD::VALIGN:
7797     assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
7798            "Only 32-bit and 64-bit elements are supported!");
7799     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7800     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7801     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7802     DecodeVALIGNMask(NumElems, ImmN, Mask);
7803     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7804     Ops.push_back(N->getOperand(1));
7805     Ops.push_back(N->getOperand(0));
7806     break;
7807   case X86ISD::PALIGNR:
7808     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7809     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7810     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7811     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7812     DecodePALIGNRMask(NumElems, ImmN, Mask);
7813     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7814     Ops.push_back(N->getOperand(1));
7815     Ops.push_back(N->getOperand(0));
7816     break;
7817   case X86ISD::VSHLDQ:
7818     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7819     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7820     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7821     DecodePSLLDQMask(NumElems, ImmN, Mask);
7822     IsUnary = true;
7823     break;
7824   case X86ISD::VSRLDQ:
7825     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7826     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7827     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7828     DecodePSRLDQMask(NumElems, ImmN, Mask);
7829     IsUnary = true;
7830     break;
7831   case X86ISD::PSHUFD:
7832   case X86ISD::VPERMILPI:
7833     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7834     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7835     DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
7836     IsUnary = true;
7837     break;
7838   case X86ISD::PSHUFHW:
7839     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7840     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7841     DecodePSHUFHWMask(NumElems, ImmN, Mask);
7842     IsUnary = true;
7843     break;
7844   case X86ISD::PSHUFLW:
7845     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7846     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7847     DecodePSHUFLWMask(NumElems, ImmN, Mask);
7848     IsUnary = true;
7849     break;
7850   case X86ISD::VZEXT_MOVL:
7851     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7852     DecodeZeroMoveLowMask(NumElems, Mask);
7853     IsUnary = true;
7854     break;
7855   case X86ISD::VBROADCAST:
7856     // We only decode broadcasts of same-sized vectors; peeking through to
7857     // extracted subvectors is likely to cause hasOneUse issues with
7858     // SimplifyDemandedBits etc.
7859     if (N->getOperand(0).getValueType() == VT) {
7860       DecodeVectorBroadcast(NumElems, Mask);
7861       IsUnary = true;
7862       break;
7863     }
7864     return false;
7865   case X86ISD::VPERMILPV: {
7866     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7867     IsUnary = true;
7868     SDValue MaskNode = N->getOperand(1);
7869     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7870                                     RawUndefs)) {
7871       DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
7872       break;
7873     }
7874     return false;
7875   }
7876   case X86ISD::PSHUFB: {
7877     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7878     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7879     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7880     IsUnary = true;
7881     SDValue MaskNode = N->getOperand(1);
7882     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7883       DecodePSHUFBMask(RawMask, RawUndefs, Mask);
7884       break;
7885     }
7886     return false;
7887   }
7888   case X86ISD::VPERMI:
7889     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7890     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7891     DecodeVPERMMask(NumElems, ImmN, Mask);
7892     IsUnary = true;
7893     break;
7894   case X86ISD::MOVSS:
7895   case X86ISD::MOVSD:
7896   case X86ISD::MOVSH:
7897     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7898     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7899     DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
7900     break;
7901   case X86ISD::VPERM2X128:
7902     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7903     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7904     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7905     DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
7906     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7907     break;
7908   case X86ISD::SHUF128:
7909     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7910     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7911     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7912     decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
7913     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7914     break;
7915   case X86ISD::MOVSLDUP:
7916     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7917     DecodeMOVSLDUPMask(NumElems, Mask);
7918     IsUnary = true;
7919     break;
7920   case X86ISD::MOVSHDUP:
7921     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7922     DecodeMOVSHDUPMask(NumElems, Mask);
7923     IsUnary = true;
7924     break;
7925   case X86ISD::MOVDDUP:
7926     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7927     DecodeMOVDDUPMask(NumElems, Mask);
7928     IsUnary = true;
7929     break;
7930   case X86ISD::VPERMIL2: {
7931     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7932     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7933     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7934     SDValue MaskNode = N->getOperand(2);
7935     SDValue CtrlNode = N->getOperand(3);
7936     if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
7937       unsigned CtrlImm = CtrlOp->getZExtValue();
7938       if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7939                                       RawUndefs)) {
7940         DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
7941                             Mask);
7942         break;
7943       }
7944     }
7945     return false;
7946   }
7947   case X86ISD::VPPERM: {
7948     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7949     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7950     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7951     SDValue MaskNode = N->getOperand(2);
7952     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7953       DecodeVPPERMMask(RawMask, RawUndefs, Mask);
7954       break;
7955     }
7956     return false;
7957   }
7958   case X86ISD::VPERMV: {
7959     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7960     IsUnary = true;
7961     // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
7962     Ops.push_back(N->getOperand(1));
7963     SDValue MaskNode = N->getOperand(0);
7964     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7965                                     RawUndefs)) {
7966       DecodeVPERMVMask(RawMask, RawUndefs, Mask);
7967       break;
7968     }
7969     return false;
7970   }
7971   case X86ISD::VPERMV3: {
7972     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7973     assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
7974     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
7975     // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
7976     Ops.push_back(N->getOperand(0));
7977     Ops.push_back(N->getOperand(2));
7978     SDValue MaskNode = N->getOperand(1);
7979     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7980                                     RawUndefs)) {
7981       DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
7982       break;
7983     }
7984     return false;
7985   }
7986   default: llvm_unreachable("unknown target shuffle node");
7987   }
7988 
7989   // Empty mask indicates the decode failed.
7990   if (Mask.empty())
7991     return false;
7992 
7993   // Check if we're getting a shuffle mask with zero'd elements.
7994   if (!AllowSentinelZero && isAnyZero(Mask))
7995     return false;
7996 
7997   // If we have a fake unary shuffle, the shuffle mask is spread across two
7998   // inputs that are actually the same node. Re-map the mask to always point
7999   // into the first input.
8000   if (IsFakeUnary)
8001     for (int &M : Mask)
8002       if (M >= (int)Mask.size())
8003         M -= Mask.size();
8004 
8005   // If we didn't already add operands in the opcode-specific code, default to
8006   // adding 1 or 2 operands starting at 0.
8007   if (Ops.empty()) {
8008     Ops.push_back(N->getOperand(0));
8009     if (!IsUnary || IsFakeUnary)
8010       Ops.push_back(N->getOperand(1));
8011   }
8012 
8013   return true;
8014 }
8015 
8016 // Wrapper for getTargetShuffleMask that discards the IsUnary result.
8017 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
8018                                  SmallVectorImpl<SDValue> &Ops,
8019                                  SmallVectorImpl<int> &Mask) {
8020   bool IsUnary;
8021   return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
8022 }
8023 
8024 /// Compute whether each element of a shuffle is zeroable.
8025 ///
8026 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
8027 /// Either it is an undef element in the shuffle mask, the element of the input
8028 /// referenced is undef, or the element of the input referenced is known to be
8029 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
8030 /// as many lanes with this technique as possible to simplify the remaining
8031 /// shuffle.
8032 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
8033                                            SDValue V1, SDValue V2,
8034                                            APInt &KnownUndef, APInt &KnownZero) {
8035   int Size = Mask.size();
8036   KnownUndef = KnownZero = APInt::getZero(Size);
8037 
8038   V1 = peekThroughBitcasts(V1);
8039   V2 = peekThroughBitcasts(V2);
8040 
8041   bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
8042   bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
8043 
8044   int VectorSizeInBits = V1.getValueSizeInBits();
8045   int ScalarSizeInBits = VectorSizeInBits / Size;
8046   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
8047 
8048   for (int i = 0; i < Size; ++i) {
8049     int M = Mask[i];
8050     // Handle the easy cases.
8051     if (M < 0) {
8052       KnownUndef.setBit(i);
8053       continue;
8054     }
8055     if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
8056       KnownZero.setBit(i);
8057       continue;
8058     }
8059 
8060     // Determine shuffle input and normalize the mask.
8061     SDValue V = M < Size ? V1 : V2;
8062     M %= Size;
8063 
8064     // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
8065     if (V.getOpcode() != ISD::BUILD_VECTOR)
8066       continue;
8067 
8068     // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
8069     // the (larger) source element must be UNDEF/ZERO.
8070     if ((Size % V.getNumOperands()) == 0) {
8071       int Scale = Size / V->getNumOperands();
8072       SDValue Op = V.getOperand(M / Scale);
8073       if (Op.isUndef())
8074         KnownUndef.setBit(i);
8075       if (X86::isZeroNode(Op))
8076         KnownZero.setBit(i);
8077       else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
8078         APInt Val = Cst->getAPIntValue();
8079         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8080         if (Val == 0)
8081           KnownZero.setBit(i);
8082       } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
8083         APInt Val = Cst->getValueAPF().bitcastToAPInt();
8084         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8085         if (Val == 0)
8086           KnownZero.setBit(i);
8087       }
8088       continue;
8089     }
8090 
8091     // If the BUILD_VECTOR has more elements, then all the (smaller) source
8092     // elements must be UNDEF or ZERO.
8093     if ((V.getNumOperands() % Size) == 0) {
8094       int Scale = V->getNumOperands() / Size;
8095       bool AllUndef = true;
8096       bool AllZero = true;
8097       for (int j = 0; j < Scale; ++j) {
8098         SDValue Op = V.getOperand((M * Scale) + j);
8099         AllUndef &= Op.isUndef();
8100         AllZero &= X86::isZeroNode(Op);
8101       }
8102       if (AllUndef)
8103         KnownUndef.setBit(i);
8104       if (AllZero)
8105         KnownZero.setBit(i);
8106       continue;
8107     }
8108   }
8109 }
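// Editorial example (illustrative only, not from the upstream source): given a
// hypothetical v4i32 shuffle with Mask = <0, 5, -1, 7>, where V1 is an
// arbitrary non-constant vector and V2 is an all-zeros build vector, the loop
// above marks element 2 as KnownUndef (sentinel mask value) and elements 1 and
// 3 as KnownZero (they read from the zero vector).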
8110 
8111 /// Decode a target shuffle mask and inputs and see if any values are
8112 /// known to be undef or zero from their inputs.
8113 /// Returns true if the target shuffle mask was decoded.
8114 /// FIXME: Merge this with computeZeroableShuffleElements?
8115 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
8116                                          SmallVectorImpl<SDValue> &Ops,
8117                                          APInt &KnownUndef, APInt &KnownZero) {
8118   bool IsUnary;
8119   if (!isTargetShuffle(N.getOpcode()))
8120     return false;
8121 
8122   MVT VT = N.getSimpleValueType();
8123   if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
8124     return false;
8125 
8126   int Size = Mask.size();
8127   SDValue V1 = Ops[0];
8128   SDValue V2 = IsUnary ? V1 : Ops[1];
8129   KnownUndef = KnownZero = APInt::getZero(Size);
8130 
8131   V1 = peekThroughBitcasts(V1);
8132   V2 = peekThroughBitcasts(V2);
8133 
8134   assert((VT.getSizeInBits() % Size) == 0 &&
8135          "Illegal split of shuffle value type");
8136   unsigned EltSizeInBits = VT.getSizeInBits() / Size;
8137 
8138   // Extract known constant input data.
8139   APInt UndefSrcElts[2];
8140   SmallVector<APInt, 32> SrcEltBits[2];
8141   bool IsSrcConstant[2] = {
8142       getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
8143                                     SrcEltBits[0], true, false),
8144       getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
8145                                     SrcEltBits[1], true, false)};
8146 
8147   for (int i = 0; i < Size; ++i) {
8148     int M = Mask[i];
8149 
8150     // Already decoded as SM_SentinelZero / SM_SentinelUndef.
8151     if (M < 0) {
8152       assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
8153       if (SM_SentinelUndef == M)
8154         KnownUndef.setBit(i);
8155       if (SM_SentinelZero == M)
8156         KnownZero.setBit(i);
8157       continue;
8158     }
8159 
8160     // Determine shuffle input and normalize the mask.
8161     unsigned SrcIdx = M / Size;
8162     SDValue V = M < Size ? V1 : V2;
8163     M %= Size;
8164 
8165     // We are referencing an UNDEF input.
8166     if (V.isUndef()) {
8167       KnownUndef.setBit(i);
8168       continue;
8169     }
8170 
8171     // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
8172     // TODO: We currently only set UNDEF for integer types - floats use the same
8173     // registers as vectors and many of the scalar folded loads rely on the
8174     // SCALAR_TO_VECTOR pattern.
8175     if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
8176         (Size % V.getValueType().getVectorNumElements()) == 0) {
8177       int Scale = Size / V.getValueType().getVectorNumElements();
8178       int Idx = M / Scale;
8179       if (Idx != 0 && !VT.isFloatingPoint())
8180         KnownUndef.setBit(i);
8181       else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
8182         KnownZero.setBit(i);
8183       continue;
8184     }
8185 
8186     // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
8187     // base vectors.
8188     if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
8189       SDValue Vec = V.getOperand(0);
8190       int NumVecElts = Vec.getValueType().getVectorNumElements();
8191       if (Vec.isUndef() && Size == NumVecElts) {
8192         int Idx = V.getConstantOperandVal(2);
8193         int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
8194         if (M < Idx || (Idx + NumSubElts) <= M)
8195           KnownUndef.setBit(i);
8196       }
8197       continue;
8198     }
8199 
8200     // Attempt to extract from the source's constant bits.
8201     if (IsSrcConstant[SrcIdx]) {
8202       if (UndefSrcElts[SrcIdx][M])
8203         KnownUndef.setBit(i);
8204       else if (SrcEltBits[SrcIdx][M] == 0)
8205         KnownZero.setBit(i);
8206     }
8207   }
8208 
8209   assert(VT.getVectorNumElements() == (unsigned)Size &&
8210          "Different mask size from vector size!");
8211   return true;
8212 }
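
// Illustrative caller sketch for getTargetShuffleAndZeroables above
// (hypothetical, assuming N is a target shuffle node):
//   SmallVector<int, 16> ShuffleMask;
//   SmallVector<SDValue, 2> ShuffleOps;
//   APInt KnownUndef, KnownZero;
//   if (getTargetShuffleAndZeroables(N, ShuffleMask, ShuffleOps, KnownUndef,
//                                    KnownZero)) {
//     // On success there is one mask element, and one KnownUndef/KnownZero
//     // bit, per element of N's value type.
//   }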
8213 
8214 // Replace target shuffle mask elements with known undef/zero sentinels.
8215 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
8216                                               const APInt &KnownUndef,
8217                                               const APInt &KnownZero,
8218                                               bool ResolveKnownZeros= true) {
8219   unsigned NumElts = Mask.size();
8220   assert(KnownUndef.getBitWidth() == NumElts &&
8221          KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
8222 
8223   for (unsigned i = 0; i != NumElts; ++i) {
8224     if (KnownUndef[i])
8225       Mask[i] = SM_SentinelUndef;
8226     else if (ResolveKnownZeros && KnownZero[i])
8227       Mask[i] = SM_SentinelZero;
8228   }
8229 }
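
// Illustrative sketch for resolveTargetShuffleFromZeroables above
// (hypothetical values): with Mask = {0, 1, 2, 3}, KnownUndef bit 1 set and
// KnownZero bit 3 set, the mask becomes {0, SM_SentinelUndef, 2,
// SM_SentinelZero} (the zero sentinel is only written when ResolveKnownZeros
// is true).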
8230 
8231 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
8232 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
8233                                               APInt &KnownUndef,
8234                                               APInt &KnownZero) {
8235   unsigned NumElts = Mask.size();
8236   KnownUndef = KnownZero = APInt::getZero(NumElts);
8237 
8238   for (unsigned i = 0; i != NumElts; ++i) {
8239     int M = Mask[i];
8240     if (SM_SentinelUndef == M)
8241       KnownUndef.setBit(i);
8242     if (SM_SentinelZero == M)
8243       KnownZero.setBit(i);
8244   }
8245 }
8246 
8247 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
8248 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
8249                                          SDValue Cond, bool IsBLENDV = false) {
8250   EVT CondVT = Cond.getValueType();
8251   unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
8252   unsigned NumElts = CondVT.getVectorNumElements();
8253 
8254   APInt UndefElts;
8255   SmallVector<APInt, 32> EltBits;
8256   if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
8257                                      true, false))
8258     return false;
8259 
8260   Mask.resize(NumElts, SM_SentinelUndef);
8261 
8262   for (int i = 0; i != (int)NumElts; ++i) {
8263     Mask[i] = i;
8264     // Arbitrarily choose from the 2nd operand if the select condition element
8265     // is undef.
8266     // TODO: Can we do better by matching patterns such as even/odd?
8267     if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
8268         (IsBLENDV && EltBits[i].isNonNegative()))
8269       Mask[i] += NumElts;
8270   }
8271 
8272   return true;
8273 }
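
// Illustrative sketch for createShuffleMaskFromVSELECT above (hypothetical
// values): for a v4i32 VSELECT whose constant condition is <-1, 0, -1, 0>,
// the resulting blend mask is {0, 5, 2, 7}; elements whose condition value
// is zero are redirected to the second (false) operand by adding NumElts.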
8274 
8275 // Forward declaration (for getFauxShuffleMask recursive check).
8276 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8277                                    SmallVectorImpl<SDValue> &Inputs,
8278                                    SmallVectorImpl<int> &Mask,
8279                                    const SelectionDAG &DAG, unsigned Depth,
8280                                    bool ResolveKnownElts);
8281 
8282 // Attempt to decode ops that could be represented as a shuffle mask.
8283 // The decoded shuffle mask may contain a different number of elements than the
8284 // destination value type.
8285 // TODO: Merge into getTargetShuffleInputs()
8286 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
8287                                SmallVectorImpl<int> &Mask,
8288                                SmallVectorImpl<SDValue> &Ops,
8289                                const SelectionDAG &DAG, unsigned Depth,
8290                                bool ResolveKnownElts) {
8291   Mask.clear();
8292   Ops.clear();
8293 
8294   MVT VT = N.getSimpleValueType();
8295   unsigned NumElts = VT.getVectorNumElements();
8296   unsigned NumSizeInBits = VT.getSizeInBits();
8297   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
8298   if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
8299     return false;
8300   assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
8301   unsigned NumSizeInBytes = NumSizeInBits / 8;
8302   unsigned NumBytesPerElt = NumBitsPerElt / 8;
8303 
8304   unsigned Opcode = N.getOpcode();
8305   switch (Opcode) {
8306   case ISD::VECTOR_SHUFFLE: {
8307     // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
8308     // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle, so decode it here.
8309     if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
8310       Mask.append(ShuffleMask.begin(), ShuffleMask.end());
8311       Ops.push_back(N.getOperand(0));
8312       Ops.push_back(N.getOperand(1));
8313       return true;
8314     }
8315     return false;
8316   }
8317   case ISD::AND:
8318   case X86ISD::ANDNP: {
8319     // Attempt to decode as a per-byte mask.
8320     APInt UndefElts;
8321     SmallVector<APInt, 32> EltBits;
8322     SDValue N0 = N.getOperand(0);
8323     SDValue N1 = N.getOperand(1);
8324     bool IsAndN = (X86ISD::ANDNP == Opcode);
8325     uint64_t ZeroMask = IsAndN ? 255 : 0;
8326     if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
8327       return false;
8328     // We can't assume an undef src element gives an undef dst - the other src
8329     // might be zero.
8330     if (!UndefElts.isZero())
8331       return false;
8332     for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
8333       const APInt &ByteBits = EltBits[i];
8334       if (ByteBits != 0 && ByteBits != 255)
8335         return false;
8336       Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
8337     }
8338     Ops.push_back(IsAndN ? N1 : N0);
8339     return true;
8340   }
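  // Illustrative sketch for the AND/ANDNP case above (hypothetical values):
  // AND(X, <0xFF,0xFF,0x00,0x00, ...>) on v16i8 decodes to the per-byte mask
  // {0, 1, SM_SentinelZero, SM_SentinelZero, ...} with X as the only input;
  // any constant byte other than 0x00/0xFF rejects the match.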
8341   case ISD::OR: {
8342     // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
8343     // is a valid shuffle index.
8344     SDValue N0 = peekThroughBitcasts(N.getOperand(0));
8345     SDValue N1 = peekThroughBitcasts(N.getOperand(1));
8346     if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
8347       return false;
8348 
8349     SmallVector<int, 64> SrcMask0, SrcMask1;
8350     SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
8351     APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
8352     APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
8353     if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
8354                                 Depth + 1, true) ||
8355         !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
8356                                 Depth + 1, true))
8357       return false;
8358 
8359     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
8360     SmallVector<int, 64> Mask0, Mask1;
8361     narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
8362     narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
8363     for (int i = 0; i != (int)MaskSize; ++i) {
8364       // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
8365       // loops converting between OR and BLEND shuffles due to
8366       // canWidenShuffleElements merging away undef elements, meaning we
8367       // fail to recognise the OR as the undef element isn't known zero.
8368       if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
8369         Mask.push_back(SM_SentinelZero);
8370       else if (Mask1[i] == SM_SentinelZero)
8371         Mask.push_back(i);
8372       else if (Mask0[i] == SM_SentinelZero)
8373         Mask.push_back(i + MaskSize);
8374       else
8375         return false;
8376     }
8377     Ops.push_back(N0);
8378     Ops.push_back(N1);
8379     return true;
8380   }
8381   case ISD::INSERT_SUBVECTOR: {
8382     SDValue Src = N.getOperand(0);
8383     SDValue Sub = N.getOperand(1);
8384     EVT SubVT = Sub.getValueType();
8385     unsigned NumSubElts = SubVT.getVectorNumElements();
8386     if (!N->isOnlyUserOf(Sub.getNode()))
8387       return false;
8388     uint64_t InsertIdx = N.getConstantOperandVal(2);
8389     // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
8390     if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8391         Sub.getOperand(0).getValueType() == VT) {
8392       uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
8393       for (int i = 0; i != (int)NumElts; ++i)
8394         Mask.push_back(i);
8395       for (int i = 0; i != (int)NumSubElts; ++i)
8396         Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
8397       Ops.push_back(Src);
8398       Ops.push_back(Sub.getOperand(0));
8399       return true;
8400     }
8401     // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
8402     SmallVector<int, 64> SubMask;
8403     SmallVector<SDValue, 2> SubInputs;
8404     SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
8405     EVT SubSrcVT = SubSrc.getValueType();
8406     if (!SubSrcVT.isVector())
8407       return false;
8408 
8409     APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
8410     if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
8411                                 Depth + 1, ResolveKnownElts))
8412       return false;
8413 
8414     // Subvector shuffle inputs must not be larger than the subvector.
8415     if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
8416           return SubVT.getFixedSizeInBits() <
8417                  SubInput.getValueSizeInBits().getFixedValue();
8418         }))
8419       return false;
8420 
8421     if (SubMask.size() != NumSubElts) {
8422       assert(((SubMask.size() % NumSubElts) == 0 ||
8423               (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
8424       if ((NumSubElts % SubMask.size()) == 0) {
8425         int Scale = NumSubElts / SubMask.size();
8426         SmallVector<int,64> ScaledSubMask;
8427         narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
8428         SubMask = ScaledSubMask;
8429       } else {
8430         int Scale = SubMask.size() / NumSubElts;
8431         NumSubElts = SubMask.size();
8432         NumElts *= Scale;
8433         InsertIdx *= Scale;
8434       }
8435     }
8436     Ops.push_back(Src);
8437     Ops.append(SubInputs.begin(), SubInputs.end());
8438     if (ISD::isBuildVectorAllZeros(Src.getNode()))
8439       Mask.append(NumElts, SM_SentinelZero);
8440     else
8441       for (int i = 0; i != (int)NumElts; ++i)
8442         Mask.push_back(i);
8443     for (int i = 0; i != (int)NumSubElts; ++i) {
8444       int M = SubMask[i];
8445       if (0 <= M) {
8446         int InputIdx = M / NumSubElts;
8447         M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
8448       }
8449       Mask[i + InsertIdx] = M;
8450     }
8451     return true;
8452   }
8453   case X86ISD::PINSRB:
8454   case X86ISD::PINSRW:
8455   case ISD::SCALAR_TO_VECTOR:
8456   case ISD::INSERT_VECTOR_ELT: {
8457     // Match against an insert_vector_elt/scalar_to_vector of an extract from a
8458     // vector, for matching src/dst vector types.
8459     SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
8460 
8461     unsigned DstIdx = 0;
8462     if (Opcode != ISD::SCALAR_TO_VECTOR) {
8463       // Check we have an in-range constant insertion index.
8464       if (!isa<ConstantSDNode>(N.getOperand(2)) ||
8465           N.getConstantOperandAPInt(2).uge(NumElts))
8466         return false;
8467       DstIdx = N.getConstantOperandVal(2);
8468 
8469       // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
8470       if (X86::isZeroNode(Scl)) {
8471         Ops.push_back(N.getOperand(0));
8472         for (unsigned i = 0; i != NumElts; ++i)
8473           Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
8474         return true;
8475       }
8476     }
8477 
8478     // Peek through trunc/aext/zext.
8479     // TODO: aext shouldn't require SM_SentinelZero padding.
8480     // TODO: handle shift of scalars.
8481     unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
8482     while (Scl.getOpcode() == ISD::TRUNCATE ||
8483            Scl.getOpcode() == ISD::ANY_EXTEND ||
8484            Scl.getOpcode() == ISD::ZERO_EXTEND) {
8485       Scl = Scl.getOperand(0);
8486       MinBitsPerElt =
8487           std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
8488     }
8489     if ((MinBitsPerElt % 8) != 0)
8490       return false;
8491 
8492     // Attempt to find the source vector the scalar was extracted from.
8493     SDValue SrcExtract;
8494     if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
8495          Scl.getOpcode() == X86ISD::PEXTRW ||
8496          Scl.getOpcode() == X86ISD::PEXTRB) &&
8497         Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
8498       SrcExtract = Scl;
8499     }
8500     if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
8501       return false;
8502 
8503     SDValue SrcVec = SrcExtract.getOperand(0);
8504     EVT SrcVT = SrcVec.getValueType();
8505     if (!SrcVT.getScalarType().isByteSized())
8506       return false;
8507     unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
8508     unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
8509     unsigned DstByte = DstIdx * NumBytesPerElt;
8510     MinBitsPerElt =
8511         std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
8512 
8513     // Create 'identity' byte level shuffle mask and then add inserted bytes.
8514     if (Opcode == ISD::SCALAR_TO_VECTOR) {
8515       Ops.push_back(SrcVec);
8516       Mask.append(NumSizeInBytes, SM_SentinelUndef);
8517     } else {
8518       Ops.push_back(SrcVec);
8519       Ops.push_back(N.getOperand(0));
8520       for (int i = 0; i != (int)NumSizeInBytes; ++i)
8521         Mask.push_back(NumSizeInBytes + i);
8522     }
8523 
8524     unsigned MinBytesPerElts = MinBitsPerElt / 8;
8525     MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
8526     for (unsigned i = 0; i != MinBytesPerElts; ++i)
8527       Mask[DstByte + i] = SrcByte + i;
8528     for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
8529       Mask[DstByte + i] = SM_SentinelZero;
8530     return true;
8531   }
8532   case X86ISD::PACKSS:
8533   case X86ISD::PACKUS: {
8534     SDValue N0 = N.getOperand(0);
8535     SDValue N1 = N.getOperand(1);
8536     assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
8537            N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
8538            "Unexpected input value type");
8539 
8540     APInt EltsLHS, EltsRHS;
8541     getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
8542 
8543     // If we know input saturation won't happen (or we don't care about particular
8544     // lanes), we can treat this as a truncation shuffle.
8545     bool Offset0 = false, Offset1 = false;
8546     if (Opcode == X86ISD::PACKSS) {
8547       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8548            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
8549           (!(N1.isUndef() || EltsRHS.isZero()) &&
8550            DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
8551         return false;
8552       // We can't easily fold ASHR into a shuffle, but if it was feeding a
8553       // PACKSS then it was likely being used for sign-extension for a
8554       // truncation, so just peek through and adjust the mask accordingly.
8555       if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
8556           N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
8557         Offset0 = true;
8558         N0 = N0.getOperand(0);
8559       }
8560       if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
8561           N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
8562         Offset1 = true;
8563         N1 = N1.getOperand(0);
8564       }
8565     } else {
8566       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
8567       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8568            !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
8569           (!(N1.isUndef() || EltsRHS.isZero()) &&
8570            !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
8571         return false;
8572     }
8573 
8574     bool IsUnary = (N0 == N1);
8575 
8576     Ops.push_back(N0);
8577     if (!IsUnary)
8578       Ops.push_back(N1);
8579 
8580     createPackShuffleMask(VT, Mask, IsUnary);
8581 
8582     if (Offset0 || Offset1) {
8583       for (int &M : Mask)
8584         if ((Offset0 && isInRange(M, 0, NumElts)) ||
8585             (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
8586           ++M;
8587     }
8588     return true;
8589   }
8590   case ISD::VSELECT:
8591   case X86ISD::BLENDV: {
8592     SDValue Cond = N.getOperand(0);
8593     if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
8594       Ops.push_back(N.getOperand(1));
8595       Ops.push_back(N.getOperand(2));
8596       return true;
8597     }
8598     return false;
8599   }
8600   case X86ISD::VTRUNC: {
8601     SDValue Src = N.getOperand(0);
8602     EVT SrcVT = Src.getValueType();
8603     // Truncated source must be a simple vector.
8604     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8605         (SrcVT.getScalarSizeInBits() % 8) != 0)
8606       return false;
8607     unsigned NumSrcElts = SrcVT.getVectorNumElements();
8608     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
8609     unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
8610     assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
8611     for (unsigned i = 0; i != NumSrcElts; ++i)
8612       Mask.push_back(i * Scale);
8613     Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
8614     Ops.push_back(Src);
8615     return true;
8616   }
8617   case X86ISD::VSHLI:
8618   case X86ISD::VSRLI: {
8619     uint64_t ShiftVal = N.getConstantOperandVal(1);
8620     // Out of range bit shifts are guaranteed to be zero.
8621     if (NumBitsPerElt <= ShiftVal) {
8622       Mask.append(NumElts, SM_SentinelZero);
8623       return true;
8624     }
8625 
8626     // We can only decode 'whole byte' bit shifts as shuffles.
8627     if ((ShiftVal % 8) != 0)
8628       break;
8629 
8630     uint64_t ByteShift = ShiftVal / 8;
8631     Ops.push_back(N.getOperand(0));
8632 
8633     // Clear mask to all zeros and insert the shifted byte indices.
8634     Mask.append(NumSizeInBytes, SM_SentinelZero);
8635 
8636     if (X86ISD::VSHLI == Opcode) {
8637       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8638         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8639           Mask[i + j] = i + j - ByteShift;
8640     } else {
8641       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8642         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8643           Mask[i + j - ByteShift] = i + j;
8644     }
8645     return true;
8646   }
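  // Illustrative sketch for the VSHLI/VSRLI case above (hypothetical values):
  // VSHLI v2i64 X, 8 (a whole-byte shift) decodes to the byte shuffle mask
  // {Z, 0, 1, 2, 3, 4, 5, 6, Z, 8, 9, 10, 11, 12, 13, 14} where Z is
  // SM_SentinelZero, i.e. each 8-byte element's bytes move up by one position
  // and the vacated byte is known zero.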
8647   case X86ISD::VROTLI:
8648   case X86ISD::VROTRI: {
8649     // We can only decode 'whole byte' bit rotates as shuffles.
8650     uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
8651     if ((RotateVal % 8) != 0)
8652       return false;
8653     Ops.push_back(N.getOperand(0));
8654     int Offset = RotateVal / 8;
8655     Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
8656     for (int i = 0; i != (int)NumElts; ++i) {
8657       int BaseIdx = i * NumBytesPerElt;
8658       for (int j = 0; j != (int)NumBytesPerElt; ++j) {
8659         Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
8660       }
8661     }
8662     return true;
8663   }
8664   case X86ISD::VBROADCAST: {
8665     SDValue Src = N.getOperand(0);
8666     if (!Src.getSimpleValueType().isVector()) {
8667       if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8668           !isNullConstant(Src.getOperand(1)) ||
8669           Src.getOperand(0).getValueType().getScalarType() !=
8670               VT.getScalarType())
8671         return false;
8672       Src = Src.getOperand(0);
8673     }
8674     Ops.push_back(Src);
8675     Mask.append(NumElts, 0);
8676     return true;
8677   }
8678   case ISD::ZERO_EXTEND:
8679   case ISD::ANY_EXTEND:
8680   case ISD::ZERO_EXTEND_VECTOR_INREG:
8681   case ISD::ANY_EXTEND_VECTOR_INREG: {
8682     SDValue Src = N.getOperand(0);
8683     EVT SrcVT = Src.getValueType();
8684 
8685     // Extended source must be a simple vector.
8686     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8687         (SrcVT.getScalarSizeInBits() % 8) != 0)
8688       return false;
8689 
8690     bool IsAnyExtend =
8691         (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
8692     DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
8693                          IsAnyExtend, Mask);
8694     Ops.push_back(Src);
8695     return true;
8696   }
8697   }
8698 
8699   return false;
8700 }
8701 
8702 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
8703 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
8704                                               SmallVectorImpl<int> &Mask) {
8705   int MaskWidth = Mask.size();
8706   SmallVector<SDValue, 16> UsedInputs;
8707   for (int i = 0, e = Inputs.size(); i < e; ++i) {
8708     int lo = UsedInputs.size() * MaskWidth;
8709     int hi = lo + MaskWidth;
8710 
8711     // Strip UNDEF input usage.
8712     if (Inputs[i].isUndef())
8713       for (int &M : Mask)
8714         if ((lo <= M) && (M < hi))
8715           M = SM_SentinelUndef;
8716 
8717     // Check for unused inputs.
8718     if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
8719       for (int &M : Mask)
8720         if (lo <= M)
8721           M -= MaskWidth;
8722       continue;
8723     }
8724 
8725     // Check for repeated inputs.
8726     bool IsRepeat = false;
8727     for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
8728       if (UsedInputs[j] != Inputs[i])
8729         continue;
8730       for (int &M : Mask)
8731         if (lo <= M)
8732           M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
8733       IsRepeat = true;
8734       break;
8735     }
8736     if (IsRepeat)
8737       continue;
8738 
8739     UsedInputs.push_back(Inputs[i]);
8740   }
8741   Inputs = UsedInputs;
8742 }
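
// Illustrative sketch for resolveTargetShuffleInputsAndMask above
// (hypothetical values): with Inputs = {X, X} and Mask = {0, 4, 1, 5}
// (MaskWidth = 4), the repeated input is merged, giving Inputs = {X} and
// Mask = {0, 0, 1, 1}.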
8743 
8744 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
8745 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
8746 /// Returns true if the target shuffle mask was decoded.
8747 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8748                                    SmallVectorImpl<SDValue> &Inputs,
8749                                    SmallVectorImpl<int> &Mask,
8750                                    APInt &KnownUndef, APInt &KnownZero,
8751                                    const SelectionDAG &DAG, unsigned Depth,
8752                                    bool ResolveKnownElts) {
8753   if (Depth >= SelectionDAG::MaxRecursionDepth)
8754     return false; // Limit search depth.
8755 
8756   EVT VT = Op.getValueType();
8757   if (!VT.isSimple() || !VT.isVector())
8758     return false;
8759 
8760   if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
8761     if (ResolveKnownElts)
8762       resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
8763     return true;
8764   }
8765   if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
8766                          ResolveKnownElts)) {
8767     resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
8768     return true;
8769   }
8770   return false;
8771 }
8772 
8773 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8774                                    SmallVectorImpl<SDValue> &Inputs,
8775                                    SmallVectorImpl<int> &Mask,
8776                                    const SelectionDAG &DAG, unsigned Depth,
8777                                    bool ResolveKnownElts) {
8778   APInt KnownUndef, KnownZero;
8779   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
8780                                 KnownZero, DAG, Depth, ResolveKnownElts);
8781 }
8782 
8783 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
8784                                    SmallVectorImpl<int> &Mask,
8785                                    const SelectionDAG &DAG, unsigned Depth = 0,
8786                                    bool ResolveKnownElts = true) {
8787   EVT VT = Op.getValueType();
8788   if (!VT.isSimple() || !VT.isVector())
8789     return false;
8790 
8791   unsigned NumElts = Op.getValueType().getVectorNumElements();
8792   APInt DemandedElts = APInt::getAllOnes(NumElts);
8793   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
8794                                 ResolveKnownElts);
8795 }
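
// Illustrative caller sketch for the getTargetShuffleInputs helpers above
// (hypothetical):
//   SmallVector<SDValue, 2> Inputs;
//   SmallVector<int, 16> Mask;
//   if (getTargetShuffleInputs(Op, Inputs, Mask, DAG)) {
//     // Mask may contain SM_SentinelUndef/SM_SentinelZero entries and, for
//     // faux shuffles, may have a different element count than Op's type.
//   }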
8796 
8797 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
8798 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
8799                                  EVT MemVT, MemSDNode *Mem, unsigned Offset,
8800                                  SelectionDAG &DAG) {
8801   assert((Opcode == X86ISD::VBROADCAST_LOAD ||
8802           Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
8803          "Unknown broadcast load type");
8804 
8805   // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
8806   if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
8807     return SDValue();
8808 
8809   SDValue Ptr =
8810       DAG.getMemBasePlusOffset(Mem->getBasePtr(), TypeSize::Fixed(Offset), DL);
8811   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8812   SDValue Ops[] = {Mem->getChain(), Ptr};
8813   SDValue BcstLd = DAG.getMemIntrinsicNode(
8814       Opcode, DL, Tys, Ops, MemVT,
8815       DAG.getMachineFunction().getMachineMemOperand(
8816           Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
8817   DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
8818   return BcstLd;
8819 }
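
// Illustrative caller sketch for getBROADCAST_LOAD above (hypothetical,
// assuming Mem is a simple load being rebroadcast from its base address):
//   if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::VBROADCAST_LOAD, DL,
//                                          MVT::v4f32, MVT::f32, Mem,
//                                          /*Offset=*/0, DAG))
//     return BcstLd;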
8820 
8821 /// Returns the scalar element that will make up the i'th
8822 /// element of the result of the vector shuffle.
8823 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
8824                                    SelectionDAG &DAG, unsigned Depth) {
8825   if (Depth >= SelectionDAG::MaxRecursionDepth)
8826     return SDValue(); // Limit search depth.
8827 
8828   EVT VT = Op.getValueType();
8829   unsigned Opcode = Op.getOpcode();
8830   unsigned NumElems = VT.getVectorNumElements();
8831 
8832   // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
8833   if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
8834     int Elt = SV->getMaskElt(Index);
8835 
8836     if (Elt < 0)
8837       return DAG.getUNDEF(VT.getVectorElementType());
8838 
8839     SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
8840     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8841   }
8842 
8843   // Recurse into target specific vector shuffles to find scalars.
8844   if (isTargetShuffle(Opcode)) {
8845     MVT ShufVT = VT.getSimpleVT();
8846     MVT ShufSVT = ShufVT.getVectorElementType();
8847     int NumElems = (int)ShufVT.getVectorNumElements();
8848     SmallVector<int, 16> ShuffleMask;
8849     SmallVector<SDValue, 16> ShuffleOps;
8850     if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
8851                               ShuffleMask))
8852       return SDValue();
8853 
8854     int Elt = ShuffleMask[Index];
8855     if (Elt == SM_SentinelZero)
8856       return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
8857                                  : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
8858     if (Elt == SM_SentinelUndef)
8859       return DAG.getUNDEF(ShufSVT);
8860 
8861     assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
8862     SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
8863     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8864   }
8865 
8866   // Recurse into insert_subvector base/sub vector to find scalars.
8867   if (Opcode == ISD::INSERT_SUBVECTOR) {
8868     SDValue Vec = Op.getOperand(0);
8869     SDValue Sub = Op.getOperand(1);
8870     uint64_t SubIdx = Op.getConstantOperandVal(2);
8871     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
8872 
8873     if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
8874       return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
8875     return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
8876   }
8877 
8878   // Recurse into concat_vectors sub vector to find scalars.
8879   if (Opcode == ISD::CONCAT_VECTORS) {
8880     EVT SubVT = Op.getOperand(0).getValueType();
8881     unsigned NumSubElts = SubVT.getVectorNumElements();
8882     uint64_t SubIdx = Index / NumSubElts;
8883     uint64_t SubElt = Index % NumSubElts;
8884     return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
8885   }
8886 
8887   // Recurse into extract_subvector src vector to find scalars.
8888   if (Opcode == ISD::EXTRACT_SUBVECTOR) {
8889     SDValue Src = Op.getOperand(0);
8890     uint64_t SrcIdx = Op.getConstantOperandVal(1);
8891     return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
8892   }
8893 
8894   // We only peek through bitcasts of the same vector width.
8895   if (Opcode == ISD::BITCAST) {
8896     SDValue Src = Op.getOperand(0);
8897     EVT SrcVT = Src.getValueType();
8898     if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
8899       return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
8900     return SDValue();
8901   }
8902 
8903   // Actual nodes that may contain scalar elements
8904 
8905   // For insert_vector_elt - either return the index matching scalar or recurse
8906   // into the base vector.
8907   if (Opcode == ISD::INSERT_VECTOR_ELT &&
8908       isa<ConstantSDNode>(Op.getOperand(2))) {
8909     if (Op.getConstantOperandAPInt(2) == Index)
8910       return Op.getOperand(1);
8911     return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
8912   }
8913 
8914   if (Opcode == ISD::SCALAR_TO_VECTOR)
8915     return (Index == 0) ? Op.getOperand(0)
8916                         : DAG.getUNDEF(VT.getVectorElementType());
8917 
8918   if (Opcode == ISD::BUILD_VECTOR)
8919     return Op.getOperand(Index);
8920 
8921   return SDValue();
8922 }
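
// Illustrative sketch for getShuffleScalarElt above (hypothetical values):
// for Op = vector_shuffle<4, 1, -1, 3> A, B and Index = 0, mask element 4
// selects element 0 of B, so the search recurses into B; if B is a
// BUILD_VECTOR this returns B's first operand, while a -1 mask entry would
// return UNDEF of the element type.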
8923 
8924 // Use PINSRB/PINSRW/PINSRD to create a build vector.
8925 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
8926                                         unsigned NumNonZero, unsigned NumZero,
8927                                         SelectionDAG &DAG,
8928                                         const X86Subtarget &Subtarget) {
8929   MVT VT = Op.getSimpleValueType();
8930   unsigned NumElts = VT.getVectorNumElements();
8931   assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
8932           ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
8933          "Illegal vector insertion");
8934 
8935   SDLoc dl(Op);
8936   SDValue V;
8937   bool First = true;
8938 
8939   for (unsigned i = 0; i < NumElts; ++i) {
8940     bool IsNonZero = NonZeroMask[i];
8941     if (!IsNonZero)
8942       continue;
8943 
8944     // If the build vector contains zeros or our first insertion is not the
8945     // first index, then insert into a zero vector to break any register
8946     // dependency; otherwise use SCALAR_TO_VECTOR.
8947     if (First) {
8948       First = false;
8949       if (NumZero || 0 != i)
8950         V = getZeroVector(VT, Subtarget, DAG, dl);
8951       else {
8952         assert(0 == i && "Expected insertion into zero-index");
8953         V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8954         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
8955         V = DAG.getBitcast(VT, V);
8956         continue;
8957       }
8958     }
8959     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
8960                     DAG.getIntPtrConstant(i, dl));
8961   }
8962 
8963   return V;
8964 }
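
// Illustrative sketch for LowerBuildVectorAsInsert above (hypothetical
// values): lowering the v4i32 build_vector (x, 0, y, 0) with SSE4.1 starts
// from a zero vector (NumZero != 0) and emits two INSERT_VECTOR_ELT nodes,
// for elements 0 and 2, which later select to PINSRD.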
8965 
8966 /// Custom lower build_vector of v16i8.
8967 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
8968                                      unsigned NumNonZero, unsigned NumZero,
8969                                      SelectionDAG &DAG,
8970                                      const X86Subtarget &Subtarget) {
8971   if (NumNonZero > 8 && !Subtarget.hasSSE41())
8972     return SDValue();
8973 
8974   // SSE4.1 - use PINSRB to insert each byte directly.
8975   if (Subtarget.hasSSE41())
8976     return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
8977                                     Subtarget);
8978 
8979   SDLoc dl(Op);
8980   SDValue V;
8981 
8982   // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
8983   for (unsigned i = 0; i < 16; i += 2) {
8984     bool ThisIsNonZero = NonZeroMask[i];
8985     bool NextIsNonZero = NonZeroMask[i + 1];
8986     if (!ThisIsNonZero && !NextIsNonZero)
8987       continue;
8988 
8989     // FIXME: Investigate combining the first 4 bytes as a i32 instead.
8990     SDValue Elt;
8991     if (ThisIsNonZero) {
8992       if (NumZero || NextIsNonZero)
8993         Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8994       else
8995         Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8996     }
8997 
8998     if (NextIsNonZero) {
8999       SDValue NextElt = Op.getOperand(i + 1);
9000       if (i == 0 && NumZero)
9001         NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
9002       else
9003         NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
9004       NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
9005                             DAG.getConstant(8, dl, MVT::i8));
9006       if (ThisIsNonZero)
9007         Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
9008       else
9009         Elt = NextElt;
9010     }
9011 
9012     // If our first insertion is not the first index or zeros are needed, then
9013     // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
9014     // elements undefined).
9015     if (!V) {
9016       if (i != 0 || NumZero)
9017         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
9018       else {
9019         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
9020         V = DAG.getBitcast(MVT::v8i16, V);
9021         continue;
9022       }
9023     }
9024     Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
9025     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
9026                     DAG.getIntPtrConstant(i / 2, dl));
9027   }
9028 
9029   return DAG.getBitcast(MVT::v16i8, V);
9030 }
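
// Illustrative sketch of the pre-SSE4.1 path above (hypothetical values): for
// each pair of adjacent v16i8 elements with at least one non-zero value, the
// two bytes are widened to i32, the odd byte is shifted left by 8 and ORed
// with the even byte, and the result is inserted as word i/2 of a v8i16 that
// is finally bitcast back to v16i8.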
9031 
9032 /// Custom lower build_vector of v8i16.
9033 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
9034                                      unsigned NumNonZero, unsigned NumZero,
9035                                      SelectionDAG &DAG,
9036                                      const X86Subtarget &Subtarget) {
9037   if (NumNonZero > 4 && !Subtarget.hasSSE41())
9038     return SDValue();
9039 
9040   // Use PINSRW to insert each element directly.
9041   return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
9042                                   Subtarget);
9043 }
9044 
9045 /// Custom lower build_vector of v4i32 or v4f32.
9046 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
9047                                      const X86Subtarget &Subtarget) {
9048   // If this is a splat of a pair of elements, use MOVDDUP (unless the target
9049   // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
9050   // Because we're creating a less complicated build vector here, we may enable
9051   // further folding of the MOVDDUP via shuffle transforms.
9052   if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
9053       Op.getOperand(0) == Op.getOperand(2) &&
9054       Op.getOperand(1) == Op.getOperand(3) &&
9055       Op.getOperand(0) != Op.getOperand(1)) {
9056     SDLoc DL(Op);
9057     MVT VT = Op.getSimpleValueType();
9058     MVT EltVT = VT.getVectorElementType();
9059     // Create a new build vector with the first 2 elements followed by undef
9060     // padding, bitcast to v2f64, duplicate, and bitcast back.
9061     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9062                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9063     SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
9064     SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
9065     return DAG.getBitcast(VT, Dup);
9066   }
9067 
9068   // Find all zeroable elements.
9069   std::bitset<4> Zeroable, Undefs;
9070   for (int i = 0; i < 4; ++i) {
9071     SDValue Elt = Op.getOperand(i);
9072     Undefs[i] = Elt.isUndef();
9073     Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
9074   }
9075   assert(Zeroable.size() - Zeroable.count() > 1 &&
9076          "We expect at least two non-zero elements!");
9077 
9078   // We only know how to deal with build_vector nodes where elements are either
9079   // zeroable or extract_vector_elt with constant index.
9080   SDValue FirstNonZero;
9081   unsigned FirstNonZeroIdx;
9082   for (unsigned i = 0; i < 4; ++i) {
9083     if (Zeroable[i])
9084       continue;
9085     SDValue Elt = Op.getOperand(i);
9086     if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9087         !isa<ConstantSDNode>(Elt.getOperand(1)))
9088       return SDValue();
9089     // Make sure that this node is extracting from a 128-bit vector.
9090     MVT VT = Elt.getOperand(0).getSimpleValueType();
9091     if (!VT.is128BitVector())
9092       return SDValue();
9093     if (!FirstNonZero.getNode()) {
9094       FirstNonZero = Elt;
9095       FirstNonZeroIdx = i;
9096     }
9097   }
9098 
9099   assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
9100   SDValue V1 = FirstNonZero.getOperand(0);
9101   MVT VT = V1.getSimpleValueType();
9102 
9103   // See if this build_vector can be lowered as a blend with zero.
9104   SDValue Elt;
9105   unsigned EltMaskIdx, EltIdx;
9106   int Mask[4];
9107   for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
9108     if (Zeroable[EltIdx]) {
9109       // The zero vector will be on the right hand side.
9110       Mask[EltIdx] = EltIdx+4;
9111       continue;
9112     }
9113 
9114     Elt = Op->getOperand(EltIdx);
9115     // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
9116     EltMaskIdx = Elt.getConstantOperandVal(1);
9117     if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
9118       break;
9119     Mask[EltIdx] = EltIdx;
9120   }
9121 
9122   if (EltIdx == 4) {
9123     // Let the shuffle legalizer deal with blend operations.
9124     SDValue VZeroOrUndef = (Zeroable == Undefs)
9125                                ? DAG.getUNDEF(VT)
9126                                : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
9127     if (V1.getSimpleValueType() != VT)
9128       V1 = DAG.getBitcast(VT, V1);
9129     return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
9130   }
9131 
9132   // See if we can lower this build_vector to a INSERTPS.
9133   if (!Subtarget.hasSSE41())
9134     return SDValue();
9135 
9136   SDValue V2 = Elt.getOperand(0);
9137   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
9138     V1 = SDValue();
9139 
9140   bool CanFold = true;
9141   for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
9142     if (Zeroable[i])
9143       continue;
9144 
9145     SDValue Current = Op->getOperand(i);
9146     SDValue SrcVector = Current->getOperand(0);
9147     if (!V1.getNode())
9148       V1 = SrcVector;
9149     CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
9150   }
9151 
9152   if (!CanFold)
9153     return SDValue();
9154 
9155   assert(V1.getNode() && "Expected at least two non-zero elements!");
9156   if (V1.getSimpleValueType() != MVT::v4f32)
9157     V1 = DAG.getBitcast(MVT::v4f32, V1);
9158   if (V2.getSimpleValueType() != MVT::v4f32)
9159     V2 = DAG.getBitcast(MVT::v4f32, V2);
9160 
9161   // Ok, we can emit an INSERTPS instruction.
9162   unsigned ZMask = Zeroable.to_ulong();
9163 
9164   unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
9165   assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
9166   SDLoc DL(Op);
9167   SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
9168                                DAG.getIntPtrConstant(InsertPSMask, DL, true));
9169   return DAG.getBitcast(VT, Result);
9170 }
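
// Illustrative sketch of the INSERTPS immediate built above (hypothetical
// values): bits [7:6] select the source element, bits [5:4] the destination
// slot and bits [3:0] the zero mask, so copying source element 2 into
// destination element 1 while zeroing element 3 uses
// InsertPSMask = (2 << 6) | (1 << 4) | 0b1000 = 0x98.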
9171 
9172 /// Return a vector logical shift node.
9173 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
9174                          SelectionDAG &DAG, const TargetLowering &TLI,
9175                          const SDLoc &dl) {
9176   assert(VT.is128BitVector() && "Unknown type for VShift");
9177   MVT ShVT = MVT::v16i8;
9178   unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
9179   SrcOp = DAG.getBitcast(ShVT, SrcOp);
9180   assert(NumBits % 8 == 0 && "Only support byte sized shifts");
9181   SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
9182   return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
9183 }
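
// Illustrative sketch for getVShift above (hypothetical values): a left shift
// of a v2i64 value by 64 bits is emitted as a whole-vector byte shift,
// X86ISD::VSHLDQ on a v16i8 bitcast with an 8-byte shift amount, and then
// bitcast back to v2i64.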
9184 
9185 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
9186                                       SelectionDAG &DAG) {
9187 
9188   // Check if the scalar load can be widened into a vector load, and if the
9189   // address is "base + cst", see if the cst can be "absorbed" into the
9190   // shuffle mask.
9191   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
9192     SDValue Ptr = LD->getBasePtr();
9193     if (!ISD::isNormalLoad(LD) || !LD->isSimple())
9194       return SDValue();
9195     EVT PVT = LD->getValueType(0);
9196     if (PVT != MVT::i32 && PVT != MVT::f32)
9197       return SDValue();
9198 
9199     int FI = -1;
9200     int64_t Offset = 0;
9201     if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
9202       FI = FINode->getIndex();
9203       Offset = 0;
9204     } else if (DAG.isBaseWithConstantOffset(Ptr) &&
9205                isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9206       FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9207       Offset = Ptr.getConstantOperandVal(1);
9208       Ptr = Ptr.getOperand(0);
9209     } else {
9210       return SDValue();
9211     }
9212 
9213     // FIXME: 256-bit vector instructions don't require a strict alignment,
9214     // improve this code to support it better.
9215     Align RequiredAlign(VT.getSizeInBits() / 8);
9216     SDValue Chain = LD->getChain();
9217     // Make sure the stack object alignment is at least 16 or 32.
9218     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9219     MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
9220     if (!InferredAlign || *InferredAlign < RequiredAlign) {
9221       if (MFI.isFixedObjectIndex(FI)) {
9222         // Can't change the alignment. FIXME: It's possible to compute
9223         // the exact stack offset and reference FI + adjust offset instead.
9224         // If someone *really* cares about this. That's the way to implement it.
9225         return SDValue();
9226       } else {
9227         MFI.setObjectAlignment(FI, RequiredAlign);
9228       }
9229     }
9230 
9231     // (Offset % 16 or 32) must be a multiple of 4. The address is then
9232     // Ptr + (Offset & ~15).
9233     if (Offset < 0)
9234       return SDValue();
9235     if ((Offset % RequiredAlign.value()) & 3)
9236       return SDValue();
9237     int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
9238     if (StartOffset) {
9239       SDLoc DL(Ptr);
9240       Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
9241                         DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
9242     }
9243 
9244     int EltNo = (Offset - StartOffset) >> 2;
9245     unsigned NumElems = VT.getVectorNumElements();
9246 
9247     EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
9248     SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
9249                              LD->getPointerInfo().getWithOffset(StartOffset));
9250 
9251     SmallVector<int, 8> Mask(NumElems, EltNo);
9252 
9253     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
9254   }
9255 
9256   return SDValue();
9257 }
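
// Illustrative sketch for LowerAsSplatVectorLoad above (hypothetical values):
// splatting an i32 loaded from a 16-byte-aligned stack slot at offset +8 is
// rewritten as a v4i32 load of the whole slot followed by a splat shuffle of
// element 2 (EltNo = (Offset - StartOffset) >> 2).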
9258 
9259 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
9260 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
9261   if (ISD::isNON_EXTLoad(Elt.getNode())) {
9262     auto *BaseLd = cast<LoadSDNode>(Elt);
9263     if (!BaseLd->isSimple())
9264       return false;
9265     Ld = BaseLd;
9266     ByteOffset = 0;
9267     return true;
9268   }
9269 
9270   switch (Elt.getOpcode()) {
9271   case ISD::BITCAST:
9272   case ISD::TRUNCATE:
9273   case ISD::SCALAR_TO_VECTOR:
9274     return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
9275   case ISD::SRL:
9276     if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9277       uint64_t Amt = AmtC->getZExtValue();
9278       if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
9279         ByteOffset += Amt / 8;
9280         return true;
9281       }
9282     }
9283     break;
9284   case ISD::EXTRACT_VECTOR_ELT:
9285     if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9286       SDValue Src = Elt.getOperand(0);
9287       unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
9288       unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
9289       if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
9290           findEltLoadSrc(Src, Ld, ByteOffset)) {
9291         uint64_t Idx = IdxC->getZExtValue();
9292         ByteOffset += Idx * (SrcSizeInBits / 8);
9293         return true;
9294       }
9295     }
9296     break;
9297   }
9298 
9299   return false;
9300 }
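
// Illustrative sketch for findEltLoadSrc above (hypothetical values): for
// Elt = (trunc i32 (srl (i64 load %p), 32)) the search peeks through the
// truncate and the whole-byte shift, returning the i64 load with
// ByteOffset = 4.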
9301 
9302 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
9303 /// elements can be replaced by a single large load which has the same value as
9304 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
9305 ///
9306 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
9307 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
9308                                         const SDLoc &DL, SelectionDAG &DAG,
9309                                         const X86Subtarget &Subtarget,
9310                                         bool IsAfterLegalize) {
9311   if ((VT.getScalarSizeInBits() % 8) != 0)
9312     return SDValue();
9313 
9314   unsigned NumElems = Elts.size();
9315 
9316   int LastLoadedElt = -1;
9317   APInt LoadMask = APInt::getZero(NumElems);
9318   APInt ZeroMask = APInt::getZero(NumElems);
9319   APInt UndefMask = APInt::getZero(NumElems);
9320 
9321   SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
9322   SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
9323 
9324   // For each element in the initializer, see if we've found a load, zero or an
9325   // undef.
9326   for (unsigned i = 0; i < NumElems; ++i) {
9327     SDValue Elt = peekThroughBitcasts(Elts[i]);
9328     if (!Elt.getNode())
9329       return SDValue();
9330     if (Elt.isUndef()) {
9331       UndefMask.setBit(i);
9332       continue;
9333     }
9334     if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
9335       ZeroMask.setBit(i);
9336       continue;
9337     }
9338 
9339     // Each loaded element must be the correct fractional portion of the
9340     // requested vector load.
9341     unsigned EltSizeInBits = Elt.getValueSizeInBits();
9342     if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
9343       return SDValue();
9344 
9345     if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
9346       return SDValue();
9347     unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
9348     if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
9349       return SDValue();
9350 
9351     LoadMask.setBit(i);
9352     LastLoadedElt = i;
9353   }
9354   assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
9355           LoadMask.countPopulation()) == NumElems &&
9356          "Incomplete element masks");
9357 
9358   // Handle Special Cases - all undef or undef/zero.
9359   if (UndefMask.countPopulation() == NumElems)
9360     return DAG.getUNDEF(VT);
9361   if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
9362     return VT.isInteger() ? DAG.getConstant(0, DL, VT)
9363                           : DAG.getConstantFP(0.0, DL, VT);
9364 
9365   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9366   int FirstLoadedElt = LoadMask.countTrailingZeros();
9367   SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
9368   EVT EltBaseVT = EltBase.getValueType();
9369   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
9370          "Register/Memory size mismatch");
9371   LoadSDNode *LDBase = Loads[FirstLoadedElt];
9372   assert(LDBase && "Did not find base load for merging consecutive loads");
9373   unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
9374   unsigned BaseSizeInBytes = BaseSizeInBits / 8;
9375   int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
9376   int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
9377   assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
9378 
9379   // TODO: Support offsetting the base load.
9380   if (ByteOffsets[FirstLoadedElt] != 0)
9381     return SDValue();
9382 
9383   // Check to see if the element's load is consecutive to the base load
9384   // or offset from a previous (already checked) load.
9385   auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
9386     LoadSDNode *Ld = Loads[EltIdx];
9387     int64_t ByteOffset = ByteOffsets[EltIdx];
9388     if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
9389       int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
9390       return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
9391               Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
9392     }
9393     return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
9394                                               EltIdx - FirstLoadedElt);
9395   };
9396 
9397   // Consecutive loads can contain UNDEFs but not ZERO elements.
9398   // Consecutive loads with UNDEF and ZERO elements require an
9399   // additional shuffle stage to clear the ZERO elements.
9400   bool IsConsecutiveLoad = true;
9401   bool IsConsecutiveLoadWithZeros = true;
9402   for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
9403     if (LoadMask[i]) {
9404       if (!CheckConsecutiveLoad(LDBase, i)) {
9405         IsConsecutiveLoad = false;
9406         IsConsecutiveLoadWithZeros = false;
9407         break;
9408       }
9409     } else if (ZeroMask[i]) {
9410       IsConsecutiveLoad = false;
9411     }
9412   }
9413 
9414   auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
9415     auto MMOFlags = LDBase->getMemOperand()->getFlags();
9416     assert(LDBase->isSimple() &&
9417            "Cannot merge volatile or atomic loads.");
9418     SDValue NewLd =
9419         DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
9420                     LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
9421                     MMOFlags);
9422     for (auto *LD : Loads)
9423       if (LD)
9424         DAG.makeEquivalentMemoryOrdering(LD, NewLd);
9425     return NewLd;
9426   };
9427 
9428   // Check if the base load is entirely dereferenceable.
9429   bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
9430       VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
9431 
9432   // LOAD - all consecutive load/undefs (must start/end with a load or be
9433   // entirely dereferenceable). If we have found an entire vector of loads and
9434   // undefs, then return a large load of the entire vector width starting at the
9435   // base pointer. If the vector contains zeros, then attempt to shuffle those
9436   // elements.
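  // For example (illustrative only):
  //   build_vector (load i32 p), (load i32 p+4), undef, (load i32 p+12)
  // can become a single (v4i32 load p), since the defined elements are
  // consecutive parts of one 16-byte region starting at p.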
9437   if (FirstLoadedElt == 0 &&
9438       (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
9439       (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
9440     if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
9441       return SDValue();
9442 
9443     // Don't create 256-bit non-temporal aligned loads without AVX2 as these
9444     // will lower to regular temporal loads and use the cache.
9445     if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
9446         VT.is256BitVector() && !Subtarget.hasInt256())
9447       return SDValue();
9448 
9449     if (NumElems == 1)
9450       return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
9451 
9452     if (!ZeroMask)
9453       return CreateLoad(VT, LDBase);
9454 
9455     // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
9456     // vector and a zero vector to clear out the zero elements.
9457     if (!IsAfterLegalize && VT.isVector()) {
9458       unsigned NumMaskElts = VT.getVectorNumElements();
9459       if ((NumMaskElts % NumElems) == 0) {
9460         unsigned Scale = NumMaskElts / NumElems;
9461         SmallVector<int, 4> ClearMask(NumMaskElts, -1);
9462         for (unsigned i = 0; i < NumElems; ++i) {
9463           if (UndefMask[i])
9464             continue;
9465           int Offset = ZeroMask[i] ? NumMaskElts : 0;
9466           for (unsigned j = 0; j != Scale; ++j)
9467             ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
9468         }
9469         SDValue V = CreateLoad(VT, LDBase);
9470         SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
9471                                    : DAG.getConstantFP(0.0, DL, VT);
9472         return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
9473       }
9474     }
9475   }
9476 
9477   // If the upper half of a ymm/zmm load is undef then just load the lower half.
9478   if (VT.is256BitVector() || VT.is512BitVector()) {
9479     unsigned HalfNumElems = NumElems / 2;
9480     if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
9481       EVT HalfVT =
9482           EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
9483       SDValue HalfLD =
9484           EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
9485                                    DAG, Subtarget, IsAfterLegalize);
9486       if (HalfLD)
9487         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
9488                            HalfLD, DAG.getIntPtrConstant(0, DL));
9489     }
9490   }
9491 
9492   // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
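  // For example: build_vector (load i32 p), zero, zero, zero can become
  // (v4i32 X86ISD::VZEXT_LOAD p), i.e. a movd that implicitly zeros the
  // upper elements.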
9493   if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
9494       ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
9495        LoadSizeInBits == 64) &&
9496       ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
9497     MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
9498                                       : MVT::getIntegerVT(LoadSizeInBits);
9499     MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
9500     // Allow v4f32 on SSE1-only targets.
9501     // FIXME: Add more isel patterns so we can just use VT directly.
9502     if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
9503       VecVT = MVT::v4f32;
9504     if (TLI.isTypeLegal(VecVT)) {
9505       SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
9506       SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
9507       SDValue ResNode = DAG.getMemIntrinsicNode(
9508           X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
9509           LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
9510       for (auto *LD : Loads)
9511         if (LD)
9512           DAG.makeEquivalentMemoryOrdering(LD, ResNode);
9513       return DAG.getBitcast(VT, ResNode);
9514     }
9515   }
9516 
9517   // BROADCAST - match the smallest possible repetition pattern, load that
9518   // scalar/subvector element and then broadcast to the entire vector.
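  // For example, a v8i32 build_vector of the form <a,b,a,b,a,b,a,b>, where
  // 'a' and 'b' are adjacent i32 loads, can be lowered as a single 64-bit
  // load of the a/b pair that is then broadcast to every 64-bit lane.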
9519   if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
9520       (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
9521     for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
9522       unsigned RepeatSize = SubElems * BaseSizeInBits;
9523       unsigned ScalarSize = std::min(RepeatSize, 64u);
9524       if (!Subtarget.hasAVX2() && ScalarSize < 32)
9525         continue;
9526 
9527       // Don't attempt a 1:N subvector broadcast - it should be caught by
9528       // combineConcatVectorOps, otherwise it will cause infinite loops.
9529       if (RepeatSize > ScalarSize && SubElems == 1)
9530         continue;
9531 
9532       bool Match = true;
9533       SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
9534       for (unsigned i = 0; i != NumElems && Match; ++i) {
9535         if (!LoadMask[i])
9536           continue;
9537         SDValue Elt = peekThroughBitcasts(Elts[i]);
9538         if (RepeatedLoads[i % SubElems].isUndef())
9539           RepeatedLoads[i % SubElems] = Elt;
9540         else
9541           Match &= (RepeatedLoads[i % SubElems] == Elt);
9542       }
9543 
9544       // We must have loads at both ends of the repetition.
9545       Match &= !RepeatedLoads.front().isUndef();
9546       Match &= !RepeatedLoads.back().isUndef();
9547       if (!Match)
9548         continue;
9549 
9550       EVT RepeatVT =
9551           VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
9552               ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
9553               : EVT::getFloatingPointVT(ScalarSize);
9554       if (RepeatSize > ScalarSize)
9555         RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
9556                                     RepeatSize / ScalarSize);
9557       EVT BroadcastVT =
9558           EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
9559                            VT.getSizeInBits() / ScalarSize);
9560       if (TLI.isTypeLegal(BroadcastVT)) {
9561         if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
9562                 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
9563           SDValue Broadcast = RepeatLoad;
9564           if (RepeatSize > ScalarSize) {
9565             while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
9566               Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
9567           } else {
9568             if (!Subtarget.hasAVX2() &&
9569                 !X86::mayFoldLoadIntoBroadcastFromMem(
9570                     RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
9571                     Subtarget,
9572                     /*AssumeSingleUse=*/true))
9573               return SDValue();
9574             Broadcast =
9575                 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
9576           }
9577           return DAG.getBitcast(VT, Broadcast);
9578         }
9579       }
9580     }
9581   }
9582 
9583   return SDValue();
9584 }
9585 
9586 // Combine a vector op (shuffle etc.) that is equal to build_vector load1,
9587 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
9588 // are consecutive, non-overlapping, and in the right order.
9589 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
9590                                          SelectionDAG &DAG,
9591                                          const X86Subtarget &Subtarget,
9592                                          bool IsAfterLegalize) {
9593   SmallVector<SDValue, 64> Elts;
9594   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9595     if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
9596       Elts.push_back(Elt);
9597       continue;
9598     }
9599     return SDValue();
9600   }
9601   assert(Elts.size() == VT.getVectorNumElements());
9602   return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
9603                                   IsAfterLegalize);
9604 }
9605 
9606 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
9607                                    unsigned SplatBitSize, LLVMContext &C) {
9608   unsigned ScalarSize = VT.getScalarSizeInBits();
9609   unsigned NumElm = SplatBitSize / ScalarSize;
9610 
9611   SmallVector<Constant *, 32> ConstantVec;
9612   for (unsigned i = 0; i < NumElm; i++) {
9613     APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
9614     Constant *Const;
9615     if (VT.isFloatingPoint()) {
9616       if (ScalarSize == 16) {
9617         Const = ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
9618       } else if (ScalarSize == 32) {
9619         Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
9620       } else {
9621         assert(ScalarSize == 64 && "Unsupported floating point scalar size");
9622         Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
9623       }
9624     } else
9625       Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
9626     ConstantVec.push_back(Const);
9627   }
9628   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
9629 }
9630 
9631 static bool isFoldableUseOfShuffle(SDNode *N) {
9632   for (auto *U : N->uses()) {
9633     unsigned Opc = U->getOpcode();
9634     // VPERMV/VPERMV3 shuffles can never fold their index operands.
9635     if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
9636       return false;
9637     if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
9638       return false;
9639     if (isTargetShuffle(Opc))
9640       return true;
9641     if (Opc == ISD::BITCAST) // Ignore bitcasts
9642       return isFoldableUseOfShuffle(U);
9643     if (N->hasOneUse()) {
9644       // TODO: There may be some general way to know if an SDNode can
9645       // be folded. We now only know whether an MI is foldable.
9646       if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
9647         return false;
9648       return true;
9649     }
9650   }
9651   return false;
9652 }
9653 
9654 /// Attempt to use the vbroadcast instruction to generate a splat value
9655 /// from a splat BUILD_VECTOR which uses:
9656 ///  a. A single scalar load, or a constant.
9657 ///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
9658 ///
9659 /// The VBROADCAST node is returned when a pattern is found,
9660 /// or SDValue() otherwise.
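/// For example, a splat of a single loaded scalar such as:
///   (v4f32 build_vector (load f32 p), (load f32 p), (load f32 p), (load f32 p))
/// can be lowered to a broadcast of that load (a single vbroadcastss).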
9661 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
9662                                            const X86Subtarget &Subtarget,
9663                                            SelectionDAG &DAG) {
9664   // VBROADCAST requires AVX.
9665   // TODO: Splats could be generated for non-AVX CPUs using SSE
9666   // instructions, but there's less potential gain for only 128-bit vectors.
9667   if (!Subtarget.hasAVX())
9668     return SDValue();
9669 
9670   MVT VT = BVOp->getSimpleValueType(0);
9671   unsigned NumElts = VT.getVectorNumElements();
9672   SDLoc dl(BVOp);
9673 
9674   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
9675          "Unsupported vector type for broadcast.");
9676 
9677   // See if the build vector is a repeating sequence of scalars (inc. splat).
9678   SDValue Ld;
9679   BitVector UndefElements;
9680   SmallVector<SDValue, 16> Sequence;
9681   if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
9682     assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
9683     if (Sequence.size() == 1)
9684       Ld = Sequence[0];
9685   }
9686 
9687   // Attempt to use VBROADCASTM
9688   // From this pattern:
9689   // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
9690   // b. t1 = (build_vector t0 t0)
9691   //
9692   // Create (VBROADCASTM v2i1 X)
9693   if (!Sequence.empty() && Subtarget.hasCDI()) {
9694     // If not a splat, are the upper sequence values zeroable?
9695     unsigned SeqLen = Sequence.size();
9696     bool UpperZeroOrUndef =
9697         SeqLen == 1 ||
9698         llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
9699           return !V || V.isUndef() || isNullConstant(V);
9700         });
9701     SDValue Op0 = Sequence[0];
9702     if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
9703                              (Op0.getOpcode() == ISD::ZERO_EXTEND &&
9704                               Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
9705       SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
9706                              ? Op0.getOperand(0)
9707                              : Op0.getOperand(0).getOperand(0);
9708       MVT MaskVT = BOperand.getSimpleValueType();
9709       MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
9710       if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) ||  // for broadcastmb2q
9711           (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
9712         MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
9713         if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
9714           unsigned Scale = 512 / VT.getSizeInBits();
9715           BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
9716         }
9717         SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
9718         if (BcstVT.getSizeInBits() != VT.getSizeInBits())
9719           Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
9720         return DAG.getBitcast(VT, Bcst);
9721       }
9722     }
9723   }
9724 
9725   unsigned NumUndefElts = UndefElements.count();
9726   if (!Ld || (NumElts - NumUndefElts) <= 1) {
9727     APInt SplatValue, Undef;
9728     unsigned SplatBitSize;
9729     bool HasUndef;
9730     // Check if this is a repeated constant pattern suitable for broadcasting.
9731     if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
9732         SplatBitSize > VT.getScalarSizeInBits() &&
9733         SplatBitSize < VT.getSizeInBits()) {
9734       // Avoid replacing with broadcast when it's a use of a shuffle
9735       // instruction to preserve the present custom lowering of shuffles.
9736       if (isFoldableUseOfShuffle(BVOp))
9737         return SDValue();
9738       // replace BUILD_VECTOR with broadcast of the repeated constants.
9739       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9740       LLVMContext *Ctx = DAG.getContext();
9741       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
9742       if (Subtarget.hasAVX()) {
9743         if (SplatBitSize == 32 || SplatBitSize == 64 ||
9744             (SplatBitSize < 32 && Subtarget.hasAVX2())) {
9745           // Splatted value can fit in one INTEGER constant in constant pool.
9746           // Load the constant and broadcast it.
9747           MVT CVT = MVT::getIntegerVT(SplatBitSize);
9748           Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
9749           Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
9750           SDValue CP = DAG.getConstantPool(C, PVT);
9751           unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
9752 
9753           Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9754           SDVTList Tys =
9755               DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
9756           SDValue Ops[] = {DAG.getEntryNode(), CP};
9757           MachinePointerInfo MPI =
9758               MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9759           SDValue Brdcst = DAG.getMemIntrinsicNode(
9760               X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
9761               MachineMemOperand::MOLoad);
9762           return DAG.getBitcast(VT, Brdcst);
9763         }
9764         if (SplatBitSize > 64) {
9765           // Load the vector of constants and broadcast it.
9766           Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
9767                                              *Ctx);
9768           SDValue VCP = DAG.getConstantPool(VecC, PVT);
9769           unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
9770           MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
9771           Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
9772           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9773           SDValue Ops[] = {DAG.getEntryNode(), VCP};
9774           MachinePointerInfo MPI =
9775               MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9776           return DAG.getMemIntrinsicNode(
9777               X86ISD::SUBV_BROADCAST_LOAD, dl, Tys, Ops, VVT, MPI, Alignment,
9778               MachineMemOperand::MOLoad);
9779         }
9780       }
9781     }
9782 
9783     // If we are moving a scalar into a vector (Ld must be set and all elements
9784     // but 1 are undef) and that operation is not obviously supported by
9785     // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
9786     // That's better than general shuffling and may eliminate a load to GPR and
9787     // move from scalar to vector register.
9788     if (!Ld || NumElts - NumUndefElts != 1)
9789       return SDValue();
9790     unsigned ScalarSize = Ld.getValueSizeInBits();
9791     if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
9792       return SDValue();
9793   }
9794 
9795   bool ConstSplatVal =
9796       (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
9797   bool IsLoad = ISD::isNormalLoad(Ld.getNode());
9798 
9799   // TODO: Handle broadcasts of non-constant sequences.
9800 
9801   // Make sure that all of the users of a non-constant load are from the
9802   // BUILD_VECTOR node.
9803   // FIXME: Is the use count needed for non-constant, non-load case?
9804   if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
9805     return SDValue();
9806 
9807   unsigned ScalarSize = Ld.getValueSizeInBits();
9808   bool IsGE256 = (VT.getSizeInBits() >= 256);
9809 
9810   // When optimizing for size, generate up to 5 extra bytes for a broadcast
9811   // instruction to save 8 or more bytes of constant pool data.
9812   // TODO: If multiple splats are generated to load the same constant,
9813   // it may be detrimental to overall size. There needs to be a way to detect
9814   // that condition to know if this is truly a size win.
9815   bool OptForSize = DAG.shouldOptForSize();
9816 
9817   // Handle broadcasting a single constant scalar from the constant pool
9818   // into a vector.
9819   // On Sandybridge (no AVX2), it is still better to load a constant vector
9820   // from the constant pool and not to broadcast it from a scalar.
9821   // But override that restriction when optimizing for size.
9822   // TODO: Check if splatting is recommended for other AVX-capable CPUs.
9823   if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
9824     EVT CVT = Ld.getValueType();
9825     assert(!CVT.isVector() && "Must not broadcast a vector type");
9826 
9827     // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
9828     // For size optimization, also splat v2f64 and v2i64, and for size opt
9829     // with AVX2, also splat i8 and i16.
9830     // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
9831     if (ScalarSize == 32 ||
9832         (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
9833         CVT == MVT::f16 ||
9834         (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
9835       const Constant *C = nullptr;
9836       if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
9837         C = CI->getConstantIntValue();
9838       else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
9839         C = CF->getConstantFPValue();
9840 
9841       assert(C && "Invalid constant type");
9842 
9843       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9844       SDValue CP =
9845           DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
9846       Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9847 
9848       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9849       SDValue Ops[] = {DAG.getEntryNode(), CP};
9850       MachinePointerInfo MPI =
9851           MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9852       return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
9853                                      MPI, Alignment, MachineMemOperand::MOLoad);
9854     }
9855   }
9856 
9857   // Handle AVX2 in-register broadcasts.
9858   if (!IsLoad && Subtarget.hasInt256() &&
9859       (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
9860     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9861 
9862   // The scalar source must be a normal load.
9863   if (!IsLoad)
9864     return SDValue();
9865 
9866   // Make sure the non-chain result is only used by this build vector.
9867   if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
9868     return SDValue();
9869 
9870   if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
9871       (Subtarget.hasVLX() && ScalarSize == 64)) {
9872     auto *LN = cast<LoadSDNode>(Ld);
9873     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9874     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9875     SDValue BCast =
9876         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9877                                 LN->getMemoryVT(), LN->getMemOperand());
9878     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9879     return BCast;
9880   }
9881 
9882   // The integer check is needed for the 64-bit-into-128-bit case so that it
9883   // doesn't match double, since there is no vbroadcastsd xmm instruction.
9884   if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
9885       (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
9886     auto *LN = cast<LoadSDNode>(Ld);
9887     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9888     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9889     SDValue BCast =
9890         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9891                                 LN->getMemoryVT(), LN->getMemOperand());
9892     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9893     return BCast;
9894   }
9895 
9896   if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
9897     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9898 
9899   // Unsupported broadcast.
9900   return SDValue();
9901 }
9902 
9903 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
9904 /// underlying vector and index.
9905 ///
9906 /// Modifies \p ExtractedFromVec to the real vector and returns the real
9907 /// index.
9908 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
9909                                          SDValue ExtIdx) {
9910   int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
9911   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
9912     return Idx;
9913 
9914   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
9915   // lowered this:
9916   //   (extract_vector_elt (v8f32 %1), Constant<6>)
9917   // to:
9918   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
9919   //                           (extract_subvector (v8f32 %0), Constant<4>),
9920   //                           undef)
9921   //                       Constant<0>)
9922   // In this case the vector is the extract_subvector expression and the index
9923   // is 2, as specified by the shuffle.
9924   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
9925   SDValue ShuffleVec = SVOp->getOperand(0);
9926   MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
9927   assert(ShuffleVecVT.getVectorElementType() ==
9928          ExtractedFromVec.getSimpleValueType().getVectorElementType());
9929 
9930   int ShuffleIdx = SVOp->getMaskElt(Idx);
9931   if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
9932     ExtractedFromVec = ShuffleVec;
9933     return ShuffleIdx;
9934   }
9935   return Idx;
9936 }
9937 
9938 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
9939   MVT VT = Op.getSimpleValueType();
9940 
9941   // Skip if insert_vec_elt is not supported.
9942   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9943   if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
9944     return SDValue();
9945 
9946   SDLoc DL(Op);
9947   unsigned NumElems = Op.getNumOperands();
9948 
9949   SDValue VecIn1;
9950   SDValue VecIn2;
9951   SmallVector<unsigned, 4> InsertIndices;
9952   SmallVector<int, 8> Mask(NumElems, -1);
9953 
9954   for (unsigned i = 0; i != NumElems; ++i) {
9955     unsigned Opc = Op.getOperand(i).getOpcode();
9956 
9957     if (Opc == ISD::UNDEF)
9958       continue;
9959 
9960     if (Opc != ISD::EXTRACT_VECTOR_ELT) {
9961       // Quit if more than 1 element needs inserting.
9962       if (InsertIndices.size() > 1)
9963         return SDValue();
9964 
9965       InsertIndices.push_back(i);
9966       continue;
9967     }
9968 
9969     SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
9970     SDValue ExtIdx = Op.getOperand(i).getOperand(1);
9971 
9972     // Quit if non-constant index.
9973     if (!isa<ConstantSDNode>(ExtIdx))
9974       return SDValue();
9975     int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
9976 
9977     // Quit if extracted from vector of different type.
9978     if (ExtractedFromVec.getValueType() != VT)
9979       return SDValue();
9980 
9981     if (!VecIn1.getNode())
9982       VecIn1 = ExtractedFromVec;
9983     else if (VecIn1 != ExtractedFromVec) {
9984       if (!VecIn2.getNode())
9985         VecIn2 = ExtractedFromVec;
9986       else if (VecIn2 != ExtractedFromVec)
9987         // Quit if more than 2 vectors to shuffle
9988         // Quit if more than 2 vectors would need shuffling.
9989     }
9990 
9991     if (ExtractedFromVec == VecIn1)
9992       Mask[i] = Idx;
9993     else if (ExtractedFromVec == VecIn2)
9994       Mask[i] = Idx + NumElems;
9995   }
9996 
9997   if (!VecIn1.getNode())
9998     return SDValue();
9999 
10000   VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
10001   SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
10002 
10003   for (unsigned Idx : InsertIndices)
10004     NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
10005                      DAG.getIntPtrConstant(Idx, DL));
10006 
10007   return NV;
10008 }
10009 
10010 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
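// For example, (v8bf16 build_vector a0, ..., a7) is handled by bitcasting
// each scalar to i16, building the equivalent v8i16 vector, and bitcasting
// the result back to v8bf16.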
10011 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
10012                                        const X86Subtarget &Subtarget) {
10013   MVT VT = Op.getSimpleValueType();
10014   MVT IVT = VT.changeVectorElementTypeToInteger();
10015   SmallVector<SDValue, 16> NewOps;
10016   for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
10017     NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
10018   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
10019   return DAG.getBitcast(VT, Res);
10020 }
10021 
10022 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
10023 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
10024                                      const X86Subtarget &Subtarget) {
10025 
10026   MVT VT = Op.getSimpleValueType();
10027   assert((VT.getVectorElementType() == MVT::i1) &&
10028          "Unexpected type in LowerBUILD_VECTORvXi1!");
10029 
10030   SDLoc dl(Op);
10031   if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
10032       ISD::isBuildVectorAllOnes(Op.getNode()))
10033     return Op;
10034 
10035   uint64_t Immediate = 0;
10036   SmallVector<unsigned, 16> NonConstIdx;
10037   bool IsSplat = true;
10038   bool HasConstElts = false;
10039   int SplatIdx = -1;
10040   for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
10041     SDValue In = Op.getOperand(idx);
10042     if (In.isUndef())
10043       continue;
10044     if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
10045       Immediate |= (InC->getZExtValue() & 0x1) << idx;
10046       HasConstElts = true;
10047     } else {
10048       NonConstIdx.push_back(idx);
10049     }
10050     if (SplatIdx < 0)
10051       SplatIdx = idx;
10052     else if (In != Op.getOperand(SplatIdx))
10053       IsSplat = false;
10054   }
10055 
10056   // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
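  // For example, a v16i1 splat of %b can be built as
  //   (bitcast v16i1 (select %b, i16 -1, i16 0))
  // so the select can be done in the scalar domain with a cmov.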
10057   if (IsSplat) {
10058     // The build_vector allows the scalar element to be larger than the vector
10059     // element type. We need to mask it to use as a condition unless we know
10060     // the upper bits are zero.
10061     // FIXME: Use computeKnownBits instead of checking specific opcode?
10062     SDValue Cond = Op.getOperand(SplatIdx);
10063     assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
10064     if (Cond.getOpcode() != ISD::SETCC)
10065       Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
10066                          DAG.getConstant(1, dl, MVT::i8));
10067 
10068     // Perform the select in the scalar domain so we can use cmov.
10069     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10070       SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
10071                                      DAG.getAllOnesConstant(dl, MVT::i32),
10072                                      DAG.getConstant(0, dl, MVT::i32));
10073       Select = DAG.getBitcast(MVT::v32i1, Select);
10074       return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
10075     } else {
10076       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10077       SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
10078                                      DAG.getAllOnesConstant(dl, ImmVT),
10079                                      DAG.getConstant(0, dl, ImmVT));
10080       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10081       Select = DAG.getBitcast(VecVT, Select);
10082       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
10083                          DAG.getIntPtrConstant(0, dl));
10084     }
10085   }
10086 
10087   // insert elements one by one
10088   SDValue DstVec;
10089   if (HasConstElts) {
10090     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10091       SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
10092       SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
10093       ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
10094       ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
10095       DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
10096     } else {
10097       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10098       SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
10099       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10100       DstVec = DAG.getBitcast(VecVT, Imm);
10101       DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
10102                            DAG.getIntPtrConstant(0, dl));
10103     }
10104   } else
10105     DstVec = DAG.getUNDEF(VT);
10106 
10107   for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
10108     unsigned InsertIdx = NonConstIdx[i];
10109     DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10110                          Op.getOperand(InsertIdx),
10111                          DAG.getIntPtrConstant(InsertIdx, dl));
10112   }
10113   return DstVec;
10114 }
10115 
10116 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
10117   switch (Opcode) {
10118   case X86ISD::PACKSS:
10119   case X86ISD::PACKUS:
10120   case X86ISD::FHADD:
10121   case X86ISD::FHSUB:
10122   case X86ISD::HADD:
10123   case X86ISD::HSUB:
10124     return true;
10125   }
10126   return false;
10127 }
10128 
10129 /// This is a helper function of LowerToHorizontalOp().
10130 /// This function checks that the input build_vector \p N implements a
10131 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
10132 /// may not match the layout of an x86 256-bit horizontal instruction.
10133 /// In other words, if this returns true, then some extraction/insertion will
10134 /// be required to produce a valid horizontal instruction.
10135 ///
10136 /// Parameter \p Opcode defines the kind of horizontal operation to match.
10137 /// For example, if \p Opcode is equal to ISD::ADD, then this function
10138 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
10139 /// is equal to ISD::SUB, then this function checks if this is a horizontal
10140 /// arithmetic sub.
10141 ///
10142 /// This function only analyzes elements of \p N whose indices are
10143 /// in range [BaseIdx, LastIdx).
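///
/// For example, with \p Opcode == ISD::ADD, BaseIdx == 0 and LastIdx == 4,
/// a match requires the analyzed elements to look like:
///   N[0] = add (extractelt A, 0), (extractelt A, 1)
///   N[1] = add (extractelt A, 2), (extractelt A, 3)
///   N[2] = add (extractelt B, 0), (extractelt B, 1)
///   N[3] = add (extractelt B, 2), (extractelt B, 3)
/// in which case V0 is set to A and V1 to B (undef elements are skipped).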
10144 ///
10145 /// TODO: This function was originally used to match both real and fake partial
10146 /// horizontal operations, but the index-matching logic is incorrect for that.
10147 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
10148 /// code because it is only used for partial h-op matching now?
10149 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
10150                                   SelectionDAG &DAG,
10151                                   unsigned BaseIdx, unsigned LastIdx,
10152                                   SDValue &V0, SDValue &V1) {
10153   EVT VT = N->getValueType(0);
10154   assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
10155   assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
10156   assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
10157          "Invalid Vector in input!");
10158 
10159   bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
10160   bool CanFold = true;
10161   unsigned ExpectedVExtractIdx = BaseIdx;
10162   unsigned NumElts = LastIdx - BaseIdx;
10163   V0 = DAG.getUNDEF(VT);
10164   V1 = DAG.getUNDEF(VT);
10165 
10166   // Check if N implements a horizontal binop.
10167   for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
10168     SDValue Op = N->getOperand(i + BaseIdx);
10169 
10170     // Skip UNDEFs.
10171     if (Op->isUndef()) {
10172       // Update the expected vector extract index.
10173       if (i * 2 == NumElts)
10174         ExpectedVExtractIdx = BaseIdx;
10175       ExpectedVExtractIdx += 2;
10176       continue;
10177     }
10178 
10179     CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
10180 
10181     if (!CanFold)
10182       break;
10183 
10184     SDValue Op0 = Op.getOperand(0);
10185     SDValue Op1 = Op.getOperand(1);
10186 
10187     // Try to match the following pattern:
10188     // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
10189     CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10190         Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10191         Op0.getOperand(0) == Op1.getOperand(0) &&
10192         isa<ConstantSDNode>(Op0.getOperand(1)) &&
10193         isa<ConstantSDNode>(Op1.getOperand(1)));
10194     if (!CanFold)
10195       break;
10196 
10197     unsigned I0 = Op0.getConstantOperandVal(1);
10198     unsigned I1 = Op1.getConstantOperandVal(1);
10199 
10200     if (i * 2 < NumElts) {
10201       if (V0.isUndef()) {
10202         V0 = Op0.getOperand(0);
10203         if (V0.getValueType() != VT)
10204           return false;
10205       }
10206     } else {
10207       if (V1.isUndef()) {
10208         V1 = Op0.getOperand(0);
10209         if (V1.getValueType() != VT)
10210           return false;
10211       }
10212       if (i * 2 == NumElts)
10213         ExpectedVExtractIdx = BaseIdx;
10214     }
10215 
10216     SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
10217     if (I0 == ExpectedVExtractIdx)
10218       CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
10219     else if (IsCommutable && I1 == ExpectedVExtractIdx) {
10220       // Try to match the following dag sequence:
10221       // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
10222       CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
10223     } else
10224       CanFold = false;
10225 
10226     ExpectedVExtractIdx += 2;
10227   }
10228 
10229   return CanFold;
10230 }
10231 
10232 /// Emit a sequence of two 128-bit horizontal add/sub followed by
10233 /// a concat_vector.
10234 ///
10235 /// This is a helper function of LowerToHorizontalOp().
10236 /// This function expects two 256-bit vectors called V0 and V1.
10237 /// At first, each vector is split into two separate 128-bit vectors.
10238 /// Then, the resulting 128-bit vectors are used to implement two
10239 /// horizontal binary operations.
10240 ///
10241 /// The kind of horizontal binary operation is defined by \p X86Opcode.
10242 ///
10243 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
10244 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs
10245 /// to the two new horizontal binops.
10246 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
10247 /// horizontal binop dag node would take as input the lower 128-bit of V1
10248 /// and the upper 128-bit of V1.
10249 ///   Example:
10250 ///     HADD V0_LO, V0_HI
10251 ///     HADD V1_LO, V1_HI
10252 ///
10253 /// Otherwise, the first horizontal binop dag node takes as input the lower
10254 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
10255 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
10256 ///   Example:
10257 ///     HADD V0_LO, V1_LO
10258 ///     HADD V0_HI, V1_HI
10259 ///
10260 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
10261 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
10262 /// the upper 128-bits of the result.
10263 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
10264                                      const SDLoc &DL, SelectionDAG &DAG,
10265                                      unsigned X86Opcode, bool Mode,
10266                                      bool isUndefLO, bool isUndefHI) {
10267   MVT VT = V0.getSimpleValueType();
10268   assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
10269          "Invalid nodes in input!");
10270 
10271   unsigned NumElts = VT.getVectorNumElements();
10272   SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
10273   SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
10274   SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
10275   SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
10276   MVT NewVT = V0_LO.getSimpleValueType();
10277 
10278   SDValue LO = DAG.getUNDEF(NewVT);
10279   SDValue HI = DAG.getUNDEF(NewVT);
10280 
10281   if (Mode) {
10282     // Don't emit a horizontal binop if the result is expected to be UNDEF.
10283     if (!isUndefLO && !V0->isUndef())
10284       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
10285     if (!isUndefHI && !V1->isUndef())
10286       HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
10287   } else {
10288     // Don't emit a horizontal binop if the result is expected to be UNDEF.
10289     if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
10290       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
10291 
10292     if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
10293       HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
10294   }
10295 
10296   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
10297 }
10298 
10299 /// Returns true iff \p BV builds a vector with the result equivalent to
10300 /// the result of an ADDSUB/SUBADD operation.
10301 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
10302 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
10303 /// \p Opnd0 and \p Opnd1.
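/// For example, a v4f32 build_vector whose elements are
///   (fsub (extractelt A, 0), (extractelt B, 0)),
///   (fadd (extractelt A, 1), (extractelt B, 1)),
///   (fsub (extractelt A, 2), (extractelt B, 2)),
///   (fadd (extractelt A, 3), (extractelt B, 3))
/// matches the ADDSUB form with Opnd0 = A, Opnd1 = B and IsSubAdd = false.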
10304 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
10305                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
10306                              SDValue &Opnd0, SDValue &Opnd1,
10307                              unsigned &NumExtracts,
10308                              bool &IsSubAdd) {
10309 
10310   MVT VT = BV->getSimpleValueType(0);
10311   if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
10312     return false;
10313 
10314   unsigned NumElts = VT.getVectorNumElements();
10315   SDValue InVec0 = DAG.getUNDEF(VT);
10316   SDValue InVec1 = DAG.getUNDEF(VT);
10317 
10318   NumExtracts = 0;
10319 
10320   // Odd-numbered elements in the input build vector are obtained from
10321   // adding/subtracting two integer/float elements.
10322   // Even-numbered elements in the input build vector are obtained from
10323   // subtracting/adding two integer/float elements.
10324   unsigned Opc[2] = {0, 0};
10325   for (unsigned i = 0, e = NumElts; i != e; ++i) {
10326     SDValue Op = BV->getOperand(i);
10327 
10328     // Skip 'undef' values.
10329     unsigned Opcode = Op.getOpcode();
10330     if (Opcode == ISD::UNDEF)
10331       continue;
10332 
10333     // Early exit if we found an unexpected opcode.
10334     if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
10335       return false;
10336 
10337     SDValue Op0 = Op.getOperand(0);
10338     SDValue Op1 = Op.getOperand(1);
10339 
10340     // Try to match the following pattern:
10341     // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
10342     // Early exit if we cannot match that sequence.
10343     if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10344         Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10345         !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10346         Op0.getOperand(1) != Op1.getOperand(1))
10347       return false;
10348 
10349     unsigned I0 = Op0.getConstantOperandVal(1);
10350     if (I0 != i)
10351       return false;
10352 
10353     // We found a valid add/sub node; make sure it's the same opcode as previous
10354     // elements for this parity.
10355     if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
10356       return false;
10357     Opc[i % 2] = Opcode;
10358 
10359     // Update InVec0 and InVec1.
10360     if (InVec0.isUndef()) {
10361       InVec0 = Op0.getOperand(0);
10362       if (InVec0.getSimpleValueType() != VT)
10363         return false;
10364     }
10365     if (InVec1.isUndef()) {
10366       InVec1 = Op1.getOperand(0);
10367       if (InVec1.getSimpleValueType() != VT)
10368         return false;
10369     }
10370 
10371     // Make sure that the operands of each add/sub node always
10372     // come from the same pair of vectors.
10373     if (InVec0 != Op0.getOperand(0)) {
10374       if (Opcode == ISD::FSUB)
10375         return false;
10376 
10377       // FADD is commutable. Try to commute the operands
10378       // and then test again.
10379       std::swap(Op0, Op1);
10380       if (InVec0 != Op0.getOperand(0))
10381         return false;
10382     }
10383 
10384     if (InVec1 != Op1.getOperand(0))
10385       return false;
10386 
10387     // Increment the number of extractions done.
10388     ++NumExtracts;
10389   }
10390 
10391   // Ensure we have found an opcode for both parities and that they are
10392   // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
10393   // inputs are undef.
10394   if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
10395       InVec0.isUndef() || InVec1.isUndef())
10396     return false;
10397 
10398   IsSubAdd = Opc[0] == ISD::FADD;
10399 
10400   Opnd0 = InVec0;
10401   Opnd1 = InVec1;
10402   return true;
10403 }
10404 
10405 /// Returns true if it is possible to fold MUL and an idiom that has already been
10406 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
10407 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
10408 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
10409 ///
10410 /// Prior to calling this function it should be known that there is some
10411 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
10412 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
10413 /// before replacement of such SDNode with ADDSUB operation. Thus the number
10414 /// of \p Opnd0 uses is expected to be equal to 2.
10415 /// For example, this function may be called for the following IR:
10416 ///    %AB = fmul fast <2 x double> %A, %B
10417 ///    %Sub = fsub fast <2 x double> %AB, %C
10418 ///    %Add = fadd fast <2 x double> %AB, %C
10419 ///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
10420 ///                            <2 x i32> <i32 0, i32 3>
10421 /// There is a def for %Addsub here, which potentially can be replaced by
10422 /// X86ISD::ADDSUB operation:
10423 ///    %Addsub = X86ISD::ADDSUB %AB, %C
10424 /// and such ADDSUB can further be replaced with FMADDSUB:
10425 ///    %Addsub = FMADDSUB %A, %B, %C.
10426 ///
10427 /// The main reason why this method is called before the replacement of the
10428 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
10429 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
10430 /// FMADDSUB is.
10431 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
10432                                  SelectionDAG &DAG,
10433                                  SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
10434                                  unsigned ExpectedUses) {
10435   if (Opnd0.getOpcode() != ISD::FMUL ||
10436       !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
10437     return false;
10438 
10439   // FIXME: These checks must match the similar ones in
10440   // DAGCombiner::visitFADDForFMACombine. It would be good to have one
10441   // function that would answer if it is Ok to fuse MUL + ADD to FMADD
10442   // or MUL + ADDSUB to FMADDSUB.
10443   const TargetOptions &Options = DAG.getTarget().Options;
10444   bool AllowFusion =
10445       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
10446   if (!AllowFusion)
10447     return false;
10448 
10449   Opnd2 = Opnd1;
10450   Opnd1 = Opnd0.getOperand(1);
10451   Opnd0 = Opnd0.getOperand(0);
10452 
10453   return true;
10454 }
10455 
10456 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
10457 /// 'fsubadd' operation into an X86ISD::ADDSUB or X86ISD::FMADDSUB or
10458 /// X86ISD::FMSUBADD node.
10459 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
10460                                        const X86Subtarget &Subtarget,
10461                                        SelectionDAG &DAG) {
10462   SDValue Opnd0, Opnd1;
10463   unsigned NumExtracts;
10464   bool IsSubAdd;
10465   if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
10466                         IsSubAdd))
10467     return SDValue();
10468 
10469   MVT VT = BV->getSimpleValueType(0);
10470   SDLoc DL(BV);
10471 
10472   // Try to generate X86ISD::FMADDSUB node here.
10473   SDValue Opnd2;
10474   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
10475     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
10476     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
10477   }
10478 
10479   // We only support ADDSUB.
10480   if (IsSubAdd)
10481     return SDValue();
10482 
10483   // There are no known X86 targets with 512-bit ADDSUB instructions!
10484   // Convert to blend(fsub,fadd).
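  // For example, for v8f64 the blend mask is <0,9,2,11,4,13,6,15>: even
  // result lanes come from the FSUB node and odd lanes from the FADD node.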
10485   if (VT.is512BitVector()) {
10486     SmallVector<int> Mask;
10487     for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
10488         Mask.push_back(I);
10489         Mask.push_back(I + E + 1);
10490     }
10491     SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
10492     SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
10493     return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
10494   }
10495 
10496   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
10497 }
10498 
10499 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
10500                              unsigned &HOpcode, SDValue &V0, SDValue &V1) {
10501   // Initialize outputs to known values.
10502   MVT VT = BV->getSimpleValueType(0);
10503   HOpcode = ISD::DELETED_NODE;
10504   V0 = DAG.getUNDEF(VT);
10505   V1 = DAG.getUNDEF(VT);
10506 
10507   // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
10508   // half of the result is calculated independently from the 128-bit halves of
10509   // the inputs, so that makes the index-checking logic below more complicated.
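  // For example, for v8i32 a 256-bit HADD(A, B) produces:
  //   <a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7>
  // rather than the "obvious" fully interleaved ordering.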
10510   unsigned NumElts = VT.getVectorNumElements();
10511   unsigned GenericOpcode = ISD::DELETED_NODE;
10512   unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
10513   unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
10514   unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
10515   for (unsigned i = 0; i != Num128BitChunks; ++i) {
10516     for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
10517       // Ignore undef elements.
10518       SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
10519       if (Op.isUndef())
10520         continue;
10521 
10522       // If there's an opcode mismatch, we're done.
10523       if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
10524         return false;
10525 
10526       // Initialize horizontal opcode.
10527       if (HOpcode == ISD::DELETED_NODE) {
10528         GenericOpcode = Op.getOpcode();
10529         switch (GenericOpcode) {
10530         case ISD::ADD: HOpcode = X86ISD::HADD; break;
10531         case ISD::SUB: HOpcode = X86ISD::HSUB; break;
10532         case ISD::FADD: HOpcode = X86ISD::FHADD; break;
10533         case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
10534         default: return false;
10535         }
10536       }
10537 
10538       SDValue Op0 = Op.getOperand(0);
10539       SDValue Op1 = Op.getOperand(1);
10540       if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10541           Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10542           Op0.getOperand(0) != Op1.getOperand(0) ||
10543           !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10544           !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
10545         return false;
10546 
10547       // The source vector is chosen based on which 64-bit half of the
10548       // destination vector is being calculated.
10549       if (j < NumEltsIn64Bits) {
10550         if (V0.isUndef())
10551           V0 = Op0.getOperand(0);
10552       } else {
10553         if (V1.isUndef())
10554           V1 = Op0.getOperand(0);
10555       }
10556 
10557       SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
10558       if (SourceVec != Op0.getOperand(0))
10559         return false;
10560 
10561       // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
10562       unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
10563       unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
10564       unsigned ExpectedIndex = i * NumEltsIn128Bits +
10565                                (j % NumEltsIn64Bits) * 2;
10566       if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
10567         continue;
10568 
10569       // If this is not a commutative op, this does not match.
10570       if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
10571         return false;
10572 
10573       // Addition is commutative, so try swapping the extract indexes.
10574       // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
10575       if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
10576         continue;
10577 
10578       // Extract indexes do not match horizontal requirement.
10579       return false;
10580     }
10581   }
10582   // We matched. Opcode and operands are returned by reference as arguments.
10583   return true;
10584 }
10585 
10586 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
10587                                     SelectionDAG &DAG, unsigned HOpcode,
10588                                     SDValue V0, SDValue V1) {
10589   // If either input vector is not the same size as the build vector,
10590   // extract/insert the low bits to the correct size.
10591   // This is free (examples: zmm --> xmm, xmm --> ymm).
10592   MVT VT = BV->getSimpleValueType(0);
10593   unsigned Width = VT.getSizeInBits();
10594   if (V0.getValueSizeInBits() > Width)
10595     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
10596   else if (V0.getValueSizeInBits() < Width)
10597     V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
10598 
10599   if (V1.getValueSizeInBits() > Width)
10600     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
10601   else if (V1.getValueSizeInBits() < Width)
10602     V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
10603 
10604   unsigned NumElts = VT.getVectorNumElements();
10605   APInt DemandedElts = APInt::getAllOnes(NumElts);
10606   for (unsigned i = 0; i != NumElts; ++i)
10607     if (BV->getOperand(i).isUndef())
10608       DemandedElts.clearBit(i);
10609 
10610   // If we don't need the upper xmm, then perform as an xmm hop.
10611   unsigned HalfNumElts = NumElts / 2;
10612   if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
10613     MVT HalfVT = VT.getHalfNumVectorElementsVT();
10614     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
10615     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
10616     SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
10617     return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
10618   }
10619 
10620   return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
10621 }
10622 
10623 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
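/// For example:
///   (v4f32 build_vector (fadd (extractelt A, 0), (extractelt A, 1)),
///                       (fadd (extractelt A, 2), (extractelt A, 3)),
///                       (fadd (extractelt B, 0), (extractelt B, 1)),
///                       (fadd (extractelt B, 2), (extractelt B, 3)))
/// can be lowered to (FHADD A, B), i.e. a single haddps.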
10624 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
10625                                    const X86Subtarget &Subtarget,
10626                                    SelectionDAG &DAG) {
10627   // We need at least 2 non-undef elements to make this worthwhile by default.
10628   unsigned NumNonUndefs =
10629       count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
10630   if (NumNonUndefs < 2)
10631     return SDValue();
10632 
10633   // There are 4 sets of horizontal math operations distinguished by type:
10634   // int/FP at 128-bit/256-bit. Each type was introduced with a different
10635   // subtarget feature. Try to match those "native" patterns first.
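        // As an illustrative sketch (not an exhaustive list of matched forms),
        // a v4f32 build_vector of the form:
        //   (fadd (extract V, 0), (extract V, 1)),
        //   (fadd (extract V, 2), (extract V, 3)),
        //   (fadd (extract W, 0), (extract W, 1)),
        //   (fadd (extract W, 2), (extract W, 3))
        // can be matched by isHopBuildVector and lowered to (X86ISD::FHADD V, W).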
10636   MVT VT = BV->getSimpleValueType(0);
10637   if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
10638       ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
10639       ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
10640       ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
10641     unsigned HOpcode;
10642     SDValue V0, V1;
10643     if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
10644       return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
10645   }
10646 
10647   // Try harder to match 256-bit ops by using extract/concat.
10648   if (!Subtarget.hasAVX() || !VT.is256BitVector())
10649     return SDValue();
10650 
10651   // Count the number of UNDEF operands in each half of the input build_vector.
10652   unsigned NumElts = VT.getVectorNumElements();
10653   unsigned Half = NumElts / 2;
10654   unsigned NumUndefsLO = 0;
10655   unsigned NumUndefsHI = 0;
10656   for (unsigned i = 0, e = Half; i != e; ++i)
10657     if (BV->getOperand(i)->isUndef())
10658       NumUndefsLO++;
10659 
10660   for (unsigned i = Half, e = NumElts; i != e; ++i)
10661     if (BV->getOperand(i)->isUndef())
10662       NumUndefsHI++;
10663 
10664   SDLoc DL(BV);
10665   SDValue InVec0, InVec1;
10666   if (VT == MVT::v8i32 || VT == MVT::v16i16) {
10667     SDValue InVec2, InVec3;
10668     unsigned X86Opcode;
10669     bool CanFold = true;
10670 
10671     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
10672         isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
10673                               InVec3) &&
10674         ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10675         ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10676       X86Opcode = X86ISD::HADD;
10677     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
10678                                    InVec1) &&
10679              isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
10680                                    InVec3) &&
10681              ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10682              ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10683       X86Opcode = X86ISD::HSUB;
10684     else
10685       CanFold = false;
10686 
10687     if (CanFold) {
10688       // Do not try to expand this build_vector into a pair of horizontal
10689       // add/sub if we can emit a pair of scalar add/sub.
10690       if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10691         return SDValue();
10692 
10693       // Convert this build_vector into a pair of horizontal binops followed by
10694       // a concat vector. We must adjust the outputs from the partial horizontal
10695       // matching calls above to account for undefined vector halves.
10696       SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
10697       SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
10698       assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
10699       bool isUndefLO = NumUndefsLO == Half;
10700       bool isUndefHI = NumUndefsHI == Half;
10701       return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
10702                                    isUndefHI);
10703     }
10704   }
10705 
10706   if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
10707       VT == MVT::v16i16) {
10708     unsigned X86Opcode;
10709     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
10710       X86Opcode = X86ISD::HADD;
10711     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
10712                                    InVec1))
10713       X86Opcode = X86ISD::HSUB;
10714     else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
10715                                    InVec1))
10716       X86Opcode = X86ISD::FHADD;
10717     else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
10718                                    InVec1))
10719       X86Opcode = X86ISD::FHSUB;
10720     else
10721       return SDValue();
10722 
10723     // Don't try to expand this build_vector into a pair of horizontal add/sub
10724     // if we can simply emit a pair of scalar add/sub.
10725     if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10726       return SDValue();
10727 
10728     // Convert this build_vector into two horizontal add/sub followed by
10729     // a concat vector.
10730     bool isUndefLO = NumUndefsLO == Half;
10731     bool isUndefHI = NumUndefsHI == Half;
10732     return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
10733                                  isUndefLO, isUndefHI);
10734   }
10735 
10736   return SDValue();
10737 }
10738 
10739 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
10740                           SelectionDAG &DAG);
10741 
10742 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
10743 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
10744 /// just apply the bit to the vectors.
10745 /// NOTE: It's not in our interest to start making a general purpose vectorizer
10746 /// from this, but enough scalar bit operations are created from the later
10747 /// legalization + scalarization stages to need basic support.
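      /// As an illustrative sketch of the intended transform:
      ///   (build_vector (and x0, 1), (and x1, 2), (and x2, 4), (and x3, 8))
      ///   --> (and (build_vector x0, x1, x2, x3), (build_vector 1, 2, 4, 8))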
10748 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
10749                                        const X86Subtarget &Subtarget,
10750                                        SelectionDAG &DAG) {
10751   SDLoc DL(Op);
10752   MVT VT = Op->getSimpleValueType(0);
10753   unsigned NumElems = VT.getVectorNumElements();
10754   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10755 
10756   // Check that all elements have the same opcode.
10757   // TODO: Should we allow UNDEFS and if so how many?
10758   unsigned Opcode = Op->getOperand(0).getOpcode();
10759   for (unsigned i = 1; i < NumElems; ++i)
10760     if (Opcode != Op->getOperand(i).getOpcode())
10761       return SDValue();
10762 
10763   // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
10764   bool IsShift = false;
10765   switch (Opcode) {
10766   default:
10767     return SDValue();
10768   case ISD::SHL:
10769   case ISD::SRL:
10770   case ISD::SRA:
10771     IsShift = true;
10772     break;
10773   case ISD::AND:
10774   case ISD::XOR:
10775   case ISD::OR:
10776     // Don't do this if the buildvector is a splat - we'd replace one
10777     // constant with an entire vector.
10778     if (Op->getSplatValue())
10779       return SDValue();
10780     if (!TLI.isOperationLegalOrPromote(Opcode, VT))
10781       return SDValue();
10782     break;
10783   }
10784 
10785   SmallVector<SDValue, 4> LHSElts, RHSElts;
10786   for (SDValue Elt : Op->ops()) {
10787     SDValue LHS = Elt.getOperand(0);
10788     SDValue RHS = Elt.getOperand(1);
10789 
10790     // We expect the canonicalized RHS operand to be the constant.
10791     if (!isa<ConstantSDNode>(RHS))
10792       return SDValue();
10793 
10794     // Extend shift amounts.
10795     if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
10796       if (!IsShift)
10797         return SDValue();
10798       RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
10799     }
10800 
10801     LHSElts.push_back(LHS);
10802     RHSElts.push_back(RHS);
10803   }
10804 
10805   // Limit to shifts by uniform immediates.
10806   // TODO: Only accept vXi8/vXi64 special cases?
10807   // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
10808   if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
10809     return SDValue();
10810 
10811   SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
10812   SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
10813   SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
10814 
10815   if (!IsShift)
10816     return Res;
10817 
10818   // Immediately lower the shift to ensure the constant build vector doesn't
10819   // get converted to a constant pool before the shift is lowered.
10820   return LowerShift(Res, Subtarget, DAG);
10821 }
10822 
10823 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
10824 /// functionality to do this, so it's all zeros, all ones, or some derivation
10825 /// that is cheap to calculate.
10826 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
10827                                          const X86Subtarget &Subtarget) {
10828   SDLoc DL(Op);
10829   MVT VT = Op.getSimpleValueType();
10830 
10831   // Vectors containing all zeros can be matched by pxor and xorps.
10832   if (ISD::isBuildVectorAllZeros(Op.getNode()))
10833     return Op;
10834 
10835   // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
10836   // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
10837   // vpcmpeqd on 256-bit vectors.
10838   if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
10839     if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
10840       return Op;
10841 
10842     return getOnesVector(VT, DAG, DL);
10843   }
10844 
10845   return SDValue();
10846 }
10847 
10848 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
10849 /// from a vector of source values and a vector of extraction indices.
10850 /// The vectors might be manipulated to match the type of the permute op.
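      /// For example (a sketch, assuming only SSSE3 is available): a v4i32 variable
      /// permute is performed as a v16i8 PSHUFB - each i32 index is scaled by 4 and
      /// offset across its 4 bytes (see ScaleIndices below), and the result is
      /// bitcast back to v4i32.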
10851 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
10852                                      SDLoc &DL, SelectionDAG &DAG,
10853                                      const X86Subtarget &Subtarget) {
10854   MVT ShuffleVT = VT;
10855   EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10856   unsigned NumElts = VT.getVectorNumElements();
10857   unsigned SizeInBits = VT.getSizeInBits();
10858 
10859   // Adjust IndicesVec to match VT size.
10860   assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
10861          "Illegal variable permute mask size");
10862   if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
10863     // Narrow/widen the indices vector to the correct size.
10864     if (IndicesVec.getValueSizeInBits() > SizeInBits)
10865       IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
10866                                     NumElts * VT.getScalarSizeInBits());
10867     else if (IndicesVec.getValueSizeInBits() < SizeInBits)
10868       IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
10869                                   SDLoc(IndicesVec), SizeInBits);
10870     // Zero-extend the index elements within the vector.
10871     if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
10872       IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
10873                                IndicesVT, IndicesVec);
10874   }
10875   IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
10876 
10877   // Handle a SrcVec that doesn't match the VT size.
10878   if (SrcVec.getValueSizeInBits() != SizeInBits) {
10879     if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
10880       // Handle larger SrcVec by treating it as a larger permute.
10881       unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
10882       VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
10883       IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10884       IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
10885                                   Subtarget, DAG, SDLoc(IndicesVec));
10886       SDValue NewSrcVec =
10887           createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10888       if (NewSrcVec)
10889         return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
10890       return SDValue();
10891     } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
10892       // Widen smaller SrcVec to match VT.
10893       SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
10894     } else
10895       return SDValue();
10896   }
10897 
10898   auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
10899     assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
10900     EVT SrcVT = Idx.getValueType();
10901     unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
10902     uint64_t IndexScale = 0;
10903     uint64_t IndexOffset = 0;
10904 
10905     // If we're scaling a smaller permute op, then we need to repeat the
10906     // indices, scaling and offsetting them as well.
10907     // e.g. v4i32 -> v16i8 (Scale = 4)
10908     // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
10909     // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
10910     for (uint64_t i = 0; i != Scale; ++i) {
10911       IndexScale |= Scale << (i * NumDstBits);
10912       IndexOffset |= i << (i * NumDstBits);
10913     }
10914 
10915     Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
10916                       DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
10917     Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
10918                       DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
10919     return Idx;
10920   };
10921 
10922   unsigned Opcode = 0;
10923   switch (VT.SimpleTy) {
10924   default:
10925     break;
10926   case MVT::v16i8:
10927     if (Subtarget.hasSSSE3())
10928       Opcode = X86ISD::PSHUFB;
10929     break;
10930   case MVT::v8i16:
10931     if (Subtarget.hasVLX() && Subtarget.hasBWI())
10932       Opcode = X86ISD::VPERMV;
10933     else if (Subtarget.hasSSSE3()) {
10934       Opcode = X86ISD::PSHUFB;
10935       ShuffleVT = MVT::v16i8;
10936     }
10937     break;
10938   case MVT::v4f32:
10939   case MVT::v4i32:
10940     if (Subtarget.hasAVX()) {
10941       Opcode = X86ISD::VPERMILPV;
10942       ShuffleVT = MVT::v4f32;
10943     } else if (Subtarget.hasSSSE3()) {
10944       Opcode = X86ISD::PSHUFB;
10945       ShuffleVT = MVT::v16i8;
10946     }
10947     break;
10948   case MVT::v2f64:
10949   case MVT::v2i64:
10950     if (Subtarget.hasAVX()) {
10951       // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
10952       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
10953       Opcode = X86ISD::VPERMILPV;
10954       ShuffleVT = MVT::v2f64;
10955     } else if (Subtarget.hasSSE41()) {
10956       // SSE41 can compare v2i64 - select between indices 0 and 1.
10957       return DAG.getSelectCC(
10958           DL, IndicesVec,
10959           getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
10960           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
10961           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
10962           ISD::CondCode::SETEQ);
10963     }
10964     break;
10965   case MVT::v32i8:
10966     if (Subtarget.hasVLX() && Subtarget.hasVBMI())
10967       Opcode = X86ISD::VPERMV;
10968     else if (Subtarget.hasXOP()) {
10969       SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
10970       SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
10971       SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
10972       SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
10973       return DAG.getNode(
10974           ISD::CONCAT_VECTORS, DL, VT,
10975           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
10976           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
10977     } else if (Subtarget.hasAVX()) {
10978       SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
10979       SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
10980       SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
10981       SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
10982       auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
10983                               ArrayRef<SDValue> Ops) {
10984         // Permute Lo and Hi and then select based on index range.
10985         // This works as PSHUFB uses bits[3:0] to permute elements and we don't
10986         // care about bit[7] as it's just an index vector.
10987         SDValue Idx = Ops[2];
10988         EVT VT = Idx.getValueType();
10989         return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
10990                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
10991                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
10992                                ISD::CondCode::SETGT);
10993       };
10994       SDValue Ops[] = {LoLo, HiHi, IndicesVec};
10995       return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
10996                               PSHUFBBuilder);
10997     }
10998     break;
10999   case MVT::v16i16:
11000     if (Subtarget.hasVLX() && Subtarget.hasBWI())
11001       Opcode = X86ISD::VPERMV;
11002     else if (Subtarget.hasAVX()) {
11003       // Scale to v32i8 and perform as v32i8.
11004       IndicesVec = ScaleIndices(IndicesVec, 2);
11005       return DAG.getBitcast(
11006           VT, createVariablePermute(
11007                   MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
11008                   DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
11009     }
11010     break;
11011   case MVT::v8f32:
11012   case MVT::v8i32:
11013     if (Subtarget.hasAVX2())
11014       Opcode = X86ISD::VPERMV;
11015     else if (Subtarget.hasAVX()) {
11016       SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
11017       SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11018                                           {0, 1, 2, 3, 0, 1, 2, 3});
11019       SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11020                                           {4, 5, 6, 7, 4, 5, 6, 7});
11021       if (Subtarget.hasXOP())
11022         return DAG.getBitcast(
11023             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
11024                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11025       // Permute Lo and Hi and then select based on index range.
11026       // This works as VPERMILPS only uses index bits[0:1] to permute elements.
11027       SDValue Res = DAG.getSelectCC(
11028           DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
11029           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
11030           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
11031           ISD::CondCode::SETGT);
11032       return DAG.getBitcast(VT, Res);
11033     }
11034     break;
11035   case MVT::v4i64:
11036   case MVT::v4f64:
11037     if (Subtarget.hasAVX512()) {
11038       if (!Subtarget.hasVLX()) {
11039         MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
11040         SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
11041                                 SDLoc(SrcVec));
11042         IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
11043                                     DAG, SDLoc(IndicesVec));
11044         SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
11045                                             DAG, Subtarget);
11046         return extract256BitVector(Res, 0, DAG, DL);
11047       }
11048       Opcode = X86ISD::VPERMV;
11049     } else if (Subtarget.hasAVX()) {
11050       SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
11051       SDValue LoLo =
11052           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
11053       SDValue HiHi =
11054           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
11055       // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
11056       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
11057       if (Subtarget.hasXOP())
11058         return DAG.getBitcast(
11059             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
11060                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11061       // Permute Lo and Hi and then select based on index range.
11062       // This works as VPERMILPD only uses index bit[1] to permute elements.
11063       SDValue Res = DAG.getSelectCC(
11064           DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
11065           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
11066           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
11067           ISD::CondCode::SETGT);
11068       return DAG.getBitcast(VT, Res);
11069     }
11070     break;
11071   case MVT::v64i8:
11072     if (Subtarget.hasVBMI())
11073       Opcode = X86ISD::VPERMV;
11074     break;
11075   case MVT::v32i16:
11076     if (Subtarget.hasBWI())
11077       Opcode = X86ISD::VPERMV;
11078     break;
11079   case MVT::v16f32:
11080   case MVT::v16i32:
11081   case MVT::v8f64:
11082   case MVT::v8i64:
11083     if (Subtarget.hasAVX512())
11084       Opcode = X86ISD::VPERMV;
11085     break;
11086   }
11087   if (!Opcode)
11088     return SDValue();
11089 
11090   assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
11091          (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
11092          "Illegal variable permute shuffle type");
11093 
11094   uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
11095   if (Scale > 1)
11096     IndicesVec = ScaleIndices(IndicesVec, Scale);
11097 
11098   EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
11099   IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
11100 
11101   SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
11102   SDValue Res = Opcode == X86ISD::VPERMV
11103                     ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
11104                     : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
11105   return DAG.getBitcast(VT, Res);
11106 }
11107 
11108 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
11109 // reasoned to be a permutation of a vector by indices in a non-constant vector.
11110 // (build_vector (extract_elt V, (extract_elt I, 0)),
11111 //               (extract_elt V, (extract_elt I, 1)),
11112 //                    ...
11113 // ->
11114 // (vpermv I, V)
11115 //
11116 // TODO: Handle undefs
11117 // TODO: Utilize pshufb and zero mask blending to support more efficient
11118 // construction of vectors with constant-0 elements.
11119 static SDValue
11120 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
11121                                    const X86Subtarget &Subtarget) {
11122   SDValue SrcVec, IndicesVec;
11123   // Check for a match of the permute source vector and permute index elements.
11124   // This is done by checking that the i-th build_vector operand is of the form:
11125   // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
11126   for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
11127     SDValue Op = V.getOperand(Idx);
11128     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11129       return SDValue();
11130 
11131     // If this is the first extract encountered in V, set the source vector,
11132     // otherwise verify the extract is from the previously defined source
11133     // vector.
11134     if (!SrcVec)
11135       SrcVec = Op.getOperand(0);
11136     else if (SrcVec != Op.getOperand(0))
11137       return SDValue();
11138     SDValue ExtractedIndex = Op->getOperand(1);
11139     // Peek through extends.
11140     if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
11141         ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
11142       ExtractedIndex = ExtractedIndex.getOperand(0);
11143     if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11144       return SDValue();
11145 
11146     // If this is the first extract from the index vector candidate, set the
11147     // indices vector, otherwise verify the extract is from the previously
11148     // defined indices vector.
11149     if (!IndicesVec)
11150       IndicesVec = ExtractedIndex.getOperand(0);
11151     else if (IndicesVec != ExtractedIndex.getOperand(0))
11152       return SDValue();
11153 
11154     auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
11155     if (!PermIdx || PermIdx->getAPIntValue() != Idx)
11156       return SDValue();
11157   }
11158 
11159   SDLoc DL(V);
11160   MVT VT = V.getSimpleValueType();
11161   return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
11162 }
11163 
11164 SDValue
11165 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
11166   SDLoc dl(Op);
11167 
11168   MVT VT = Op.getSimpleValueType();
11169   MVT EltVT = VT.getVectorElementType();
11170   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
11171   unsigned NumElems = Op.getNumOperands();
11172 
11173   // Generate vectors for predicate vectors.
11174   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
11175     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
11176 
11177   if (VT.getVectorElementType() == MVT::bf16 && Subtarget.hasBF16())
11178     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
11179 
11180   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
11181     return VectorConstant;
11182 
11183   unsigned EVTBits = EltVT.getSizeInBits();
11184   APInt UndefMask = APInt::getZero(NumElems);
11185   APInt FrozenUndefMask = APInt::getZero(NumElems);
11186   APInt ZeroMask = APInt::getZero(NumElems);
11187   APInt NonZeroMask = APInt::getZero(NumElems);
11188   bool IsAllConstants = true;
11189   SmallSet<SDValue, 8> Values;
11190   unsigned NumConstants = NumElems;
11191   for (unsigned i = 0; i < NumElems; ++i) {
11192     SDValue Elt = Op.getOperand(i);
11193     if (Elt.isUndef()) {
11194       UndefMask.setBit(i);
11195       continue;
11196     }
11197     if (Elt.getOpcode() == ISD::FREEZE && Elt.getOperand(0).isUndef()) {
11198       FrozenUndefMask.setBit(i);
11199       continue;
11200     }
11201     Values.insert(Elt);
11202     if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
11203       IsAllConstants = false;
11204       NumConstants--;
11205     }
11206     if (X86::isZeroNode(Elt)) {
11207       ZeroMask.setBit(i);
11208     } else {
11209       NonZeroMask.setBit(i);
11210     }
11211   }
11212 
11213   // All undef vector. Return an UNDEF.
11214   if (UndefMask.isAllOnes())
11215     return DAG.getUNDEF(VT);
11216 
11217   // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
11218   // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
11219   // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
11220   // and blend the FREEZE-UNDEF operands back in.
11221   // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
11222   if (unsigned NumFrozenUndefElts = FrozenUndefMask.countPopulation();
11223       NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
11224     SmallVector<int, 16> BlendMask(NumElems, -1);
11225     SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
11226     for (unsigned i = 0; i < NumElems; ++i) {
11227       if (UndefMask[i]) {
11228         BlendMask[i] = -1;
11229         continue;
11230       }
11231       BlendMask[i] = i;
11232       if (!FrozenUndefMask[i])
11233         Elts[i] = Op.getOperand(i);
11234       else
11235         BlendMask[i] += NumElems;
11236     }
11237     SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
11238     SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
11239     SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
11240     return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
11241   }
11242 
11243   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
11244 
11245   // If the upper elts of a ymm/zmm are undef/zero then we might be better off
11246   // lowering to a smaller build vector and padding with undef/zero.
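        // For example (a sketch): (v8i32 build_vector a, b, c, d, 0, 0, undef, undef)
        // can be rebuilt as (v4i32 build_vector a, b, c, d) widened back to v8i32
        // with the upper half zeroed.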
11247   if ((VT.is256BitVector() || VT.is512BitVector()) &&
11248       !isFoldableUseOfShuffle(BV)) {
11249     unsigned UpperElems = NumElems / 2;
11250     APInt UndefOrZeroMask = UndefMask | ZeroMask;
11251     unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countLeadingOnes();
11252     if (NumUpperUndefsOrZeros >= UpperElems) {
11253       if (VT.is512BitVector() &&
11254           NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
11255         UpperElems = NumElems - (NumElems / 4);
11256       bool UndefUpper = UndefMask.countLeadingOnes() >= UpperElems;
11257       MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
11258       SDValue NewBV =
11259           DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
11260       return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
11261     }
11262   }
11263 
11264   if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
11265     return AddSub;
11266   if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
11267     return HorizontalOp;
11268   if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
11269     return Broadcast;
11270   if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
11271     return BitOp;
11272 
11273   unsigned NumZero = ZeroMask.countPopulation();
11274   unsigned NumNonZero = NonZeroMask.countPopulation();
11275 
11276   // If we are inserting one variable into a vector of non-zero constants, try
11277   // to avoid loading each constant element as a scalar. Load the constants as a
11278   // vector and then insert the variable scalar element. If insertion is not
11279   // supported, fall back to a shuffle to get the scalar blended with the
11280   // constants. Insertion into a zero vector is handled as a special-case
11281   // somewhere below here.
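        // For illustration: (v4i32 build_vector 1, X, 3, 4) becomes a constant pool
        // load of <1, undef, 3, 4> followed by inserting X at index 1 (or a blend
        // via shuffle if insertion isn't supported).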
11282   if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
11283       (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
11284        isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
11285     // Create an all-constant vector. The variable element in the old
11286     // build vector is replaced by undef in the constant vector. Save the
11287     // variable scalar element and its index for use in the insertelement.
11288     LLVMContext &Context = *DAG.getContext();
11289     Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
11290     SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
11291     SDValue VarElt;
11292     SDValue InsIndex;
11293     for (unsigned i = 0; i != NumElems; ++i) {
11294       SDValue Elt = Op.getOperand(i);
11295       if (auto *C = dyn_cast<ConstantSDNode>(Elt))
11296         ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
11297       else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
11298         ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
11299       else if (!Elt.isUndef()) {
11300         assert(!VarElt.getNode() && !InsIndex.getNode() &&
11301                "Expected one variable element in this vector");
11302         VarElt = Elt;
11303         InsIndex = DAG.getVectorIdxConstant(i, dl);
11304       }
11305     }
11306     Constant *CV = ConstantVector::get(ConstVecOps);
11307     SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
11308 
11309     // The constants we just created may not be legal (e.g., floating point). We
11310     // must lower the vector right here because we cannot guarantee that we'll
11311     // legalize it before loading it. This is also why we could not just create
11312     // a new build vector here. If the build vector contains illegal constants,
11313     // it could get split back up into a series of insert elements.
11314     // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
11315     SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
11316     MachineFunction &MF = DAG.getMachineFunction();
11317     MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
11318     SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
11319     unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
11320     unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
11321     if (InsertC < NumEltsInLow128Bits)
11322       return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
11323 
11324     // There's no good way to insert into the high elements of a >128-bit
11325     // vector, so use shuffles to avoid an extract/insert sequence.
11326     assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
11327     assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
11328     SmallVector<int, 8> ShuffleMask;
11329     unsigned NumElts = VT.getVectorNumElements();
11330     for (unsigned i = 0; i != NumElts; ++i)
11331       ShuffleMask.push_back(i == InsertC ? NumElts : i);
11332     SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
11333     return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
11334   }
11335 
11336   // Special case for a single non-zero, non-undef element.
11337   if (NumNonZero == 1) {
11338     unsigned Idx = NonZeroMask.countTrailingZeros();
11339     SDValue Item = Op.getOperand(Idx);
11340 
11341     // If we have a constant or non-constant insertion into the low element of
11342     // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
11343     // the rest of the elements.  This will be matched as movd/movq/movss/movsd
11344     // depending on what the source datatype is.
11345     if (Idx == 0) {
11346       if (NumZero == 0)
11347         return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11348 
11349       if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
11350           EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
11351           (EltVT == MVT::i16 && Subtarget.hasFP16())) {
11352         assert((VT.is128BitVector() || VT.is256BitVector() ||
11353                 VT.is512BitVector()) &&
11354                "Expected an SSE value type!");
11355         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11356         // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
11357         // zero vector.
11358         return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11359       }
11360 
11361       // We can't directly insert an i8 or i16 into a vector, so zero extend
11362       // it to i32 first.
11363       if (EltVT == MVT::i16 || EltVT == MVT::i8) {
11364         Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
11365         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
11366         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
11367         Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11368         return DAG.getBitcast(VT, Item);
11369       }
11370     }
11371 
11372     // Is it a vector logical left shift?
11373     if (NumElems == 2 && Idx == 1 &&
11374         X86::isZeroNode(Op.getOperand(0)) &&
11375         !X86::isZeroNode(Op.getOperand(1))) {
11376       unsigned NumBits = VT.getSizeInBits();
11377       return getVShift(true, VT,
11378                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11379                                    VT, Op.getOperand(1)),
11380                        NumBits/2, DAG, *this, dl);
11381     }
11382 
11383     if (IsAllConstants) // Otherwise, it's better to do a constpool load.
11384       return SDValue();
11385 
11386     // Otherwise, if this is a vector with i32 or f32 elements, and the element
11387     // is a non-constant being inserted into an element other than the low one,
11388     // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
11389     // movd/movss) to move this into the low element, then shuffle it into
11390     // place.
11391     if (EVTBits == 32) {
11392       Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11393       return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
11394     }
11395   }
11396 
11397   // Splat is obviously ok. Let legalizer expand it to a shuffle.
11398   if (Values.size() == 1) {
11399     if (EVTBits == 32) {
11400       // Instead of a shuffle like this:
11401       // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
11402       // Check if it's possible to issue this instead.
11403       // shuffle (vload ptr), undef, <1, 1, 1, 1>
11404       unsigned Idx = NonZeroMask.countTrailingZeros();
11405       SDValue Item = Op.getOperand(Idx);
11406       if (Op.getNode()->isOnlyUserOf(Item.getNode()))
11407         return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
11408     }
11409     return SDValue();
11410   }
11411 
11412   // A vector full of immediates; various special cases are already
11413   // handled, so this is best done with a single constant-pool load.
11414   if (IsAllConstants)
11415     return SDValue();
11416 
11417   if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
11418     return V;
11419 
11420   // See if we can use a vector load to get all of the elements.
11421   {
11422     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
11423     if (SDValue LD =
11424             EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
11425       return LD;
11426   }
11427 
11428   // If this is a splat of pairs of 32-bit elements, we can use a narrower
11429   // build_vector and broadcast it.
11430   // TODO: We could probably generalize this more.
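        // Sketch of the transform below (illustrative):
        //   (v8f32 build_vector a, b, a, b, a, b, a, b)
        //   --> (v8f32 bitcast (v4f64 X86ISD::VBROADCAST
        //          (v2f64 bitcast (v4f32 build_vector a, b, undef, undef))))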
11431   if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
11432     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
11433                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
11434     auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
11435       // Make sure all the even/odd operands match.
11436       for (unsigned i = 2; i != NumElems; ++i)
11437         if (Ops[i % 2] != Op.getOperand(i))
11438           return false;
11439       return true;
11440     };
11441     if (CanSplat(Op, NumElems, Ops)) {
11442       MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
11443       MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
11444       // Create a new build vector and cast to v2i64/v2f64.
11445       SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
11446                                      DAG.getBuildVector(NarrowVT, dl, Ops));
11447       // Broadcast from v2i64/v2f64 and cast to final VT.
11448       MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
11449       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
11450                                             NewBV));
11451     }
11452   }
11453 
11454   // For AVX-length vectors, build the individual 128-bit pieces and use
11455   // shuffles to put them in place.
11456   if (VT.getSizeInBits() > 128) {
11457     MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
11458 
11459     // Build both the lower and upper subvector.
11460     SDValue Lower =
11461         DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
11462     SDValue Upper = DAG.getBuildVector(
11463         HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
11464 
11465     // Recreate the wider vector with the lower and upper part.
11466     return concatSubVectors(Lower, Upper, DAG, dl);
11467   }
11468 
11469   // Let legalizer expand 2-wide build_vectors.
11470   if (EVTBits == 64) {
11471     if (NumNonZero == 1) {
11472       // One half is zero or undef.
11473       unsigned Idx = NonZeroMask.countTrailingZeros();
11474       SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
11475                                Op.getOperand(Idx));
11476       return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
11477     }
11478     return SDValue();
11479   }
11480 
11481   // If element VT is < 32 bits, convert it to inserts into a zero vector.
11482   if (EVTBits == 8 && NumElems == 16)
11483     if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
11484                                           DAG, Subtarget))
11485       return V;
11486 
11487   if (EltVT == MVT::i16 && NumElems == 8)
11488     if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
11489                                           DAG, Subtarget))
11490       return V;
11491 
11492   // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
11493   if (EVTBits == 32 && NumElems == 4)
11494     if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
11495       return V;
11496 
11497   // If element VT is == 32 bits, turn it into a number of shuffles.
11498   if (NumElems == 4 && NumZero > 0) {
11499     SmallVector<SDValue, 8> Ops(NumElems);
11500     for (unsigned i = 0; i < 4; ++i) {
11501       bool isZero = !NonZeroMask[i];
11502       if (isZero)
11503         Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
11504       else
11505         Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11506     }
11507 
11508     for (unsigned i = 0; i < 2; ++i) {
11509       switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
11510         default: llvm_unreachable("Unexpected NonZero count");
11511         case 0:
11512           Ops[i] = Ops[i*2];  // Must be a zero vector.
11513           break;
11514         case 1:
11515           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
11516           break;
11517         case 2:
11518           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11519           break;
11520         case 3:
11521           Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11522           break;
11523       }
11524     }
11525 
11526     bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
11527     bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
11528     int MaskVec[] = {
11529       Reverse1 ? 1 : 0,
11530       Reverse1 ? 0 : 1,
11531       static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
11532       static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
11533     };
11534     return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
11535   }
11536 
11537   assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
11538 
11539   // Check for a build vector built mostly from a shuffle plus a few insertions.
11540   if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
11541     return Sh;
11542 
11543   // For SSE 4.1, use insertps to put the high elements into the low element.
11544   if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
11545     SDValue Result;
11546     if (!Op.getOperand(0).isUndef())
11547       Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
11548     else
11549       Result = DAG.getUNDEF(VT);
11550 
11551     for (unsigned i = 1; i < NumElems; ++i) {
11552       if (Op.getOperand(i).isUndef()) continue;
11553       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
11554                            Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
11555     }
11556     return Result;
11557   }
11558 
11559   // Otherwise, expand into a number of unpckl*; start by extending each of
11560   // our (non-undef) elements to the full vector width with the element in the
11561   // bottom slot of the vector (which generates no code for SSE).
11562   SmallVector<SDValue, 8> Ops(NumElems);
11563   for (unsigned i = 0; i < NumElems; ++i) {
11564     if (!Op.getOperand(i).isUndef())
11565       Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11566     else
11567       Ops[i] = DAG.getUNDEF(VT);
11568   }
11569 
11570   // Next, we iteratively mix elements, e.g. for v4f32:
11571   //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
11572   //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
11573   //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
11574   for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
11575     // Generate scaled UNPCKL shuffle mask.
11576     SmallVector<int, 16> Mask;
11577     for (unsigned i = 0; i != Scale; ++i)
11578       Mask.push_back(i);
11579     for (unsigned i = 0; i != Scale; ++i)
11580       Mask.push_back(NumElems+i);
11581     Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
11582 
11583     for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
11584       Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
11585   }
11586   return Ops[0];
11587 }
11588 
11589 // 256-bit AVX can use the vinsertf128 instruction
11590 // to create 256-bit vectors from two other 128-bit ones.
11591 // TODO: Detect subvector broadcast here instead of DAG combine?
11592 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
11593                                       const X86Subtarget &Subtarget) {
11594   SDLoc dl(Op);
11595   MVT ResVT = Op.getSimpleValueType();
11596 
11597   assert((ResVT.is256BitVector() ||
11598           ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
11599 
11600   unsigned NumOperands = Op.getNumOperands();
11601   unsigned NumFreezeUndef = 0;
11602   unsigned NumZero = 0;
11603   unsigned NumNonZero = 0;
11604   unsigned NonZeros = 0;
11605   for (unsigned i = 0; i != NumOperands; ++i) {
11606     SDValue SubVec = Op.getOperand(i);
11607     if (SubVec.isUndef())
11608       continue;
11609     if (ISD::isFreezeUndef(SubVec.getNode()) && SubVec.hasOneUse())
11610       ++NumFreezeUndef;
11611     else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11612       ++NumZero;
11613     else {
11614       assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11615       NonZeros |= 1 << i;
11616       ++NumNonZero;
11617     }
11618   }
11619 
11620   // If we have more than 2 non-zeros, build each half separately.
11621   if (NumNonZero > 2) {
11622     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11623     ArrayRef<SDUse> Ops = Op->ops();
11624     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11625                              Ops.slice(0, NumOperands/2));
11626     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11627                              Ops.slice(NumOperands/2));
11628     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11629   }
11630 
11631   // Otherwise, build it up through insert_subvectors.
11632   SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
11633                         : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
11634                                           : DAG.getUNDEF(ResVT));
11635 
11636   MVT SubVT = Op.getOperand(0).getSimpleValueType();
11637   unsigned NumSubElems = SubVT.getVectorNumElements();
11638   for (unsigned i = 0; i != NumOperands; ++i) {
11639     if ((NonZeros & (1 << i)) == 0)
11640       continue;
11641 
11642     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
11643                       Op.getOperand(i),
11644                       DAG.getIntPtrConstant(i * NumSubElems, dl));
11645   }
11646 
11647   return Vec;
11648 }
11649 
11650 // Lower vXi1 CONCAT_VECTORS, in particular when the node is a type promotion
11651 // (by concatenating i1 zeros) of the result of a node that already zeros all
11652 // upper bits of a k-register.
11653 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
11654 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
11655                                        const X86Subtarget &Subtarget,
11656                                        SelectionDAG &DAG) {
11657   SDLoc dl(Op);
11658   MVT ResVT = Op.getSimpleValueType();
11659   unsigned NumOperands = Op.getNumOperands();
11660 
11661   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11662          "Unexpected number of operands in CONCAT_VECTORS");
11663 
11664   uint64_t Zeros = 0;
11665   uint64_t NonZeros = 0;
11666   for (unsigned i = 0; i != NumOperands; ++i) {
11667     SDValue SubVec = Op.getOperand(i);
11668     if (SubVec.isUndef())
11669       continue;
11670     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11671     if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11672       Zeros |= (uint64_t)1 << i;
11673     else
11674       NonZeros |= (uint64_t)1 << i;
11675   }
11676 
11677   unsigned NumElems = ResVT.getVectorNumElements();
11678 
11679   // If we are inserting a non-zero vector and there are zeros in the LSBs and
11680   // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
11681   // insert_subvector will give us two kshifts.
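        // For example (a sketch): (v8i1 concat_vectors (v2i1 zero), (v2i1 X), undef,
        // undef) can be lowered as a single KSHIFTL of X by 2 instead of two shifts.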
11682   if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
11683       Log2_64(NonZeros) != NumOperands - 1) {
11684     MVT ShiftVT = ResVT;
11685     if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
11686       ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
11687     unsigned Idx = Log2_64(NonZeros);
11688     SDValue SubVec = Op.getOperand(Idx);
11689     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11690     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
11691                          DAG.getUNDEF(ShiftVT), SubVec,
11692                          DAG.getIntPtrConstant(0, dl));
11693     Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
11694                      DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
11695     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
11696                        DAG.getIntPtrConstant(0, dl));
11697   }
11698 
11699   // If there are zero or one non-zero subvectors, we can handle this very simply.
11700   if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
11701     SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
11702     if (!NonZeros)
11703       return Vec;
11704     unsigned Idx = Log2_64(NonZeros);
11705     SDValue SubVec = Op.getOperand(Idx);
11706     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11707     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
11708                        DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
11709   }
11710 
11711   if (NumOperands > 2) {
11712     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11713     ArrayRef<SDUse> Ops = Op->ops();
11714     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11715                              Ops.slice(0, NumOperands/2));
11716     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11717                              Ops.slice(NumOperands/2));
11718     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11719   }
11720 
11721   assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
11722 
11723   if (ResVT.getVectorNumElements() >= 16)
11724     return Op; // The operation is legal with KUNPCK
11725 
11726   SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
11727                             DAG.getUNDEF(ResVT), Op.getOperand(0),
11728                             DAG.getIntPtrConstant(0, dl));
11729   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
11730                      DAG.getIntPtrConstant(NumElems/2, dl));
11731 }
11732 
11733 static SDValue LowerCONCAT_VECTORS(SDValue Op,
11734                                    const X86Subtarget &Subtarget,
11735                                    SelectionDAG &DAG) {
11736   MVT VT = Op.getSimpleValueType();
11737   if (VT.getVectorElementType() == MVT::i1)
11738     return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
11739 
11740   assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
11741          (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
11742           Op.getNumOperands() == 4)));
11743 
11744   // AVX can use the vinsertf128 instruction to create 256-bit vectors
11745   // from two other 128-bit ones.
11746 
11747   // A 512-bit vector may contain two 256-bit vectors or four 128-bit vectors.
11748   return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
11749 }
11750 
11751 //===----------------------------------------------------------------------===//
11752 // Vector shuffle lowering
11753 //
11754 // This is an experimental code path for lowering vector shuffles on x86. It is
11755 // designed to handle arbitrary vector shuffles and blends, gracefully
11756 // degrading performance as necessary. It works hard to recognize idiomatic
11757 // shuffles and lower them to optimal instruction patterns without leaving
11758 // a framework that allows reasonably efficient handling of all vector shuffle
11759 // patterns.
11760 //===----------------------------------------------------------------------===//
11761 
11762 /// Tiny helper function to identify a no-op mask.
11763 ///
11764 /// This is a somewhat boring predicate function. It checks whether the mask
11765 /// array input, which is assumed to be a single-input shuffle mask of the kind
11766 /// used by the X86 shuffle instructions (not a fully general
11767 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
11768 /// in-place shuffle are 'no-op's.
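      /// For example, <0, -1, 2, 3> and <-1, -1, -1, -1> are no-op masks, while
      /// <1, 0, 2, 3> is not.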
11769 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
11770   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11771     assert(Mask[i] >= -1 && "Out of bound mask element!");
11772     if (Mask[i] >= 0 && Mask[i] != i)
11773       return false;
11774   }
11775   return true;
11776 }
11777 
11778 /// Test whether there are elements crossing LaneSizeInBits lanes in this
11779 /// shuffle mask.
11780 ///
11781 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
11782 /// and we routinely test for these.
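      /// For example, for v8f32 (two 128-bit lanes), <3, 2, 1, 0, 7, 6, 5, 4> stays
      /// within its lanes, while <4, 5, 6, 7, 0, 1, 2, 3> crosses them.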
11783 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
11784                                       unsigned ScalarSizeInBits,
11785                                       ArrayRef<int> Mask) {
11786   assert(LaneSizeInBits && ScalarSizeInBits &&
11787          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11788          "Illegal shuffle lane size");
11789   int LaneSize = LaneSizeInBits / ScalarSizeInBits;
11790   int Size = Mask.size();
11791   for (int i = 0; i < Size; ++i)
11792     if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
11793       return true;
11794   return false;
11795 }
11796 
11797 /// Test whether there are elements crossing 128-bit lanes in this
11798 /// shuffle mask.
11799 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
11800   return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
11801 }
11802 
11803 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
11804 /// from multiple lanes - this is different to isLaneCrossingShuffleMask to
11805 /// better support 'repeated mask + lane permute' style shuffles.
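      /// For example, for v8f32 with 128-bit lanes, <4, 5, 6, 7, 0, 1, 2, 3> crosses
      /// lanes but each lane sources from a single lane (returns false), whereas
      /// <0, 4, 1, 5, 2, 6, 3, 7> pulls each lane's elements from both source lanes
      /// (returns true).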
11806 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
11807                                    unsigned ScalarSizeInBits,
11808                                    ArrayRef<int> Mask) {
11809   assert(LaneSizeInBits && ScalarSizeInBits &&
11810          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11811          "Illegal shuffle lane size");
11812   int NumElts = Mask.size();
11813   int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
11814   int NumLanes = NumElts / NumEltsPerLane;
11815   if (NumLanes > 1) {
11816     for (int i = 0; i != NumLanes; ++i) {
11817       int SrcLane = -1;
11818       for (int j = 0; j != NumEltsPerLane; ++j) {
11819         int M = Mask[(i * NumEltsPerLane) + j];
11820         if (M < 0)
11821           continue;
11822         int Lane = (M % NumElts) / NumEltsPerLane;
11823         if (SrcLane >= 0 && SrcLane != Lane)
11824           return true;
11825         SrcLane = Lane;
11826       }
11827     }
11828   }
11829   return false;
11830 }
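// In contrast to the lane-crossing test above, the mask
// {4, 5, 6, 7, 0, 1, 2, 3} is not multi-lane: each destination lane reads
// from a single source lane, so a repeated-mask + lane-permute lowering still
// applies. A mask such as {0, 4, 1, 5, 0, 4, 1, 5} is multi-lane, since lane
// 0 mixes elements from source lanes 0 and 1.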
11831 
11832 /// Test whether a shuffle mask is equivalent within each sub-lane.
11833 ///
11834 /// This checks a shuffle mask to see if it is performing the same
11835 /// lane-relative shuffle in each sub-lane. This trivially implies
11836 /// that it is also not lane-crossing. It may however involve a blend from the
11837 /// same lane of a second vector.
11838 ///
11839 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
11840 /// non-trivial to compute in the face of undef lanes. The representation is
11841 /// suitable for use with existing 128-bit shuffles as entries from the second
11842 /// vector have been remapped to [LaneSize, 2*LaneSize).
11843 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
11844                                   ArrayRef<int> Mask,
11845                                   SmallVectorImpl<int> &RepeatedMask) {
11846   auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
11847   RepeatedMask.assign(LaneSize, -1);
11848   int Size = Mask.size();
11849   for (int i = 0; i < Size; ++i) {
11850     assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
11851     if (Mask[i] < 0)
11852       continue;
11853     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11854       // This entry crosses lanes, so there is no way to model this shuffle.
11855       return false;
11856 
11857     // Ok, handle the in-lane shuffles by detecting if and when they repeat.
11858     // Adjust second vector indices to start at LaneSize instead of Size.
11859     int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
11860                                 : Mask[i] % LaneSize + LaneSize;
11861     if (RepeatedMask[i % LaneSize] < 0)
11862       // This is the first non-undef entry in this slot of a 128-bit lane.
11863       RepeatedMask[i % LaneSize] = LocalM;
11864     else if (RepeatedMask[i % LaneSize] != LocalM)
11865       // Found a mismatch with the repeated mask.
11866       return false;
11867   }
11868   return true;
11869 }
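// For example, for v8f32 with 128-bit lanes the mask
// {0, 8, 2, 10, 4, 12, 6, 14} is repeated: both lanes perform the lane-local
// shuffle {0, 4, 2, 6}, with the V2 entries remapped into [4, 8).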
11870 
11871 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
11872 static bool
11873 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11874                                 SmallVectorImpl<int> &RepeatedMask) {
11875   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11876 }
11877 
11878 static bool
11879 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
11880   SmallVector<int, 32> RepeatedMask;
11881   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11882 }
11883 
11884 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
11885 static bool
11886 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11887                                 SmallVectorImpl<int> &RepeatedMask) {
11888   return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
11889 }
11890 
11891 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11892 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11893 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
11894                                         unsigned EltSizeInBits,
11895                                         ArrayRef<int> Mask,
11896                                         SmallVectorImpl<int> &RepeatedMask) {
11897   int LaneSize = LaneSizeInBits / EltSizeInBits;
11898   RepeatedMask.assign(LaneSize, SM_SentinelUndef);
11899   int Size = Mask.size();
11900   for (int i = 0; i < Size; ++i) {
11901     assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
11902     if (Mask[i] == SM_SentinelUndef)
11903       continue;
11904     if (Mask[i] == SM_SentinelZero) {
11905       if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
11906         return false;
11907       RepeatedMask[i % LaneSize] = SM_SentinelZero;
11908       continue;
11909     }
11910     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11911       // This entry crosses lanes, so there is no way to model this shuffle.
11912       return false;
11913 
11914     // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
11915     // later vector indices to start at multiples of LaneSize instead of Size.
11916     int LaneM = Mask[i] / Size;
11917     int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
11918     if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
11919       // This is the first non-undef entry in this slot of a 128-bit lane.
11920       RepeatedMask[i % LaneSize] = LocalM;
11921     else if (RepeatedMask[i % LaneSize] != LocalM)
11922       // Found a mismatch with the repeated mask.
11923       return false;
11924   }
11925   return true;
11926 }
11927 
11928 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11929 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11930 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
11931                                         ArrayRef<int> Mask,
11932                                         SmallVectorImpl<int> &RepeatedMask) {
11933   return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
11934                                      Mask, RepeatedMask);
11935 }
11936 
11937 /// Checks whether the vector elements referenced by two shuffle masks are
11938 /// equivalent.
11939 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
11940                                 int Idx, int ExpectedIdx) {
11941   assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
11942          ExpectedIdx < MaskSize && "Out of range element index");
11943   if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
11944     return false;
11945 
11946   switch (Op.getOpcode()) {
11947   case ISD::BUILD_VECTOR:
11948     // If the values are build vectors, we can look through them to find
11949     // equivalent inputs that make the shuffles equivalent.
11950     // TODO: Handle MaskSize != Op.getNumOperands()?
11951     if (MaskSize == (int)Op.getNumOperands() &&
11952         MaskSize == (int)ExpectedOp.getNumOperands())
11953       return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
11954     break;
11955   case X86ISD::VBROADCAST:
11956   case X86ISD::VBROADCAST_LOAD:
11957     // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
11958     return (Op == ExpectedOp &&
11959             (int)Op.getValueType().getVectorNumElements() == MaskSize);
11960   case X86ISD::HADD:
11961   case X86ISD::HSUB:
11962   case X86ISD::FHADD:
11963   case X86ISD::FHSUB:
11964   case X86ISD::PACKSS:
11965   case X86ISD::PACKUS:
11966     // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
11967     // TODO: Handle MaskSize != NumElts?
11968     // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
11969     if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
11970       MVT VT = Op.getSimpleValueType();
11971       int NumElts = VT.getVectorNumElements();
11972       if (MaskSize == NumElts) {
11973         int NumLanes = VT.getSizeInBits() / 128;
11974         int NumEltsPerLane = NumElts / NumLanes;
11975         int NumHalfEltsPerLane = NumEltsPerLane / 2;
11976         bool SameLane =
11977             (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
11978         bool SameElt =
11979             (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
11980         return SameLane && SameElt;
11981       }
11982     }
11983     break;
11984   }
11985 
11986   return false;
11987 }
11988 
11989 /// Checks whether a shuffle mask is equivalent to an explicit list of
11990 /// arguments.
11991 ///
11992 /// This is a fast way to test a shuffle mask against a fixed pattern:
11993 ///
11994 ///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
11995 ///
11996 /// It returns true if the mask is exactly as wide as the expected mask, and
11997 /// each element of the mask is either -1 (signifying undef) or the value given
11998 /// in the expected mask.
11999 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
12000                                 SDValue V1 = SDValue(),
12001                                 SDValue V2 = SDValue()) {
12002   int Size = Mask.size();
12003   if (Size != (int)ExpectedMask.size())
12004     return false;
12005 
12006   for (int i = 0; i < Size; ++i) {
12007     assert(Mask[i] >= -1 && "Out of bound mask element!");
12008     int MaskIdx = Mask[i];
12009     int ExpectedIdx = ExpectedMask[i];
12010     if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
12011       SDValue MaskV = MaskIdx < Size ? V1 : V2;
12012       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12013       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
12014       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12015       if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
12016         return false;
12017     }
12018   }
12019   return true;
12020 }
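// For example, the mask {-1, 2, -1, 0} is equivalent to the pattern
// {1, 2, 3, 0} because undef elements match anything, while {0, 1, 2, 3} is
// not equivalent to {1, 0, 2, 3} unless V1/V2 are supplied and
// IsElementEquivalent can prove the referenced elements are identical.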
12021 
12022 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
12023 ///
12024 /// The masks must be exactly the same width.
12025 ///
12026 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
12027 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
12028 ///
12029 /// SM_SentinelZero is accepted as a valid negative index but must match in
12030 /// both, or via a known bits test.
12031 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
12032                                       ArrayRef<int> ExpectedMask,
12033                                       const SelectionDAG &DAG,
12034                                       SDValue V1 = SDValue(),
12035                                       SDValue V2 = SDValue()) {
12036   int Size = Mask.size();
12037   if (Size != (int)ExpectedMask.size())
12038     return false;
12039   assert(llvm::all_of(ExpectedMask,
12040                       [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
12041          "Illegal target shuffle mask");
12042 
12043   // Check for out-of-range target shuffle mask indices.
12044   if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
12045     return false;
12046 
12047   // Don't use V1/V2 if they're not the same size as the shuffle mask type.
12048   if (V1 && V1.getValueSizeInBits() != VT.getSizeInBits())
12049     V1 = SDValue();
12050   if (V2 && V2.getValueSizeInBits() != VT.getSizeInBits())
12051     V2 = SDValue();
12052 
12053   APInt ZeroV1 = APInt::getNullValue(Size);
12054   APInt ZeroV2 = APInt::getNullValue(Size);
12055 
12056   for (int i = 0; i < Size; ++i) {
12057     int MaskIdx = Mask[i];
12058     int ExpectedIdx = ExpectedMask[i];
12059     if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
12060       continue;
12061     if (MaskIdx == SM_SentinelZero) {
12062       // If we need this expected index to be a zero element, then update the
12063       // relevant zero mask and perform the known-bits check at the end to
12064       // minimize repeated computation.
12065       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12066       if (ExpectedV &&
12067           Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
12068         int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12069         APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
12070         ZeroMask.setBit(BitIdx);
12071         continue;
12072       }
12073     }
12074     if (MaskIdx >= 0) {
12075       SDValue MaskV = MaskIdx < Size ? V1 : V2;
12076       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12077       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
12078       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12079       if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
12080         continue;
12081     }
12082     return false;
12083   }
12084   return (ZeroV1.isNullValue() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
12085          (ZeroV2.isNullValue() || DAG.MaskedVectorIsZero(V2, ZeroV2));
12086 }
12087 
12088 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
12089 // instructions.
12090 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
12091                                   const SelectionDAG &DAG) {
12092   if (VT != MVT::v8i32 && VT != MVT::v8f32)
12093     return false;
12094 
12095   SmallVector<int, 8> Unpcklwd;
12096   createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
12097                           /* Unary = */ false);
12098   SmallVector<int, 8> Unpckhwd;
12099   createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
12100                           /* Unary = */ false);
12101   bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
12102                          isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
12103   return IsUnpackwdMask;
12104 }
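// The patterns checked above are the usual word-interleave masks, i.e.
// {0, 8, 1, 9, 2, 10, 3, 11} for vpunpcklwd and {4, 12, 5, 13, 6, 14, 7, 15}
// for vpunpckhwd, tested against the v8i32/v8f32 shuffle mask.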
12105 
12106 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
12107                                       const SelectionDAG &DAG) {
12108   // Create 128-bit vector type based on mask size.
12109   MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
12110   MVT VT = MVT::getVectorVT(EltVT, Mask.size());
12111 
12112   // We can't assume a canonical shuffle mask, so try the commuted version too.
12113   SmallVector<int, 4> CommutedMask(Mask);
12114   ShuffleVectorSDNode::commuteMask(CommutedMask);
12115 
12116   // Match any of unary/binary or low/high.
12117   for (unsigned i = 0; i != 4; ++i) {
12118     SmallVector<int, 16> UnpackMask;
12119     createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
12120     if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
12121         isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
12122       return true;
12123   }
12124   return false;
12125 }
12126 
12127 /// Return true if a shuffle mask chooses elements identically in its top and
12128 /// bottom halves. For example, any splat mask has the same top and bottom
12129 /// halves. If an element is undefined in only one half of the mask, the halves
12130 /// are not considered identical.
12131 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
12132   assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
12133   unsigned HalfSize = Mask.size() / 2;
12134   for (unsigned i = 0; i != HalfSize; ++i) {
12135     if (Mask[i] != Mask[i + HalfSize])
12136       return false;
12137   }
12138   return true;
12139 }
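// For example, {0, 3, 0, 3} (and any splat such as {2, 2, 2, 2}) has
// identical halves, while {0, 3, 0, -1} does not, because the undef appears
// in only one half.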
12140 
12141 /// Get a 4-lane 8-bit shuffle immediate for a mask.
12142 ///
12143 /// This helper function produces an 8-bit shuffle immediate corresponding to
12144 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
12145 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
12146 /// example.
12147 ///
12148 /// NB: We rely heavily on "undef" masks preserving the input lane.
12149 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
12150   assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
12151   assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
12152   assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
12153   assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
12154   assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
12155 
12156   // If the mask only uses one non-undef element, then fully 'splat' it to
12157   // improve later broadcast matching.
12158   int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
12159   assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
12160 
12161   int FirstElt = Mask[FirstIndex];
12162   if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
12163     return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
12164 
12165   unsigned Imm = 0;
12166   Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
12167   Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
12168   Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
12169   Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
12170   return Imm;
12171 }
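// For example, the mask {3, 1, 2, 0} encodes as 0b00100111 (0x27), two bits
// per destination lane starting from the least-significant pair, while a
// single-element mask such as {-1, 2, -1, -1} is splatted to 0b10101010
// (0xAA) to help later broadcast matching.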
12172 
12173 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
12174                                           SelectionDAG &DAG) {
12175   return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
12176 }
12177 
12178 // The shuffle result has the form:
12179 // 0*, a[0], 0*, a[1], ..., 0*, a[n], n >= 0, where the a[] elements appear
12180 // in ascending order. Each element of Zeroable corresponds to a particular
12181 // element of Mask, as described in the computeZeroableShuffleElements function.
12182 //
12183 // The function looks for a sub-mask whose nonzero elements are in
12184 // increasing order. If such a sub-mask exists, the function returns true.
12185 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
12186                                      ArrayRef<int> Mask, const EVT &VectorType,
12187                                      bool &IsZeroSideLeft) {
12188   int NextElement = -1;
12189   // Check if the Mask's nonzero elements are in increasing order.
12190   for (int i = 0, e = Mask.size(); i < e; i++) {
12191     // Checks if the mask's zero elements are built from only zeros.
12192     assert(Mask[i] >= -1 && "Out of bound mask element!");
12193     if (Mask[i] < 0)
12194       return false;
12195     if (Zeroable[i])
12196       continue;
12197     // Find the lowest nonzero element.
12198     if (NextElement < 0) {
12199       NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
12200       IsZeroSideLeft = NextElement != 0;
12201     }
12202     // Exit if the mask's nonzero elements are not in increasing order.
12203     if (NextElement != Mask[i])
12204       return false;
12205     NextElement++;
12206   }
12207   return true;
12208 }
12209 
12210 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
12211 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
12212                                       ArrayRef<int> Mask, SDValue V1,
12213                                       SDValue V2, const APInt &Zeroable,
12214                                       const X86Subtarget &Subtarget,
12215                                       SelectionDAG &DAG) {
12216   int Size = Mask.size();
12217   int LaneSize = 128 / VT.getScalarSizeInBits();
12218   const int NumBytes = VT.getSizeInBits() / 8;
12219   const int NumEltBytes = VT.getScalarSizeInBits() / 8;
12220 
12221   assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
12222          (Subtarget.hasAVX2() && VT.is256BitVector()) ||
12223          (Subtarget.hasBWI() && VT.is512BitVector()));
12224 
12225   SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
12226   // Sign bit set in i8 mask means zero element.
12227   SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
12228 
12229   SDValue V;
12230   for (int i = 0; i < NumBytes; ++i) {
12231     int M = Mask[i / NumEltBytes];
12232     if (M < 0) {
12233       PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
12234       continue;
12235     }
12236     if (Zeroable[i / NumEltBytes]) {
12237       PSHUFBMask[i] = ZeroMask;
12238       continue;
12239     }
12240 
12241     // We can only use a single input of V1 or V2.
12242     SDValue SrcV = (M >= Size ? V2 : V1);
12243     if (V && V != SrcV)
12244       return SDValue();
12245     V = SrcV;
12246     M %= Size;
12247 
12248     // PSHUFB can't cross lanes, ensure this doesn't happen.
12249     if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
12250       return SDValue();
12251 
12252     M = M % LaneSize;
12253     M = M * NumEltBytes + (i % NumEltBytes);
12254     PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
12255   }
12256   assert(V && "Failed to find a source input");
12257 
12258   MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
12259   return DAG.getBitcast(
12260       VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
12261                       DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
12262 }
12263 
12264 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
12265                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
12266                            const SDLoc &dl);
12267 
12268 // X86 has a dedicated shuffle that can be lowered to VEXPAND.
12269 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
12270                                     const APInt &Zeroable,
12271                                     ArrayRef<int> Mask, SDValue &V1,
12272                                     SDValue &V2, SelectionDAG &DAG,
12273                                     const X86Subtarget &Subtarget) {
12274   bool IsLeftZeroSide = true;
12275   if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
12276                                 IsLeftZeroSide))
12277     return SDValue();
12278   unsigned VEXPANDMask = (~Zeroable).getZExtValue();
12279   MVT IntegerType =
12280       MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
12281   SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
12282   unsigned NumElts = VT.getVectorNumElements();
12283   assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
12284          "Unexpected number of vector elements");
12285   SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
12286                               Subtarget, DAG, DL);
12287   SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
12288   SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
12289   return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
12290 }
12291 
12292 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
12293                                   unsigned &UnpackOpcode, bool IsUnary,
12294                                   ArrayRef<int> TargetMask, const SDLoc &DL,
12295                                   SelectionDAG &DAG,
12296                                   const X86Subtarget &Subtarget) {
12297   int NumElts = VT.getVectorNumElements();
12298 
12299   bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
12300   for (int i = 0; i != NumElts; i += 2) {
12301     int M1 = TargetMask[i + 0];
12302     int M2 = TargetMask[i + 1];
12303     Undef1 &= (SM_SentinelUndef == M1);
12304     Undef2 &= (SM_SentinelUndef == M2);
12305     Zero1 &= isUndefOrZero(M1);
12306     Zero2 &= isUndefOrZero(M2);
12307   }
12308   assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
12309          "Zeroable shuffle detected");
12310 
12311   // Attempt to match the target mask against the unpack lo/hi mask patterns.
12312   SmallVector<int, 64> Unpckl, Unpckh;
12313   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
12314   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
12315                                 (IsUnary ? V1 : V2))) {
12316     UnpackOpcode = X86ISD::UNPCKL;
12317     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12318     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12319     return true;
12320   }
12321 
12322   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
12323   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
12324                                 (IsUnary ? V1 : V2))) {
12325     UnpackOpcode = X86ISD::UNPCKH;
12326     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12327     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12328     return true;
12329   }
12330 
12331   // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
12332   if (IsUnary && (Zero1 || Zero2)) {
12333     // Don't bother if we can blend instead.
12334     if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
12335         isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
12336       return false;
12337 
12338     bool MatchLo = true, MatchHi = true;
12339     for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
12340       int M = TargetMask[i];
12341 
12342       // Ignore if the input is known to be zero or the index is undef.
12343       if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
12344           (M == SM_SentinelUndef))
12345         continue;
12346 
12347       MatchLo &= (M == Unpckl[i]);
12348       MatchHi &= (M == Unpckh[i]);
12349     }
12350 
12351     if (MatchLo || MatchHi) {
12352       UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
12353       V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12354       V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12355       return true;
12356     }
12357   }
12358 
12359   // If a binary shuffle, commute and try again.
12360   if (!IsUnary) {
12361     ShuffleVectorSDNode::commuteMask(Unpckl);
12362     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
12363       UnpackOpcode = X86ISD::UNPCKL;
12364       std::swap(V1, V2);
12365       return true;
12366     }
12367 
12368     ShuffleVectorSDNode::commuteMask(Unpckh);
12369     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
12370       UnpackOpcode = X86ISD::UNPCKH;
12371       std::swap(V1, V2);
12372       return true;
12373     }
12374   }
12375 
12376   return false;
12377 }
12378 
12379 // X86 has dedicated unpack instructions that can handle specific blend
12380 // operations: UNPCKH and UNPCKL.
12381 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
12382                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
12383                                      SelectionDAG &DAG) {
12384   SmallVector<int, 8> Unpckl;
12385   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
12386   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12387     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
12388 
12389   SmallVector<int, 8> Unpckh;
12390   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
12391   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12392     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
12393 
12394   // Commute and try again.
12395   ShuffleVectorSDNode::commuteMask(Unpckl);
12396   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12397     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
12398 
12399   ShuffleVectorSDNode::commuteMask(Unpckh);
12400   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12401     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
12402 
12403   return SDValue();
12404 }
12405 
12406 /// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
12407 /// followed by unpack 256-bit.
12408 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
12409                                         ArrayRef<int> Mask, SDValue V1,
12410                                         SDValue V2, SelectionDAG &DAG) {
12411   SmallVector<int, 32> Unpckl, Unpckh;
12412   createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
12413   createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
12414 
12415   unsigned UnpackOpcode;
12416   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12417     UnpackOpcode = X86ISD::UNPCKL;
12418   else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12419     UnpackOpcode = X86ISD::UNPCKH;
12420   else
12421     return SDValue();
12422 
12423   // This is a "natural" unpack operation (rather than the 128-bit sectored
12424   // operation implemented by AVX). We need to rearrange 64-bit chunks of the
12425   // input in order to use the x86 instruction.
12426   V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
12427                             DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
12428   V1 = DAG.getBitcast(VT, V1);
12429   return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
12430 }
12431 
12432 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
12433 // source into the lower elements and zeroing the upper elements.
12434 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
12435                                  ArrayRef<int> Mask, const APInt &Zeroable,
12436                                  const X86Subtarget &Subtarget) {
12437   if (!VT.is512BitVector() && !Subtarget.hasVLX())
12438     return false;
12439 
12440   unsigned NumElts = Mask.size();
12441   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12442   unsigned MaxScale = 64 / EltSizeInBits;
12443 
12444   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12445     unsigned SrcEltBits = EltSizeInBits * Scale;
12446     if (SrcEltBits < 32 && !Subtarget.hasBWI())
12447       continue;
12448     unsigned NumSrcElts = NumElts / Scale;
12449     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
12450       continue;
12451     unsigned UpperElts = NumElts - NumSrcElts;
12452     if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12453       continue;
12454     SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
12455     SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
12456     DstVT = MVT::getIntegerVT(EltSizeInBits);
12457     if ((NumSrcElts * EltSizeInBits) >= 128) {
12458       // ISD::TRUNCATE
12459       DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
12460     } else {
12461       // X86ISD::VTRUNC
12462       DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
12463     }
12464     return true;
12465   }
12466 
12467   return false;
12468 }
12469 
12470 // Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
12471 // element padding to the final DstVT.
12472 static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
12473                                   const X86Subtarget &Subtarget,
12474                                   SelectionDAG &DAG, bool ZeroUppers) {
12475   MVT SrcVT = Src.getSimpleValueType();
12476   MVT DstSVT = DstVT.getScalarType();
12477   unsigned NumDstElts = DstVT.getVectorNumElements();
12478   unsigned NumSrcElts = SrcVT.getVectorNumElements();
12479   unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();
12480 
12481   if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
12482     return SDValue();
12483 
12484   // Perform a direct ISD::TRUNCATE if possible.
12485   if (NumSrcElts == NumDstElts)
12486     return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);
12487 
12488   if (NumSrcElts > NumDstElts) {
12489     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
12490     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
12491     return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
12492   }
12493 
12494   if ((NumSrcElts * DstEltSizeInBits) >= 128) {
12495     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
12496     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
12497     return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
12498                           DstVT.getSizeInBits());
12499   }
12500 
12501   // Non-VLX targets must truncate from a 512-bit type, so we need to
12502   // widen, truncate and then possibly extract the original subvector.
12503   if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
12504     SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
12505     return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
12506   }
12507 
12508   // Fallback to a X86ISD::VTRUNC, padding if necessary.
12509   MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
12510   SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
12511   if (DstVT != TruncVT)
12512     Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
12513                            DstVT.getSizeInBits());
12514   return Trunc;
12515 }
12516 
12517 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
12518 //
12519 // An example is the following:
12520 //
12521 // t0: ch = EntryToken
12522 //           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
12523 //         t25: v4i32 = truncate t2
12524 //       t41: v8i16 = bitcast t25
12525 //       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
12526 //       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
12527 //     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
12528 //   t18: v2i64 = bitcast t51
12529 //
12530 // One can just use a single vpmovdw instruction; without avx512vl we need to
12531 // use the zmm variant and extract the lower subvector, padding with zeroes.
12532 // TODO: Merge with lowerShuffleAsVTRUNC.
12533 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
12534                                      SDValue V2, ArrayRef<int> Mask,
12535                                      const APInt &Zeroable,
12536                                      const X86Subtarget &Subtarget,
12537                                      SelectionDAG &DAG) {
12538   assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
12539   if (!Subtarget.hasAVX512())
12540     return SDValue();
12541 
12542   unsigned NumElts = VT.getVectorNumElements();
12543   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12544   unsigned MaxScale = 64 / EltSizeInBits;
12545   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12546     unsigned SrcEltBits = EltSizeInBits * Scale;
12547     unsigned NumSrcElts = NumElts / Scale;
12548     unsigned UpperElts = NumElts - NumSrcElts;
12549     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
12550         !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12551       continue;
12552 
12553     // Attempt to find a matching source truncation, but as a fallback VLX
12554     // cases can use the VPMOV directly.
12555     SDValue Src = peekThroughBitcasts(V1);
12556     if (Src.getOpcode() == ISD::TRUNCATE &&
12557         Src.getScalarValueSizeInBits() == SrcEltBits) {
12558       Src = Src.getOperand(0);
12559     } else if (Subtarget.hasVLX()) {
12560       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12561       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12562       Src = DAG.getBitcast(SrcVT, Src);
12563       // Don't do this if PACKSS/PACKUS could perform it cheaper.
12564       if (Scale == 2 &&
12565           ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
12566            (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
12567         return SDValue();
12568     } else
12569       return SDValue();
12570 
12571     // VPMOVWB is only available with avx512bw.
12572     if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
12573       return SDValue();
12574 
12575     bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
12576     return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
12577   }
12578 
12579   return SDValue();
12580 }
12581 
12582 // Attempt to match binary shuffle patterns as a truncate.
12583 static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
12584                                     SDValue V2, ArrayRef<int> Mask,
12585                                     const APInt &Zeroable,
12586                                     const X86Subtarget &Subtarget,
12587                                     SelectionDAG &DAG) {
12588   assert((VT.is128BitVector() || VT.is256BitVector()) &&
12589          "Unexpected VTRUNC type");
12590   if (!Subtarget.hasAVX512())
12591     return SDValue();
12592 
12593   unsigned NumElts = VT.getVectorNumElements();
12594   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12595   unsigned MaxScale = 64 / EltSizeInBits;
12596   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12597     // TODO: Support non-BWI VPMOVWB truncations?
12598     unsigned SrcEltBits = EltSizeInBits * Scale;
12599     if (SrcEltBits < 32 && !Subtarget.hasBWI())
12600       continue;
12601 
12602     // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
12603     // Bail if the V2 elements are undef.
12604     unsigned NumHalfSrcElts = NumElts / Scale;
12605     unsigned NumSrcElts = 2 * NumHalfSrcElts;
12606     for (unsigned Offset = 0; Offset != Scale; ++Offset) {
12607       if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
12608           isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
12609         continue;
12610 
12611       // The elements beyond the truncation must be undef/zero.
12612       unsigned UpperElts = NumElts - NumSrcElts;
12613       if (UpperElts > 0 &&
12614           !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12615         continue;
12616       bool UndefUppers =
12617           UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
12618 
12619       // For offset truncations, ensure that the concat is cheap.
12620       if (Offset) {
12621         auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
12622           if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
12623               Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
12624             return Lo.getOperand(0) == Hi.getOperand(0);
12625           if (ISD::isNormalLoad(Lo.getNode()) &&
12626               ISD::isNormalLoad(Hi.getNode())) {
12627             auto *LDLo = cast<LoadSDNode>(Lo);
12628             auto *LDHi = cast<LoadSDNode>(Hi);
12629             return DAG.areNonVolatileConsecutiveLoads(
12630                 LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
12631           }
12632           return false;
12633         };
12634         if (!IsCheapConcat(V1, V2))
12635           continue;
12636       }
12637 
12638       // As we're using both sources, we need to concat them together
12639       // and truncate from the double-sized src.
12640       MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
12641       SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);
12642 
12643       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12644       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12645       Src = DAG.getBitcast(SrcVT, Src);
12646 
12647       // Shift the offset'd elements into place for the truncation.
12648       // TODO: Use getTargetVShiftByConstNode.
12649       if (Offset)
12650         Src = DAG.getNode(
12651             X86ISD::VSRLI, DL, SrcVT, Src,
12652             DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));
12653 
12654       return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
12655     }
12656   }
12657 
12658   return SDValue();
12659 }
12660 
12661 /// Check whether a compaction lowering can be done by dropping even/odd
12662 /// elements and compute how many times even/odd elements must be dropped.
12663 ///
12664 /// This handles shuffles which take every Nth element where N is a power of
12665 /// two. Example shuffle masks:
12666 ///
12667 /// (even)
12668 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
12669 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
12670 ///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
12671 ///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
12672 ///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
12673 ///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
12674 ///
12675 /// (odd)
12676 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
12677 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
12678 ///
12679 /// Any of these lanes can of course be undef.
12680 ///
12681 /// This routine only supports N <= 3.
12682 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
12683 /// for larger N.
12684 ///
12685 /// \returns N above, or the number of times even/odd elements must be dropped
12686 /// if there is such a number. Otherwise returns zero.
12687 static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
12688                                       bool IsSingleInput) {
12689   // The modulus for the shuffle vector entries is based on whether this is
12690   // a single input or not.
12691   int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
12692   assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
12693          "We should only be called with masks with a power-of-2 size!");
12694 
12695   uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
12696   int Offset = MatchEven ? 0 : 1;
12697 
12698   // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
12699   // and 2^3 simultaneously. This is because we may have ambiguity with
12700   // partially undef inputs.
12701   bool ViableForN[3] = {true, true, true};
12702 
12703   for (int i = 0, e = Mask.size(); i < e; ++i) {
12704     // Ignore undef lanes, we'll optimistically collapse them to the pattern we
12705     // want.
12706     if (Mask[i] < 0)
12707       continue;
12708 
12709     bool IsAnyViable = false;
12710     for (unsigned j = 0; j != std::size(ViableForN); ++j)
12711       if (ViableForN[j]) {
12712         uint64_t N = j + 1;
12713 
12714         // The shuffle mask must be equal to (i * 2^N) % M.
12715         if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
12716           IsAnyViable = true;
12717         else
12718           ViableForN[j] = false;
12719       }
12720     // Early exit if we exhaust the possible powers of two.
12721     if (!IsAnyViable)
12722       break;
12723   }
12724 
12725   for (unsigned j = 0; j != std::size(ViableForN); ++j)
12726     if (ViableForN[j])
12727       return j + 1;
12728 
12729   // Return 0 as there is no viable power of two.
12730   return 0;
12731 }
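// Working through the first even example above with a single input: the
// modulus is 16, so element i must equal (i * 2) % 16 = (i << 1) & 15, which
// wraps back to 0 at i = 8; only the N = 1 stride survives, so 1 is returned.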
12732 
12733 // X86 has dedicated pack instructions that can handle specific truncation
12734 // operations: PACKSS and PACKUS.
12735 // Checks for compaction shuffle masks if MaxStages > 1.
12736 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
12737 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
12738                                  unsigned &PackOpcode, ArrayRef<int> TargetMask,
12739                                  const SelectionDAG &DAG,
12740                                  const X86Subtarget &Subtarget,
12741                                  unsigned MaxStages = 1) {
12742   unsigned NumElts = VT.getVectorNumElements();
12743   unsigned BitSize = VT.getScalarSizeInBits();
12744   assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
12745          "Illegal maximum compaction");
12746 
12747   auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
12748     unsigned NumSrcBits = PackVT.getScalarSizeInBits();
12749     unsigned NumPackedBits = NumSrcBits - BitSize;
12750     N1 = peekThroughBitcasts(N1);
12751     N2 = peekThroughBitcasts(N2);
12752     unsigned NumBits1 = N1.getScalarValueSizeInBits();
12753     unsigned NumBits2 = N2.getScalarValueSizeInBits();
12754     bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
12755     bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
12756     if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
12757         (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
12758       return false;
12759     if (Subtarget.hasSSE41() || BitSize == 8) {
12760       APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
12761       if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
12762           (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
12763         V1 = N1;
12764         V2 = N2;
12765         SrcVT = PackVT;
12766         PackOpcode = X86ISD::PACKUS;
12767         return true;
12768       }
12769     }
12770     bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
12771     bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
12772     if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
12773          DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
12774         (N2.isUndef() || IsZero2 || IsAllOnes2 ||
12775          DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
12776       V1 = N1;
12777       V2 = N2;
12778       SrcVT = PackVT;
12779       PackOpcode = X86ISD::PACKSS;
12780       return true;
12781     }
12782     return false;
12783   };
12784 
12785   // Attempt to match against wider and wider compaction patterns.
12786   for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
12787     MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
12788     MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
12789 
12790     // Try binary shuffle.
12791     SmallVector<int, 32> BinaryMask;
12792     createPackShuffleMask(VT, BinaryMask, false, NumStages);
12793     if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
12794       if (MatchPACK(V1, V2, PackVT))
12795         return true;
12796 
12797     // Try unary shuffle.
12798     SmallVector<int, 32> UnaryMask;
12799     createPackShuffleMask(VT, UnaryMask, true, NumStages);
12800     if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
12801       if (MatchPACK(V1, V1, PackVT))
12802         return true;
12803   }
12804 
12805   return false;
12806 }
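// For example, a v8i16 compaction mask {0, 2, 4, 6, 8, 10, 12, 14} (keeping
// the low i16 of every i32 of both inputs) matches a single-stage pack with
// PackVT v4i32: PACKUSDW if the upper 16 bits of each i32 are known zero
// (and SSE4.1 is available), or PACKSSDW if each i32 has more than 16 sign
// bits.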
12807 
12808 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
12809                                     SDValue V1, SDValue V2, SelectionDAG &DAG,
12810                                     const X86Subtarget &Subtarget) {
12811   MVT PackVT;
12812   unsigned PackOpcode;
12813   unsigned SizeBits = VT.getSizeInBits();
12814   unsigned EltBits = VT.getScalarSizeInBits();
12815   unsigned MaxStages = Log2_32(64 / EltBits);
12816   if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
12817                             Subtarget, MaxStages))
12818     return SDValue();
12819 
12820   unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
12821   unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
12822 
12823   // Don't lower multi-stage packs on AVX512, truncation is better.
12824   if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
12825     return SDValue();
12826 
12827   // Pack to the largest type possible:
12828   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
12829   unsigned MaxPackBits = 16;
12830   if (CurrentEltBits > 16 &&
12831       (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
12832     MaxPackBits = 32;
12833 
12834   // Repeatedly pack down to the target size.
12835   SDValue Res;
12836   for (unsigned i = 0; i != NumStages; ++i) {
12837     unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
12838     unsigned NumSrcElts = SizeBits / SrcEltBits;
12839     MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12840     MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
12841     MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12842     MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
12843     Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
12844                       DAG.getBitcast(SrcVT, V2));
12845     V1 = V2 = Res;
12846     CurrentEltBits /= 2;
12847   }
12848   assert(Res && Res.getValueType() == VT &&
12849          "Failed to lower compaction shuffle");
12850   return Res;
12851 }
12852 
12853 /// Try to emit a bitmask instruction for a shuffle.
12854 ///
12855 /// This handles cases where we can model a blend exactly as a bitmask due to
12856 /// one of the inputs being zeroable.
12857 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
12858                                      SDValue V2, ArrayRef<int> Mask,
12859                                      const APInt &Zeroable,
12860                                      const X86Subtarget &Subtarget,
12861                                      SelectionDAG &DAG) {
12862   MVT MaskVT = VT;
12863   MVT EltVT = VT.getVectorElementType();
12864   SDValue Zero, AllOnes;
12865   // Use f64 if i64 isn't legal.
12866   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
12867     EltVT = MVT::f64;
12868     MaskVT = MVT::getVectorVT(EltVT, Mask.size());
12869   }
12870 
12871   MVT LogicVT = VT;
12872   if (EltVT == MVT::f32 || EltVT == MVT::f64) {
12873     Zero = DAG.getConstantFP(0.0, DL, EltVT);
12874     APFloat AllOnesValue =
12875         APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
12876     AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
12877     LogicVT =
12878         MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
12879   } else {
12880     Zero = DAG.getConstant(0, DL, EltVT);
12881     AllOnes = DAG.getAllOnesConstant(DL, EltVT);
12882   }
12883 
12884   SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
12885   SDValue V;
12886   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12887     if (Zeroable[i])
12888       continue;
12889     if (Mask[i] % Size != i)
12890       return SDValue(); // Not a blend.
12891     if (!V)
12892       V = Mask[i] < Size ? V1 : V2;
12893     else if (V != (Mask[i] < Size ? V1 : V2))
12894       return SDValue(); // Can only let one input through the mask.
12895 
12896     VMaskOps[i] = AllOnes;
12897   }
12898   if (!V)
12899     return SDValue(); // No non-zeroable elements!
12900 
12901   SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
12902   VMask = DAG.getBitcast(LogicVT, VMask);
12903   V = DAG.getBitcast(LogicVT, V);
12904   SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
12905   return DAG.getBitcast(VT, And);
12906 }
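// For example, a v4i32 shuffle {0, 5, 2, 7} where elements 1 and 3 are
// zeroable (say, V2 is a zero vector) lowers to a single AND of V1 with the
// build-vector mask {-1, 0, -1, 0}.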
12907 
12908 /// Try to emit a blend instruction for a shuffle using bit math.
12909 ///
12910 /// This is used as a fallback approach when first class blend instructions are
12911 /// unavailable. Currently it is only suitable for integer vectors, but could
12912 /// be generalized for floating point vectors if desirable.
12913 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
12914                                       SDValue V2, ArrayRef<int> Mask,
12915                                       SelectionDAG &DAG) {
12916   assert(VT.isInteger() && "Only supports integer vector types!");
12917   MVT EltVT = VT.getVectorElementType();
12918   SDValue Zero = DAG.getConstant(0, DL, EltVT);
12919   SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
12920   SmallVector<SDValue, 16> MaskOps;
12921   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12922     if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
12923       return SDValue(); // Shuffled input!
12924     MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
12925   }
12926 
12927   SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
12928   V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
12929   V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
12930   return DAG.getNode(ISD::OR, DL, VT, V1, V2);
12931 }
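// For example, the v4i32 blend {4, 1, 6, 3} (elements 0 and 2 from V2,
// elements 1 and 3 from V1) builds V1Mask = {0, -1, 0, -1} and computes
// (V1 & V1Mask) | (V2 & ~V1Mask).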
12932 
12933 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
12934                                     SDValue PreservedSrc,
12935                                     const X86Subtarget &Subtarget,
12936                                     SelectionDAG &DAG);
12937 
12938 static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
12939                                 MutableArrayRef<int> Mask,
12940                                 const APInt &Zeroable, bool &ForceV1Zero,
12941                                 bool &ForceV2Zero, uint64_t &BlendMask) {
12942   bool V1IsZeroOrUndef =
12943       V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
12944   bool V2IsZeroOrUndef =
12945       V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
12946 
12947   BlendMask = 0;
12948   ForceV1Zero = false, ForceV2Zero = false;
12949   assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
12950 
12951   // Attempt to generate the binary blend mask. If an input is zero then
12952   // we can use any lane.
12953   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12954     int M = Mask[i];
12955     if (M == SM_SentinelUndef)
12956       continue;
12957     if (M == i ||
12958         (0 <= M && M < Size && IsElementEquivalent(Size, V1, V1, M, i))) {
12959       Mask[i] = i;
12960       continue;
12961     }
12962     if (M == (i + Size) ||
12963         (Size <= M && IsElementEquivalent(Size, V2, V2, M - Size, i))) {
12964       BlendMask |= 1ull << i;
12965       Mask[i] = i + Size;
12966       continue;
12967     }
12968     if (Zeroable[i]) {
12969       if (V1IsZeroOrUndef) {
12970         ForceV1Zero = true;
12971         Mask[i] = i;
12972         continue;
12973       }
12974       if (V2IsZeroOrUndef) {
12975         ForceV2Zero = true;
12976         BlendMask |= 1ull << i;
12977         Mask[i] = i + Size;
12978         continue;
12979       }
12980     }
12981     return false;
12982   }
12983   return true;
12984 }
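// Worked example (illustrative): for a v4i32 mask <0, 5, 2, 7> with nothing
// zeroable, the loop above keeps lanes 0 and 2 from V1 and takes lanes 1 and 3
// from V2, producing BlendMask == 0b1010. If a lane is zeroable and one input
// is already zero/undef, ForceV1Zero/ForceV2Zero is set and that lane is
// blended from the (forced) zero vector instead.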
12985 
12986 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
12987                                             int Scale) {
12988   uint64_t ScaledMask = 0;
12989   for (int i = 0; i != Size; ++i)
12990     if (BlendMask & (1ull << i))
12991       ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
12992   return ScaledMask;
12993 }
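// Illustrative example: scaleVectorShuffleBlendMask(0b01, /*Size=*/2,
// /*Scale=*/4) widens each selected element into 4 selected sub-elements and
// returns 0b00001111, while 0b10 with the same parameters returns 0b11110000.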
12994 
12995 /// Try to emit a blend instruction for a shuffle.
12996 ///
12997 /// This doesn't do any checks for the availability of instructions for blending
12998 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
12999 /// be matched in the backend with the type given. What it does check for is
13000 /// that the shuffle mask is a blend, or convertible into a blend with zero.
13001 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
13002                                    SDValue V2, ArrayRef<int> Original,
13003                                    const APInt &Zeroable,
13004                                    const X86Subtarget &Subtarget,
13005                                    SelectionDAG &DAG) {
13006   uint64_t BlendMask = 0;
13007   bool ForceV1Zero = false, ForceV2Zero = false;
13008   SmallVector<int, 64> Mask(Original);
13009   if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
13010                            BlendMask))
13011     return SDValue();
13012 
13013   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
13014   if (ForceV1Zero)
13015     V1 = getZeroVector(VT, Subtarget, DAG, DL);
13016   if (ForceV2Zero)
13017     V2 = getZeroVector(VT, Subtarget, DAG, DL);
13018 
13019   unsigned NumElts = VT.getVectorNumElements();
13020 
13021   switch (VT.SimpleTy) {
13022   case MVT::v4i64:
13023   case MVT::v8i32:
13024     assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
13025     [[fallthrough]];
13026   case MVT::v4f64:
13027   case MVT::v8f32:
13028     assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
13029     [[fallthrough]];
13030   case MVT::v2f64:
13031   case MVT::v2i64:
13032   case MVT::v4f32:
13033   case MVT::v4i32:
13034   case MVT::v8i16:
13035     assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
13036     return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
13037                        DAG.getTargetConstant(BlendMask, DL, MVT::i8));
13038   case MVT::v16i16: {
13039     assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
13040     SmallVector<int, 8> RepeatedMask;
13041     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
13042       // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
13043       assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
13044       BlendMask = 0;
13045       for (int i = 0; i < 8; ++i)
13046         if (RepeatedMask[i] >= 8)
13047           BlendMask |= 1ull << i;
13048       return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13049                          DAG.getTargetConstant(BlendMask, DL, MVT::i8));
13050     }
13051     // Use PBLENDW for lower/upper lanes and then blend lanes.
13052     // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
13053     // merge to VSELECT where useful.
13054     uint64_t LoMask = BlendMask & 0xFF;
13055     uint64_t HiMask = (BlendMask >> 8) & 0xFF;
13056     if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
13057       SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13058                                DAG.getTargetConstant(LoMask, DL, MVT::i8));
13059       SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13060                                DAG.getTargetConstant(HiMask, DL, MVT::i8));
13061       return DAG.getVectorShuffle(
13062           MVT::v16i16, DL, Lo, Hi,
13063           {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
13064     }
13065     [[fallthrough]];
13066   }
13067   case MVT::v32i8:
13068     assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
13069     [[fallthrough]];
13070   case MVT::v16i8: {
13071     assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
13072 
13073     // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
13074     if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
13075                                                Subtarget, DAG))
13076       return Masked;
13077 
13078     if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
13079       MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
13080       SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
13081       return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
13082     }
13083 
13084     // If we have VPTERNLOG, we can use that as a bit blend.
13085     if (Subtarget.hasVLX())
13086       if (SDValue BitBlend =
13087               lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
13088         return BitBlend;
13089 
13090     // Scale the blend by the number of bytes per element.
13091     int Scale = VT.getScalarSizeInBits() / 8;
13092 
13093     // This form of blend is always done on bytes. Compute the byte vector
13094     // type.
13095     MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13096 
13097     // x86 allows load folding with blendvb from the 2nd source operand. But
13098     // we are still using LLVM select here (see comment below), so that's V1.
13099     // If V2 can be load-folded and V1 cannot be load-folded, then commute to
13100     // allow that load-folding possibility.
13101     if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
13102       ShuffleVectorSDNode::commuteMask(Mask);
13103       std::swap(V1, V2);
13104     }
13105 
13106     // Compute the VSELECT mask. Note that VSELECT is really confusing in the
13107     // mix of LLVM's code generator and the x86 backend. We tell the code
13108     // generator that boolean values in the elements of an x86 vector register
13109     // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
13110     // mapping a select to operand #1, and 'false' mapping to operand #2. The
13111     // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
13112     // of the element (the remaining are ignored) and 0 in that high bit would
13113     // mean operand #1 while 1 in the high bit would mean operand #2. So while
13114     // the LLVM model for boolean values in vector elements gets the relevant
13115     // bit set, it is set backwards and over-constrained relative to x86's
13116     // actual model.
13117     SmallVector<SDValue, 32> VSELECTMask;
13118     for (int i = 0, Size = Mask.size(); i < Size; ++i)
13119       for (int j = 0; j < Scale; ++j)
13120         VSELECTMask.push_back(
13121             Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
13122                         : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
13123                                           MVT::i8));
13124 
13125     V1 = DAG.getBitcast(BlendVT, V1);
13126     V2 = DAG.getBitcast(BlendVT, V2);
13127     return DAG.getBitcast(
13128         VT,
13129         DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
13130                       V1, V2));
13131   }
13132   case MVT::v16f32:
13133   case MVT::v8f64:
13134   case MVT::v8i64:
13135   case MVT::v16i32:
13136   case MVT::v32i16:
13137   case MVT::v64i8: {
13138     // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
13139     bool OptForSize = DAG.shouldOptForSize();
13140     if (!OptForSize) {
13141       if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
13142                                                  Subtarget, DAG))
13143         return Masked;
13144     }
13145 
13146     // Otherwise load an immediate into a GPR, cast to k-register, and use a
13147     // masked move.
13148     MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
13149     SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
13150     return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
13151   }
13152   default:
13153     llvm_unreachable("Not a supported integer vector type!");
13154   }
13155 }
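// Illustrative worked example: a v16i16 shuffle that takes the even elements
// of each 128-bit lane from V1 and the odd elements from V2 has a
// lane-repeated mask, so the v16i16 case above lowers it to a single BLENDI
// (PBLENDW) with the immediate 0b10101010 applied to both lanes.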
13156 
13157 /// Try to lower as a blend of elements from two inputs followed by
13158 /// a single-input permutation.
13159 ///
13160 /// This matches the pattern where we can blend elements from two inputs and
13161 /// then reduce the shuffle to a single-input permutation.
13162 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
13163                                              SDValue V1, SDValue V2,
13164                                              ArrayRef<int> Mask,
13165                                              SelectionDAG &DAG,
13166                                              bool ImmBlends = false) {
13167   // We build up the blend mask while checking whether a blend is a viable way
13168   // to reduce the shuffle.
13169   SmallVector<int, 32> BlendMask(Mask.size(), -1);
13170   SmallVector<int, 32> PermuteMask(Mask.size(), -1);
13171 
13172   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
13173     if (Mask[i] < 0)
13174       continue;
13175 
13176     assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
13177 
13178     if (BlendMask[Mask[i] % Size] < 0)
13179       BlendMask[Mask[i] % Size] = Mask[i];
13180     else if (BlendMask[Mask[i] % Size] != Mask[i])
13181       return SDValue(); // Can't blend in the needed input!
13182 
13183     PermuteMask[i] = Mask[i] % Size;
13184   }
13185 
13186   // If only immediate blends are allowed, then bail if the blend mask can't
13187   // be widened to i16.
13188   unsigned EltSize = VT.getScalarSizeInBits();
13189   if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
13190     return SDValue();
13191 
13192   SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
13193   return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
13194 }
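// Worked example (illustrative): for a v4i32 mask <2, 5, 0, 7> the loop above
// builds BlendMask = <0, 5, 2, 7> (a legal two-input blend) and PermuteMask =
// <2, 1, 0, 3>, so the shuffle becomes blend(V1, V2) followed by a
// single-input permute of the blended result.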
13195 
13196 /// Try to lower as an unpack of elements from two inputs followed by
13197 /// a single-input permutation.
13198 ///
13199 /// This matches the pattern where we can unpack elements from two inputs and
13200 /// then reduce the shuffle to a single-input (wider) permutation.
13201 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
13202                                              SDValue V1, SDValue V2,
13203                                              ArrayRef<int> Mask,
13204                                              SelectionDAG &DAG) {
13205   int NumElts = Mask.size();
13206   int NumLanes = VT.getSizeInBits() / 128;
13207   int NumLaneElts = NumElts / NumLanes;
13208   int NumHalfLaneElts = NumLaneElts / 2;
13209 
13210   bool MatchLo = true, MatchHi = true;
13211   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
13212 
13213   // Determine UNPCKL/UNPCKH type and operand order.
13214   for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
13215     for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
13216       int M = Mask[Lane + Elt];
13217       if (M < 0)
13218         continue;
13219 
13220       SDValue &Op = Ops[Elt & 1];
13221       if (M < NumElts && (Op.isUndef() || Op == V1))
13222         Op = V1;
13223       else if (NumElts <= M && (Op.isUndef() || Op == V2))
13224         Op = V2;
13225       else
13226         return SDValue();
13227 
13228       int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
13229       MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
13230                  isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
13231       MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
13232                  isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
13233       if (!MatchLo && !MatchHi)
13234         return SDValue();
13235     }
13236   }
13237   assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
13238 
13239   // Now check that each pair of elts comes from the same unpack pair
13240   // and set the permute mask based on each pair.
13241   // TODO - Investigate cases where we permute individual elements.
13242   SmallVector<int, 32> PermuteMask(NumElts, -1);
13243   for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
13244     for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
13245       int M0 = Mask[Lane + Elt + 0];
13246       int M1 = Mask[Lane + Elt + 1];
13247       if (0 <= M0 && 0 <= M1 &&
13248           (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
13249         return SDValue();
13250       if (0 <= M0)
13251         PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
13252       if (0 <= M1)
13253         PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
13254     }
13255   }
13256 
13257   unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
13258   SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
13259   return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
13260 }
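// Illustrative worked example: the v8i16 mask <1, 9, 0, 8, 3, 11, 2, 10>
// alternates V1/V2 elements drawn only from the low halves of each input, so
// it matches UNPCKL(V1, V2) followed by the single-input permute
// <2, 3, 0, 1, 6, 7, 4, 5> of the unpacked result.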
13261 
13262 /// Try to lower a shuffle as a permute of the inputs followed by an
13263 /// UNPCK instruction.
13264 ///
13265 /// This specifically targets cases where we end up alternating between
13266 /// the two inputs, and so can permute them into something that feeds a single
13267 /// UNPCK instruction. Note that this routine only targets integer vectors
13268 /// because for floating point vectors we have a generalized SHUFPS lowering
13269 /// strategy that handles everything that doesn't *exactly* match an unpack,
13270 /// making this clever lowering unnecessary.
13271 static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
13272                                               SDValue V1, SDValue V2,
13273                                               ArrayRef<int> Mask,
13274                                               const X86Subtarget &Subtarget,
13275                                               SelectionDAG &DAG) {
13276   int Size = Mask.size();
13277   assert(Mask.size() >= 2 && "Single element masks are invalid.");
13278 
13279   // This routine only supports 128-bit integer dual input vectors.
13280   if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
13281     return SDValue();
13282 
13283   int NumLoInputs =
13284       count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13285   int NumHiInputs =
13286       count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13287 
13288   bool UnpackLo = NumLoInputs >= NumHiInputs;
13289 
13290   auto TryUnpack = [&](int ScalarSize, int Scale) {
13291     SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13292     SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13293 
13294     for (int i = 0; i < Size; ++i) {
13295       if (Mask[i] < 0)
13296         continue;
13297 
13298       // Each element of the unpack contains Scale elements from this mask.
13299       int UnpackIdx = i / Scale;
13300 
13301       // We only handle the case where V1 feeds the first slots of the unpack.
13302       // We rely on canonicalization to ensure this is the case.
13303       if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13304         return SDValue();
13305 
13306       // Setup the mask for this input. The indexing is tricky as we have to
13307       // handle the unpack stride.
13308       SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13309       VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13310           Mask[i] % Size;
13311     }
13312 
13313     // If we will have to shuffle both inputs to use the unpack, check whether
13314     // we can just unpack first and shuffle the result. If so, skip this unpack.
13315     if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13316         !isNoopShuffleMask(V2Mask))
13317       return SDValue();
13318 
13319     // Shuffle the inputs into place.
13320     V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13321     V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13322 
13323     // Cast the inputs to the type we will use to unpack them.
13324     MVT UnpackVT =
13325         MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13326     V1 = DAG.getBitcast(UnpackVT, V1);
13327     V2 = DAG.getBitcast(UnpackVT, V2);
13328 
13329     // Unpack the inputs and cast the result back to the desired type.
13330     return DAG.getBitcast(
13331         VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13332                         UnpackVT, V1, V2));
13333   };
13334 
13335   // We try each unpack from the largest to the smallest to try and find one
13336   // that fits this mask.
13337   int OrigScalarSize = VT.getScalarSizeInBits();
13338   for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13339     if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13340       return Unpack;
13341 
13342   // If we're shuffling with a zero vector then we're better off not doing
13343   // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13344   if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13345       ISD::isBuildVectorAllZeros(V2.getNode()))
13346     return SDValue();
13347 
13348   // If none of the unpack-rooted lowerings worked (or were profitable) try an
13349   // initial unpack.
13350   if (NumLoInputs == 0 || NumHiInputs == 0) {
13351     assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13352            "We have to have *some* inputs!");
13353     int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13354 
13355     // FIXME: We could consider the total complexity of the permute of each
13356     // possible unpacking. Or at the least we should consider how many
13357     // half-crossings are created.
13358     // FIXME: We could consider commuting the unpacks.
13359 
13360     SmallVector<int, 32> PermMask((unsigned)Size, -1);
13361     for (int i = 0; i < Size; ++i) {
13362       if (Mask[i] < 0)
13363         continue;
13364 
13365       assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13366 
13367       PermMask[i] =
13368           2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13369     }
13370     return DAG.getVectorShuffle(
13371         VT, DL,
13372         DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
13373                     V1, V2),
13374         DAG.getUNDEF(VT), PermMask);
13375   }
13376 
13377   return SDValue();
13378 }
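// Illustrative worked example: for a v4i32 mask <0, 6, 2, 4> the 64-bit unpack
// attempt fails, but the 32-bit attempt pre-shuffles V1 by <0, 2, -1, -1> and
// V2 by <2, 0, -1, -1>, after which UNPCKL interleaves them into exactly
// <0, 6, 2, 4>.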
13379 
13380 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
13381 /// permuting the elements of the result in place.
13382 static SDValue lowerShuffleAsByteRotateAndPermute(
13383     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13384     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13385   if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
13386       (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
13387       (VT.is512BitVector() && !Subtarget.hasBWI()))
13388     return SDValue();
13389 
13390   // We don't currently support lane crossing permutes.
13391   if (is128BitLaneCrossingShuffleMask(VT, Mask))
13392     return SDValue();
13393 
13394   int Scale = VT.getScalarSizeInBits() / 8;
13395   int NumLanes = VT.getSizeInBits() / 128;
13396   int NumElts = VT.getVectorNumElements();
13397   int NumEltsPerLane = NumElts / NumLanes;
13398 
13399   // Determine range of mask elts.
13400   bool Blend1 = true;
13401   bool Blend2 = true;
13402   std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
13403   std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
13404   for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
13405     for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
13406       int M = Mask[Lane + Elt];
13407       if (M < 0)
13408         continue;
13409       if (M < NumElts) {
13410         Blend1 &= (M == (Lane + Elt));
13411         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
13412         M = M % NumEltsPerLane;
13413         Range1.first = std::min(Range1.first, M);
13414         Range1.second = std::max(Range1.second, M);
13415       } else {
13416         M -= NumElts;
13417         Blend2 &= (M == (Lane + Elt));
13418         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
13419         M = M % NumEltsPerLane;
13420         Range2.first = std::min(Range2.first, M);
13421         Range2.second = std::max(Range2.second, M);
13422       }
13423     }
13424   }
13425 
13426   // Bail if we don't need both elements.
13427   // TODO - it might be worth doing this for unary shuffles if the permute
13428   // can be widened.
13429   if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
13430       !(0 <= Range2.first && Range2.second < NumEltsPerLane))
13431     return SDValue();
13432 
13433   if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
13434     return SDValue();
13435 
13436   // Rotate the 2 ops so we can access both ranges, then permute the result.
13437   auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
13438     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13439     SDValue Rotate = DAG.getBitcast(
13440         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
13441                         DAG.getBitcast(ByteVT, Lo),
13442                         DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
13443     SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
13444     for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
13445       for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
13446         int M = Mask[Lane + Elt];
13447         if (M < 0)
13448           continue;
13449         if (M < NumElts)
13450           PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
13451         else
13452           PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
13453       }
13454     }
13455     return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
13456   };
13457 
13458   // Check if the ranges are small enough to rotate from either direction.
13459   if (Range2.second < Range1.first)
13460     return RotateAndPermute(V1, V2, Range1.first, 0);
13461   if (Range1.second < Range2.first)
13462     return RotateAndPermute(V2, V1, Range2.first, NumElts);
13463   return SDValue();
13464 }
13465 
13466 static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
13467   return isUndefOrEqual(Mask, 0);
13468 }
13469 
13470 static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
13471   return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
13472 }
13473 
13474 /// Generic routine to decompose a shuffle and blend into independent
13475 /// blends and permutes.
13476 ///
13477 /// This matches the extremely common pattern for handling combined
13478 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
13479 /// operations. It will try to pick the best arrangement of shuffles and
13480 /// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
13481 static SDValue lowerShuffleAsDecomposedShuffleMerge(
13482     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13483     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13484   int NumElts = Mask.size();
13485   int NumLanes = VT.getSizeInBits() / 128;
13486   int NumEltsPerLane = NumElts / NumLanes;
13487 
13488   // Shuffle the input elements into the desired positions in V1 and V2 and
13489   // unpack/blend them together.
13490   bool IsAlternating = true;
13491   SmallVector<int, 32> V1Mask(NumElts, -1);
13492   SmallVector<int, 32> V2Mask(NumElts, -1);
13493   SmallVector<int, 32> FinalMask(NumElts, -1);
13494   for (int i = 0; i < NumElts; ++i) {
13495     int M = Mask[i];
13496     if (M >= 0 && M < NumElts) {
13497       V1Mask[i] = M;
13498       FinalMask[i] = i;
13499       IsAlternating &= (i & 1) == 0;
13500     } else if (M >= NumElts) {
13501       V2Mask[i] = M - NumElts;
13502       FinalMask[i] = i + NumElts;
13503       IsAlternating &= (i & 1) == 1;
13504     }
13505   }
13506 
13507   // If we effectively only demand the 0'th element of \p Input (though not
13508   // necessarily only as the 0'th output element), then broadcast said input,
13509   // and change \p InputMask to be a no-op (identity) mask.
13510   auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
13511                                          &DAG](SDValue &Input,
13512                                                MutableArrayRef<int> InputMask) {
13513     unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
13514     if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
13515                                  !X86::mayFoldLoad(Input, Subtarget)))
13516       return;
13517     if (isNoopShuffleMask(InputMask))
13518       return;
13519     assert(isBroadcastShuffleMask(InputMask) &&
13520            "Expected to demand only the 0'th element.");
13521     Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
13522     for (auto I : enumerate(InputMask)) {
13523       int &InputMaskElt = I.value();
13524       if (InputMaskElt >= 0)
13525         InputMaskElt = I.index();
13526     }
13527   };
13528 
13529   // Currently, we may need to produce one shuffle per input, and blend results.
13530   // It is possible that the shuffle for one of the inputs is already a no-op.
13531   // See if we can simplify non-no-op shuffles into broadcasts,
13532   // which we consider to be strictly better than an arbitrary shuffle.
13533   if (isNoopOrBroadcastShuffleMask(V1Mask) &&
13534       isNoopOrBroadcastShuffleMask(V2Mask)) {
13535     canonicalizeBroadcastableInput(V1, V1Mask);
13536     canonicalizeBroadcastableInput(V2, V2Mask);
13537   }
13538 
13539   // Try to lower with the simpler initial blend/unpack/rotate strategies unless
13540   // one of the input shuffles would be a no-op. We prefer to shuffle the inputs
13541   // as the shuffle may be able to fold with a load or provide some other
13542   // benefit. However, when we'd have to do twice as many shuffles to achieve
13543   // this, doing a 2-input pre-shuffle first is the better strategy.
13544   if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
13545     // Only prefer immediate blends to unpack/rotate.
13546     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
13547                                                           DAG, true))
13548       return BlendPerm;
13549     if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
13550                                                            DAG))
13551       return UnpackPerm;
13552     if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
13553             DL, VT, V1, V2, Mask, Subtarget, DAG))
13554       return RotatePerm;
13555     // Unpack/rotate failed - try again with variable blends.
13556     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
13557                                                           DAG))
13558       return BlendPerm;
13559     if (VT.getScalarSizeInBits() >= 32)
13560       if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
13561               DL, VT, V1, V2, Mask, Subtarget, DAG))
13562         return PermUnpack;
13563   }
13564 
13565   // If the final mask is an alternating blend of vXi8/vXi16, convert to an
13566   // UNPCKL(SHUFFLE, SHUFFLE) pattern.
13567   // TODO: It doesn't have to be alternating - but each lane mustn't have more
13568   // than half the elements coming from each source.
13569   if (IsAlternating && VT.getScalarSizeInBits() < 32) {
13570     V1Mask.assign(NumElts, -1);
13571     V2Mask.assign(NumElts, -1);
13572     FinalMask.assign(NumElts, -1);
13573     for (int i = 0; i != NumElts; i += NumEltsPerLane)
13574       for (int j = 0; j != NumEltsPerLane; ++j) {
13575         int M = Mask[i + j];
13576         if (M >= 0 && M < NumElts) {
13577           V1Mask[i + (j / 2)] = M;
13578           FinalMask[i + j] = i + (j / 2);
13579         } else if (M >= NumElts) {
13580           V2Mask[i + (j / 2)] = M - NumElts;
13581           FinalMask[i + j] = i + (j / 2) + NumElts;
13582         }
13583       }
13584   }
13585 
13586   V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13587   V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13588   return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
13589 }
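// Worked example (illustrative): a v4i32 mask <0, 6, 1, 7> decomposes into
// V1Mask = <0, -1, 1, -1>, V2Mask = <-1, 2, -1, 3> and FinalMask =
// <0, 5, 2, 7>, i.e. shuffle each input into place and then combine the two
// results with a cheap alternating blend.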
13590 
13591 /// Try to lower a vector shuffle as a bit rotation.
13592 ///
13593 /// Look for a repeated rotation pattern in each sub group.
13594 /// Returns an ISD::ROTL element rotation amount or -1 if no match is found.
13595 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
13596   int NumElts = Mask.size();
13597   assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
13598 
13599   int RotateAmt = -1;
13600   for (int i = 0; i != NumElts; i += NumSubElts) {
13601     for (int j = 0; j != NumSubElts; ++j) {
13602       int M = Mask[i + j];
13603       if (M < 0)
13604         continue;
13605       if (!isInRange(M, i, i + NumSubElts))
13606         return -1;
13607       int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
13608       if (0 <= RotateAmt && Offset != RotateAmt)
13609         return -1;
13610       RotateAmt = Offset;
13611     }
13612   }
13613   return RotateAmt;
13614 }
13615 
13616 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
13617                                    const X86Subtarget &Subtarget,
13618                                    ArrayRef<int> Mask) {
13619   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13620   assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
13621 
13622   // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
13623   int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
13624   int MaxSubElts = 64 / EltSizeInBits;
13625   for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
13626     int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
13627     if (RotateAmt < 0)
13628       continue;
13629 
13630     int NumElts = Mask.size();
13631     MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
13632     RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
13633     return RotateAmt * EltSizeInBits;
13634   }
13635 
13636   return -1;
13637 }
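// Illustrative worked example: for the v16i8 mask
// <1,2,3,0, 5,6,7,4, 9,10,11,8, 13,14,15,12> a sub-group size of 4 matches
// with RotateAmt == 3 in every group, so the overload above returns
// RotateVT == MVT::v4i32 and a rotation of 3 * 8 == 24 bits (an ISD::ROTL of
// each 32-bit group by 24).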
13638 
13639 /// Lower shuffle using X86ISD::VROTLI rotations.
13640 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
13641                                        ArrayRef<int> Mask,
13642                                        const X86Subtarget &Subtarget,
13643                                        SelectionDAG &DAG) {
13644   // Only XOP + AVX512 targets have bit rotation instructions.
13645   // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
13646   bool IsLegal =
13647       (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
13648   if (!IsLegal && Subtarget.hasSSE3())
13649     return SDValue();
13650 
13651   MVT RotateVT;
13652   int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
13653                                           Subtarget, Mask);
13654   if (RotateAmt < 0)
13655     return SDValue();
13656 
13657   // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
13658   // expanded to OR(SRL,SHL), will be more efficient, but if they can
13659   // widen to vXi16 or more then the existing lowering will be better.
13660   if (!IsLegal) {
13661     if ((RotateAmt % 16) == 0)
13662       return SDValue();
13663     // TODO: Use getTargetVShiftByConstNode.
13664     unsigned ShlAmt = RotateAmt;
13665     unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
13666     V1 = DAG.getBitcast(RotateVT, V1);
13667     SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
13668                               DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
13669     SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
13670                               DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
13671     SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
13672     return DAG.getBitcast(VT, Rot);
13673   }
13674 
13675   SDValue Rot =
13676       DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
13677                   DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
13678   return DAG.getBitcast(VT, Rot);
13679 }
13680 
13681 /// Try to match a vector shuffle as an element rotation.
13682 ///
13683 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
13684 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
13685                                        ArrayRef<int> Mask) {
13686   int NumElts = Mask.size();
13687 
13688   // We need to detect various ways of spelling a rotation:
13689   //   [11, 12, 13, 14, 15,  0,  1,  2]
13690   //   [-1, 12, 13, 14, -1, -1,  1, -1]
13691   //   [-1, -1, -1, -1, -1, -1,  1,  2]
13692   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
13693   //   [-1,  4,  5,  6, -1, -1,  9, -1]
13694   //   [-1,  4,  5,  6, -1, -1, -1, -1]
13695   int Rotation = 0;
13696   SDValue Lo, Hi;
13697   for (int i = 0; i < NumElts; ++i) {
13698     int M = Mask[i];
13699     assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
13700            "Unexpected mask index.");
13701     if (M < 0)
13702       continue;
13703 
13704     // Determine where a rotated vector would have started.
13705     int StartIdx = i - (M % NumElts);
13706     if (StartIdx == 0)
13707       // The identity rotation isn't interesting, stop.
13708       return -1;
13709 
13710     // If we found the tail of a vector the rotation must be the missing
13711     // front. If we found the head of a vector, the rotation must be how much
13712     // of the head is present.
13713     int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
13714 
13715     if (Rotation == 0)
13716       Rotation = CandidateRotation;
13717     else if (Rotation != CandidateRotation)
13718       // The rotations don't match, so we can't match this mask.
13719       return -1;
13720 
13721     // Compute which value this mask is pointing at.
13722     SDValue MaskV = M < NumElts ? V1 : V2;
13723 
13724     // Compute which of the two target values this index should be assigned
13725     // to. This reflects whether the high elements are remaining or the low
13726     // elements are remaining.
13727     SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
13728 
13729     // Either set up this value if we've not encountered it before, or check
13730     // that it remains consistent.
13731     if (!TargetV)
13732       TargetV = MaskV;
13733     else if (TargetV != MaskV)
13734       // This may be a rotation, but it pulls from the inputs in some
13735       // unsupported interleaving.
13736       return -1;
13737   }
13738 
13739   // Check that we successfully analyzed the mask, and normalize the results.
13740   assert(Rotation != 0 && "Failed to locate a viable rotation!");
13741   assert((Lo || Hi) && "Failed to find a rotated input vector!");
13742   if (!Lo)
13743     Lo = Hi;
13744   else if (!Hi)
13745     Hi = Lo;
13746 
13747   V1 = Lo;
13748   V2 = Hi;
13749 
13750   return Rotation;
13751 }
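// Illustrative worked example: the v8i16 mask <11,12,13,14,15,0,1,2> is a
// rotation of the V1/V2 concatenation by 3 elements; every defined mask
// element implies the same CandidateRotation, so the routine returns 3 and
// leaves V1/V2 assigned to the Lo/Hi sides of the rotation.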
13752 
13753 /// Try to lower a vector shuffle as a byte rotation.
13754 ///
13755 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
13756 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
13757 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
13758 /// try to generically lower a vector shuffle through such a pattern. It
13759 /// does not check for the profitability of lowering either as PALIGNR or
13760 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
13761 /// This matches shuffle vectors that look like:
13762 ///
13763 ///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
13764 ///
13765 /// Essentially it concatenates V1 and V2, shifts right by some number of
13766 /// elements, and takes the low elements as the result. Note that while this is
13767 /// specified as a *right shift* because x86 is little-endian, it is a *left
13768 /// rotate* of the vector lanes.
13769 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
13770                                     ArrayRef<int> Mask) {
13771   // Don't accept any shuffles with zero elements.
13772   if (isAnyZero(Mask))
13773     return -1;
13774 
13775   // PALIGNR works on 128-bit lanes.
13776   SmallVector<int, 16> RepeatedMask;
13777   if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
13778     return -1;
13779 
13780   int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
13781   if (Rotation <= 0)
13782     return -1;
13783 
13784   // PALIGNR rotates bytes, so we need to scale the
13785   // rotation based on how many bytes are in the vector lane.
13786   int NumElts = RepeatedMask.size();
13787   int Scale = 16 / NumElts;
13788   return Rotation * Scale;
13789 }
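// Illustrative worked example: for a v8i16 shuffle the element rotation is
// scaled by 16 / 8 == 2 bytes per element, so an element rotation of 3
// becomes a PALIGNR byte rotation of 6.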
13790 
13791 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
13792                                         SDValue V2, ArrayRef<int> Mask,
13793                                         const X86Subtarget &Subtarget,
13794                                         SelectionDAG &DAG) {
13795   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13796 
13797   SDValue Lo = V1, Hi = V2;
13798   int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
13799   if (ByteRotation <= 0)
13800     return SDValue();
13801 
13802   // Cast the inputs to i8 vector of correct length to match PALIGNR or
13803   // PSLLDQ/PSRLDQ.
13804   MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13805   Lo = DAG.getBitcast(ByteVT, Lo);
13806   Hi = DAG.getBitcast(ByteVT, Hi);
13807 
13808   // SSSE3 targets can use the palignr instruction.
13809   if (Subtarget.hasSSSE3()) {
13810     assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
13811            "512-bit PALIGNR requires BWI instructions");
13812     return DAG.getBitcast(
13813         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
13814                         DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
13815   }
13816 
13817   assert(VT.is128BitVector() &&
13818          "Rotate-based lowering only supports 128-bit lowering!");
13819   assert(Mask.size() <= 16 &&
13820          "Can shuffle at most 16 bytes in a 128-bit vector!");
13821   assert(ByteVT == MVT::v16i8 &&
13822          "SSE2 rotate lowering only needed for v16i8!");
13823 
13824   // Default SSE2 implementation
13825   int LoByteShift = 16 - ByteRotation;
13826   int HiByteShift = ByteRotation;
13827 
13828   SDValue LoShift =
13829       DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
13830                   DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
13831   SDValue HiShift =
13832       DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
13833                   DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
13834   return DAG.getBitcast(VT,
13835                         DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
13836 }
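// Illustrative worked example: with ByteRotation == 6 on an SSE2-only target,
// Lo is shifted left by 16 - 6 == 10 bytes, Hi is shifted right by 6 bytes,
// and the two results are OR'd together - the PSRLDQ/PSLLDQ/POR pattern
// mentioned above.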
13837 
13838 /// Try to lower a vector shuffle as a dword/qword rotation.
13839 ///
13840 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
13841 /// rotation of the concatenation of two vectors; this routine will
13842 /// try to generically lower a vector shuffle through such a pattern.
13843 ///
13844 /// Essentially it concatenates V1 and V2, shifts right by some number of
13845 /// elements, and takes the low elements as the result. Note that while this is
13846 /// specified as a *right shift* because x86 is little-endian, it is a *left
13847 /// rotate* of the vector lanes.
13848 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
13849                                     SDValue V2, ArrayRef<int> Mask,
13850                                     const X86Subtarget &Subtarget,
13851                                     SelectionDAG &DAG) {
13852   assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
13853          "Only 32-bit and 64-bit elements are supported!");
13854 
13855   // 128/256-bit vectors are only supported with VLX.
13856   assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
13857          && "VLX required for 128/256-bit vectors");
13858 
13859   SDValue Lo = V1, Hi = V2;
13860   int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
13861   if (Rotation <= 0)
13862     return SDValue();
13863 
13864   return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
13865                      DAG.getTargetConstant(Rotation, DL, MVT::i8));
13866 }
13867 
13868 /// Try to lower a vector shuffle as a byte shift sequence.
13869 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
13870                                            SDValue V2, ArrayRef<int> Mask,
13871                                            const APInt &Zeroable,
13872                                            const X86Subtarget &Subtarget,
13873                                            SelectionDAG &DAG) {
13874   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13875   assert(VT.is128BitVector() && "Only 128-bit vectors supported");
13876 
13877   // We need a shuffle that has zeros at one/both ends and a sequential
13878   // shuffle from one source within.
13879   unsigned ZeroLo = Zeroable.countTrailingOnes();
13880   unsigned ZeroHi = Zeroable.countLeadingOnes();
13881   if (!ZeroLo && !ZeroHi)
13882     return SDValue();
13883 
13884   unsigned NumElts = Mask.size();
13885   unsigned Len = NumElts - (ZeroLo + ZeroHi);
13886   if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
13887     return SDValue();
13888 
13889   unsigned Scale = VT.getScalarSizeInBits() / 8;
13890   ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
13891   if (!isUndefOrInRange(StubMask, 0, NumElts) &&
13892       !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
13893     return SDValue();
13894 
13895   SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
13896   Res = DAG.getBitcast(MVT::v16i8, Res);
13897 
13898   // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
13899   // inner sequential set of elements, possibly offset:
13900   // 01234567 --> zzzzzz01 --> 1zzzzzzz
13901   // 01234567 --> 4567zzzz --> zzzzz456
13902   // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
13903   if (ZeroLo == 0) {
13904     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
13905     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13906                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13907     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13908                       DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
13909   } else if (ZeroHi == 0) {
13910     unsigned Shift = Mask[ZeroLo] % NumElts;
13911     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13912                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13913     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13914                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
13915   } else if (!Subtarget.hasSSSE3()) {
13916     // If we don't have PSHUFB then it's worth avoiding an AND constant mask
13917     // by performing 3 byte shifts. Shuffle combining can kick in above that.
13918     // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
13919     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
13920     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13921                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13922     Shift += Mask[ZeroLo] % NumElts;
13923     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13924                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13925     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13926                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
13927   } else
13928     return SDValue();
13929 
13930   return DAG.getBitcast(VT, Res);
13931 }
13932 
13933 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
13934 ///
13935 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
13936 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
13937 /// matches elements from one of the input vectors shuffled to the left or
13938 /// right with zeroable elements 'shifted in'. It handles both the strictly
13939 /// bit-wise element shifts and the byte shift across an entire 128-bit double
13940 /// quad word lane.
13941 ///
13942 /// PSHL : (little-endian) left bit shift.
13943 /// [ zz, 0, zz,  2 ]
13944 /// [ -1, 4, zz, -1 ]
13945 /// PSRL : (little-endian) right bit shift.
13946 /// [  1, zz,  3, zz]
13947 /// [ -1, -1,  7, zz]
13948 /// PSLLDQ : (little-endian) left byte shift
13949 /// [ zz,  0,  1,  2,  3,  4,  5,  6]
13950 /// [ zz, zz, -1, -1,  2,  3,  4, -1]
13951 /// [ zz, zz, zz, zz, zz, zz, -1,  1]
13952 /// PSRLDQ : (little-endian) right byte shift
13953 /// [  5, 6,  7, zz, zz, zz, zz, zz]
13954 /// [ -1, 5,  6,  7, zz, zz, zz, zz]
13955 /// [  1, 2, -1, -1, -1, -1, zz, zz]
13956 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
13957                                unsigned ScalarSizeInBits, ArrayRef<int> Mask,
13958                                int MaskOffset, const APInt &Zeroable,
13959                                const X86Subtarget &Subtarget) {
13960   int Size = Mask.size();
13961   unsigned SizeInBits = Size * ScalarSizeInBits;
13962 
13963   auto CheckZeros = [&](int Shift, int Scale, bool Left) {
13964     for (int i = 0; i < Size; i += Scale)
13965       for (int j = 0; j < Shift; ++j)
13966         if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
13967           return false;
13968 
13969     return true;
13970   };
13971 
13972   auto MatchShift = [&](int Shift, int Scale, bool Left) {
13973     for (int i = 0; i != Size; i += Scale) {
13974       unsigned Pos = Left ? i + Shift : i;
13975       unsigned Low = Left ? i : i + Shift;
13976       unsigned Len = Scale - Shift;
13977       if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
13978         return -1;
13979     }
13980 
13981     int ShiftEltBits = ScalarSizeInBits * Scale;
13982     bool ByteShift = ShiftEltBits > 64;
13983     Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
13984                   : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
13985     int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
13986 
13987     // Normalize the scale for byte shifts to still produce an i64 element
13988     // type.
13989     Scale = ByteShift ? Scale / 2 : Scale;
13990 
13991     // We need to round trip through the appropriate type for the shift.
13992     MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
13993     ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
13994                         : MVT::getVectorVT(ShiftSVT, Size / Scale);
13995     return (int)ShiftAmt;
13996   };
13997 
13998   // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
13999   // keep doubling the size of the integer elements up to that. We can
14000   // then shift the elements of the integer vector by whole multiples of
14001   // their width within the elements of the larger integer vector. Test each
14002   // multiple to see if we can find a match with the moved element indices
14003   // and that the shifted in elements are all zeroable.
14004   unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
14005   for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
14006     for (int Shift = 1; Shift != Scale; ++Shift)
14007       for (bool Left : {true, false})
14008         if (CheckZeros(Shift, Scale, Left)) {
14009           int ShiftAmt = MatchShift(Shift, Scale, Left);
14010           if (0 < ShiftAmt)
14011             return ShiftAmt;
14012         }
14013 
14014   // no match
14015   return -1;
14016 }
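// Worked example (illustrative): the v4i32 mask <zz, 0, zz, 2> (elements 0 and
// 2 zeroable) matches Scale == 2, Shift == 1, Left == true: each 64-bit half
// keeps its low 32-bit element shifted up, so this returns a 32-bit shift
// amount with Opcode == X86ISD::VSHLI and ShiftVT == MVT::v2i64.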
14017 
14018 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
14019                                    SDValue V2, ArrayRef<int> Mask,
14020                                    const APInt &Zeroable,
14021                                    const X86Subtarget &Subtarget,
14022                                    SelectionDAG &DAG) {
14023   int Size = Mask.size();
14024   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14025 
14026   MVT ShiftVT;
14027   SDValue V = V1;
14028   unsigned Opcode;
14029 
14030   // Try to match shuffle against V1 shift.
14031   int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
14032                                      Mask, 0, Zeroable, Subtarget);
14033 
14034   // If V1 failed, try to match shuffle against V2 shift.
14035   if (ShiftAmt < 0) {
14036     ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
14037                                    Mask, Size, Zeroable, Subtarget);
14038     V = V2;
14039   }
14040 
14041   if (ShiftAmt < 0)
14042     return SDValue();
14043 
14044   assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
14045          "Illegal integer vector type");
14046   V = DAG.getBitcast(ShiftVT, V);
14047   V = DAG.getNode(Opcode, DL, ShiftVT, V,
14048                   DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
14049   return DAG.getBitcast(VT, V);
14050 }
14051 
14052 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
14053 // Remainder of lower half result is zero and upper half is all undef.
14054 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
14055                                 ArrayRef<int> Mask, uint64_t &BitLen,
14056                                 uint64_t &BitIdx, const APInt &Zeroable) {
14057   int Size = Mask.size();
14058   int HalfSize = Size / 2;
14059   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14060   assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
14061 
14062   // Upper half must be undefined.
14063   if (!isUndefUpperHalf(Mask))
14064     return false;
14065 
14066   // Determine the extraction length from the part of the
14067   // lower half that isn't zeroable.
14068   int Len = HalfSize;
14069   for (; Len > 0; --Len)
14070     if (!Zeroable[Len - 1])
14071       break;
14072   assert(Len > 0 && "Zeroable shuffle mask");
14073 
14074   // Attempt to match first Len sequential elements from the lower half.
14075   SDValue Src;
14076   int Idx = -1;
14077   for (int i = 0; i != Len; ++i) {
14078     int M = Mask[i];
14079     if (M == SM_SentinelUndef)
14080       continue;
14081     SDValue &V = (M < Size ? V1 : V2);
14082     M = M % Size;
14083 
14084     // The extracted elements must start at a valid index and all mask
14085     // elements must be in the lower half.
14086     if (i > M || M >= HalfSize)
14087       return false;
14088 
14089     if (Idx < 0 || (Src == V && Idx == (M - i))) {
14090       Src = V;
14091       Idx = M - i;
14092       continue;
14093     }
14094     return false;
14095   }
14096 
14097   if (!Src || Idx < 0)
14098     return false;
14099 
14100   assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
14101   BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
14102   BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
14103   V1 = Src;
14104   return true;
14105 }
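// Illustrative worked example: a v8i16 mask <1, 2, 3, zz, u, u, u, u>
// (element 3 zeroable, upper half undef) gives Len == 3 and Idx == 1, i.e.
// an EXTRQI that extracts 48 bits (BitLen) starting at bit 16 (BitIdx) of V1
// into the low half and zeros the remainder of that half.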
14106 
14107 // INSERTQ: Extract lowest Len elements from lower half of second source and
14108 // insert over first source, starting at Idx.
14109 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
14110 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
14111                                   ArrayRef<int> Mask, uint64_t &BitLen,
14112                                   uint64_t &BitIdx) {
14113   int Size = Mask.size();
14114   int HalfSize = Size / 2;
14115   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14116 
14117   // Upper half must be undefined.
14118   if (!isUndefUpperHalf(Mask))
14119     return false;
14120 
14121   for (int Idx = 0; Idx != HalfSize; ++Idx) {
14122     SDValue Base;
14123 
14124     // Attempt to match first source from mask before insertion point.
14125     if (isUndefInRange(Mask, 0, Idx)) {
14126       /* EMPTY */
14127     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
14128       Base = V1;
14129     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
14130       Base = V2;
14131     } else {
14132       continue;
14133     }
14134 
14135     // Extend the extraction length looking to match both the insertion of
14136     // the second source and the remaining elements of the first.
14137     for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
14138       SDValue Insert;
14139       int Len = Hi - Idx;
14140 
14141       // Match insertion.
14142       if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
14143         Insert = V1;
14144       } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
14145         Insert = V2;
14146       } else {
14147         continue;
14148       }
14149 
14150       // Match the remaining elements of the lower half.
14151       if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
14152         /* EMPTY */
14153       } else if ((!Base || (Base == V1)) &&
14154                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
14155         Base = V1;
14156       } else if ((!Base || (Base == V2)) &&
14157                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
14158                                             Size + Hi)) {
14159         Base = V2;
14160       } else {
14161         continue;
14162       }
14163 
14164       BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
14165       BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
14166       V1 = Base;
14167       V2 = Insert;
14168       return true;
14169     }
14170   }
14171 
14172   return false;
14173 }
14174 
14175 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
14176 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
14177                                      SDValue V2, ArrayRef<int> Mask,
14178                                      const APInt &Zeroable, SelectionDAG &DAG) {
14179   uint64_t BitLen, BitIdx;
14180   if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
14181     return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
14182                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
14183                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
14184 
14185   if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
14186     return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
14187                        V2 ? V2 : DAG.getUNDEF(VT),
14188                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
14189                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
14190 
14191   return SDValue();
14192 }
14193 
14194 /// Lower a vector shuffle as a zero or any extension.
14195 ///
14196 /// Given a specific number of elements, element bit width, and extension
14197 /// stride, produce either a zero or any extension based on the available
14198 /// features of the subtarget. The extended elements are consecutive and
14199 /// can start from an offset element index in the input; to avoid excess
14200 /// shuffling, the offset must either be in the bottom lane or at the
14201 /// start of a higher lane. All extended elements must be from
14202 /// the same lane.
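///
/// For illustration: zero extending v16i8 to v4i32 corresponds to Scale == 4
/// with Offset == 0, i.e. a mask of the form
/// { 0, Z, Z, Z, 1, Z, Z, Z, 2, Z, Z, Z, 3, Z, Z, Z } where Z denotes a
/// zeroable element (or undef in the any-extend case).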
14203 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
14204     const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
14205     ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14206   assert(Scale > 1 && "Need a scale to extend.");
14207   int EltBits = VT.getScalarSizeInBits();
14208   int NumElements = VT.getVectorNumElements();
14209   int NumEltsPerLane = 128 / EltBits;
14210   int OffsetLane = Offset / NumEltsPerLane;
14211   assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
14212          "Only 8, 16, and 32 bit elements can be extended.");
14213   assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
14214   assert(0 <= Offset && "Extension offset must be positive.");
14215   assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
14216          "Extension offset must be in the first lane or start an upper lane.");
14217 
14218   // Check that an index is in same lane as the base offset.
14219   auto SafeOffset = [&](int Idx) {
14220     return OffsetLane == (Idx / NumEltsPerLane);
14221   };
14222 
14223   // Shift along an input so that the offset base moves to the first element.
14224   auto ShuffleOffset = [&](SDValue V) {
14225     if (!Offset)
14226       return V;
14227 
14228     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
14229     for (int i = 0; i * Scale < NumElements; ++i) {
14230       int SrcIdx = i + Offset;
14231       ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
14232     }
14233     return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
14234   };
14235 
14236   // Found a valid a/zext mask! Try various lowering strategies based on the
14237   // input type and available ISA extensions.
14238   if (Subtarget.hasSSE41()) {
14239     // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
14240     // PUNPCK will catch this in a later shuffle match.
14241     if (Offset && Scale == 2 && VT.is128BitVector())
14242       return SDValue();
14243     MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
14244                                  NumElements / Scale);
14245     InputV = ShuffleOffset(InputV);
14246     InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
14247                                     DL, ExtVT, InputV, DAG);
14248     return DAG.getBitcast(VT, InputV);
14249   }
14250 
14251   assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
14252 
14253   // For any extends we can cheat for larger element sizes and use shuffle
14254   // instructions that can fold with a load and/or copy.
14255   if (AnyExt && EltBits == 32) {
14256     int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
14257                          -1};
14258     return DAG.getBitcast(
14259         VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
14260                         DAG.getBitcast(MVT::v4i32, InputV),
14261                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14262   }
14263   if (AnyExt && EltBits == 16 && Scale > 2) {
14264     int PSHUFDMask[4] = {Offset / 2, -1,
14265                          SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
14266     InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
14267                          DAG.getBitcast(MVT::v4i32, InputV),
14268                          getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
14269     int PSHUFWMask[4] = {1, -1, -1, -1};
14270     unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
14271     return DAG.getBitcast(
14272         VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
14273                         DAG.getBitcast(MVT::v8i16, InputV),
14274                         getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
14275   }
14276 
14277   // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
14278   // to 64-bits.
14279   if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
14280     assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
14281     assert(VT.is128BitVector() && "Unexpected vector width!");
14282 
14283     int LoIdx = Offset * EltBits;
14284     SDValue Lo = DAG.getBitcast(
14285         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
14286                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
14287                                 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
14288 
14289     if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
14290       return DAG.getBitcast(VT, Lo);
14291 
14292     int HiIdx = (Offset + 1) * EltBits;
14293     SDValue Hi = DAG.getBitcast(
14294         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
14295                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
14296                                 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
14297     return DAG.getBitcast(VT,
14298                           DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
14299   }
14300 
14301   // If this would require more than 2 unpack instructions to expand, use
14302   // pshufb when available. We can only use more than 2 unpack instructions
14303   // when zero extending i8 elements which also makes it easier to use pshufb.
14304   if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
14305     assert(NumElements == 16 && "Unexpected byte vector width!");
14306     SDValue PSHUFBMask[16];
14307     for (int i = 0; i < 16; ++i) {
14308       int Idx = Offset + (i / Scale);
14309       if ((i % Scale == 0 && SafeOffset(Idx))) {
14310         PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
14311         continue;
14312       }
14313       PSHUFBMask[i] =
14314           AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
14315     }
14316     InputV = DAG.getBitcast(MVT::v16i8, InputV);
14317     return DAG.getBitcast(
14318         VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
14319                         DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
14320   }
14321 
14322   // If we are extending from an offset, ensure we start on a boundary that
14323   // we can unpack from.
14324   int AlignToUnpack = Offset % (NumElements / Scale);
14325   if (AlignToUnpack) {
14326     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
14327     for (int i = AlignToUnpack; i < NumElements; ++i)
14328       ShMask[i - AlignToUnpack] = i;
14329     InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
14330     Offset -= AlignToUnpack;
14331   }
14332 
14333   // Otherwise emit a sequence of unpacks.
14334   do {
14335     unsigned UnpackLoHi = X86ISD::UNPCKL;
14336     if (Offset >= (NumElements / 2)) {
14337       UnpackLoHi = X86ISD::UNPCKH;
14338       Offset -= (NumElements / 2);
14339     }
14340 
14341     MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
14342     SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
14343                          : getZeroVector(InputVT, Subtarget, DAG, DL);
14344     InputV = DAG.getBitcast(InputVT, InputV);
14345     InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
14346     Scale /= 2;
14347     EltBits *= 2;
14348     NumElements /= 2;
14349   } while (Scale > 1);
14350   return DAG.getBitcast(VT, InputV);
14351 }
14352 
14353 /// Try to lower a vector shuffle as a zero extension on any microarch.
14354 ///
14355 /// This routine will try to do everything in its power to cleverly lower
14356 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
14357 /// check for the profitability of this lowering; it tries to aggressively
14358 /// match this pattern. It will use all of the micro-architectural details it
14359 /// can to emit an efficient lowering. It handles both blends with all-zero
14360 /// inputs (to explicitly zero-extend) and undef lanes (sometimes undef due
14361 /// to masking out later).
14362 ///
14363 /// The reason we have dedicated lowering for zext-style shuffles is that they
14364 /// are both incredibly common and often quite performance sensitive.
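///
/// For example, a v8i16 mask of { 0, Z, 1, Z, 2, Z, 3, Z } (Z zeroable)
/// matches a zero extension of the low four i16 elements of one input to
/// i32; the helper below probes the widest extension (to 64-bit elements)
/// first and halves the scale on each retry.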
14365 static SDValue lowerShuffleAsZeroOrAnyExtend(
14366     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14367     const APInt &Zeroable, const X86Subtarget &Subtarget,
14368     SelectionDAG &DAG) {
14369   int Bits = VT.getSizeInBits();
14370   int NumLanes = Bits / 128;
14371   int NumElements = VT.getVectorNumElements();
14372   int NumEltsPerLane = NumElements / NumLanes;
14373   assert(VT.getScalarSizeInBits() <= 32 &&
14374          "Exceeds 32-bit integer zero extension limit");
14375   assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
14376 
14377   // Define a helper function to check a particular ext-scale and lower to it if
14378   // valid.
14379   auto Lower = [&](int Scale) -> SDValue {
14380     SDValue InputV;
14381     bool AnyExt = true;
14382     int Offset = 0;
14383     int Matches = 0;
14384     for (int i = 0; i < NumElements; ++i) {
14385       int M = Mask[i];
14386       if (M < 0)
14387         continue; // Valid anywhere but doesn't tell us anything.
14388       if (i % Scale != 0) {
14389         // Each of the extended elements needs to be zeroable.
14390         if (!Zeroable[i])
14391           return SDValue();
14392 
14393         // We are no longer in the anyext case.
14394         AnyExt = false;
14395         continue;
14396       }
14397 
14398       // The base elements need to be consecutive indices into the
14399       // same input vector.
14400       SDValue V = M < NumElements ? V1 : V2;
14401       M = M % NumElements;
14402       if (!InputV) {
14403         InputV = V;
14404         Offset = M - (i / Scale);
14405       } else if (InputV != V)
14406         return SDValue(); // Flip-flopping inputs.
14407 
14408       // Offset must start in the lowest 128-bit lane or at the start of an
14409       // upper lane.
14410       // FIXME: Is it ever worth allowing a negative base offset?
14411       if (!((0 <= Offset && Offset < NumEltsPerLane) ||
14412             (Offset % NumEltsPerLane) == 0))
14413         return SDValue();
14414 
14415       // If we are offsetting, all referenced entries must come from the same
14416       // lane.
14417       if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
14418         return SDValue();
14419 
14420       if ((M % NumElements) != (Offset + (i / Scale)))
14421         return SDValue(); // Non-consecutive strided elements.
14422       Matches++;
14423     }
14424 
14425     // If we fail to find an input, we have a zero-shuffle which should always
14426     // have already been handled.
14427     // FIXME: Maybe handle this here in case during blending we end up with one?
14428     if (!InputV)
14429       return SDValue();
14430 
14431     // If we are offsetting, don't extend if we only match a single input; we
14432     // can always do better by using a basic PSHUF or PUNPCK.
14433     if (Offset != 0 && Matches < 2)
14434       return SDValue();
14435 
14436     return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
14437                                                  InputV, Mask, Subtarget, DAG);
14438   };
14439 
14440   // The widest scale possible for extending is to a 64-bit integer.
14441   assert(Bits % 64 == 0 &&
14442          "The number of bits in a vector must be divisible by 64 on x86!");
14443   int NumExtElements = Bits / 64;
14444 
14445   // Each iteration, try extending the elements half as much, but into twice as
14446   // many elements.
14447   for (; NumExtElements < NumElements; NumExtElements *= 2) {
14448     assert(NumElements % NumExtElements == 0 &&
14449            "The input vector size must be divisible by the extended size.");
14450     if (SDValue V = Lower(NumElements / NumExtElements))
14451       return V;
14452   }
14453 
14454   // General extends failed, but 128-bit vectors may be able to use MOVQ.
14455   if (Bits != 128)
14456     return SDValue();
14457 
14458   // Returns one of the source operands if the shuffle can be reduced to a
14459   // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
14460   auto CanZExtLowHalf = [&]() {
14461     for (int i = NumElements / 2; i != NumElements; ++i)
14462       if (!Zeroable[i])
14463         return SDValue();
14464     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
14465       return V1;
14466     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
14467       return V2;
14468     return SDValue();
14469   };
14470 
14471   if (SDValue V = CanZExtLowHalf()) {
14472     V = DAG.getBitcast(MVT::v2i64, V);
14473     V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
14474     return DAG.getBitcast(VT, V);
14475   }
14476 
14477   // No viable ext lowering found.
14478   return SDValue();
14479 }
14480 
14481 /// Try to get a scalar value for a specific element of a vector.
14482 ///
14483 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
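///
/// E.g. for V == (build_vector t0, t1, t2, t3) and Idx == 2 this returns t2
/// (bitcast to the element type if needed); a bitcast that changes the
/// element size blocks the peek-through and nothing is returned.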
14484 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
14485                                               SelectionDAG &DAG) {
14486   MVT VT = V.getSimpleValueType();
14487   MVT EltVT = VT.getVectorElementType();
14488   V = peekThroughBitcasts(V);
14489 
14490   // If the bitcasts shift the element size, we can't extract an equivalent
14491   // element from it.
14492   MVT NewVT = V.getSimpleValueType();
14493   if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
14494     return SDValue();
14495 
14496   if (V.getOpcode() == ISD::BUILD_VECTOR ||
14497       (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
14498     // Ensure the scalar operand is the same size as the destination.
14499     // FIXME: Add support for scalar truncation where possible.
14500     SDValue S = V.getOperand(Idx);
14501     if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
14502       return DAG.getBitcast(EltVT, S);
14503   }
14504 
14505   return SDValue();
14506 }
14507 
14508 /// Helper to test for a load that can be folded with x86 shuffles.
14509 ///
14510 /// This is particularly important because the set of instructions varies
14511 /// significantly based on whether the operand is a load or not.
14512 static bool isShuffleFoldableLoad(SDValue V) {
14513   return V->hasOneUse() &&
14514          ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
14515 }
14516 
14517 template<typename T>
14518 static bool isSoftFP16(T VT, const X86Subtarget &Subtarget) {
14519   return VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16();
14520 }
14521 
14522 template<typename T>
14523 bool X86TargetLowering::isSoftFP16(T VT) const {
14524   return ::isSoftFP16(VT, Subtarget);
14525 }
14526 
14527 /// Try to lower insertion of a single element into a zero vector.
14528 ///
14529 /// This is a common pattern that we have especially efficient patterns to lower
14530 /// across all subtarget feature sets.
14531 static SDValue lowerShuffleAsElementInsertion(
14532     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14533     const APInt &Zeroable, const X86Subtarget &Subtarget,
14534     SelectionDAG &DAG) {
14535   MVT ExtVT = VT;
14536   MVT EltVT = VT.getVectorElementType();
14537 
14538   if (isSoftFP16(EltVT, Subtarget))
14539     return SDValue();
14540 
14541   int V2Index =
14542       find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
14543       Mask.begin();
14544   bool IsV1Zeroable = true;
14545   for (int i = 0, Size = Mask.size(); i < Size; ++i)
14546     if (i != V2Index && !Zeroable[i]) {
14547       IsV1Zeroable = false;
14548       break;
14549     }
14550 
14551   // Check for a single input from a SCALAR_TO_VECTOR node.
14552   // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
14553   // all the smarts here sunk into that routine. However, the current
14554   // lowering of BUILD_VECTOR makes that nearly impossible until the old
14555   // vector shuffle lowering is dead.
14556   SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
14557                                                DAG);
14558   if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
14559     // We need to zext the scalar if it is smaller than an i32.
14560     V2S = DAG.getBitcast(EltVT, V2S);
14561     if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
14562       // Using zext to expand a narrow element won't work for non-zero
14563       // insertions.
14564       if (!IsV1Zeroable)
14565         return SDValue();
14566 
14567       // Zero-extend directly to i32.
14568       ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
14569       V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
14570     }
14571     V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
14572   } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
14573              EltVT == MVT::i16) {
14574     // Either not inserting from the low element of the input or the input
14575     // element size is too small to use VZEXT_MOVL to clear the high bits.
14576     return SDValue();
14577   }
14578 
14579   if (!IsV1Zeroable) {
14580     // If V1 can't be treated as a zero vector we have fewer options to lower
14581     // this. We can't support integer vectors or non-zero targets cheaply, and
14582     // the V1 elements can't be permuted in any way.
14583     assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
14584     if (!VT.isFloatingPoint() || V2Index != 0)
14585       return SDValue();
14586     SmallVector<int, 8> V1Mask(Mask);
14587     V1Mask[V2Index] = -1;
14588     if (!isNoopShuffleMask(V1Mask))
14589       return SDValue();
14590     if (!VT.is128BitVector())
14591       return SDValue();
14592 
14593     // Otherwise, use MOVSD, MOVSS or MOVSH.
14594     unsigned MovOpc = 0;
14595     if (EltVT == MVT::f16)
14596       MovOpc = X86ISD::MOVSH;
14597     else if (EltVT == MVT::f32)
14598       MovOpc = X86ISD::MOVSS;
14599     else if (EltVT == MVT::f64)
14600       MovOpc = X86ISD::MOVSD;
14601     else
14602       llvm_unreachable("Unsupported floating point element type to handle!");
14603     return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
14604   }
14605 
14606   // This lowering only works for the low element with floating point vectors.
14607   if (VT.isFloatingPoint() && V2Index != 0)
14608     return SDValue();
14609 
14610   V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
14611   if (ExtVT != VT)
14612     V2 = DAG.getBitcast(VT, V2);
14613 
14614   if (V2Index != 0) {
14615     // If we have 4 or fewer lanes we can cheaply shuffle the element into
14616     // the desired position. Otherwise it is more efficient to do a vector
14617     // shift left. We know that we can do a vector shift left because all
14618     // the inputs are zero.
14619     if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
14620       SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
14621       V2Shuffle[V2Index] = 0;
14622       V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
14623     } else {
14624       V2 = DAG.getBitcast(MVT::v16i8, V2);
14625       V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
14626                        DAG.getTargetConstant(
14627                            V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
14628       V2 = DAG.getBitcast(VT, V2);
14629     }
14630   }
14631   return V2;
14632 }
14633 
14634 /// Try to lower a broadcast of a single (truncated) integer element,
14635 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
14636 ///
14637 /// This assumes we have AVX2.
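///
/// Illustrative case: broadcasting v8i16 element 3 when \p V0 is a v2i64
/// build_vector uses scalar operand 3 / 4 == 0, shifts it right by
/// (3 % 4) * 16 == 48 bits, truncates to i16 and broadcasts the result.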
14638 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
14639                                             int BroadcastIdx,
14640                                             const X86Subtarget &Subtarget,
14641                                             SelectionDAG &DAG) {
14642   assert(Subtarget.hasAVX2() &&
14643          "We can only lower integer broadcasts with AVX2!");
14644 
14645   MVT EltVT = VT.getVectorElementType();
14646   MVT V0VT = V0.getSimpleValueType();
14647 
14648   assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
14649   assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
14650 
14651   MVT V0EltVT = V0VT.getVectorElementType();
14652   if (!V0EltVT.isInteger())
14653     return SDValue();
14654 
14655   const unsigned EltSize = EltVT.getSizeInBits();
14656   const unsigned V0EltSize = V0EltVT.getSizeInBits();
14657 
14658   // This is only a truncation if the original element type is larger.
14659   if (V0EltSize <= EltSize)
14660     return SDValue();
14661 
14662   assert(((V0EltSize % EltSize) == 0) &&
14663          "Scalar type sizes must all be powers of 2 on x86!");
14664 
14665   const unsigned V0Opc = V0.getOpcode();
14666   const unsigned Scale = V0EltSize / EltSize;
14667   const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
14668 
14669   if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
14670       V0Opc != ISD::BUILD_VECTOR)
14671     return SDValue();
14672 
14673   SDValue Scalar = V0.getOperand(V0BroadcastIdx);
14674 
14675   // If we're extracting non-least-significant bits, shift so we can truncate.
14676   // Hopefully, we can fold away the trunc/srl/load into the broadcast.
14677   // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
14678   // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
14679   if (const int OffsetIdx = BroadcastIdx % Scale)
14680     Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
14681                          DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
14682 
14683   return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
14684                      DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
14685 }
14686 
14687 /// Test whether this can be lowered with a single SHUFPS instruction.
14688 ///
14689 /// This is used to disable more specialized lowerings when the shufps lowering
14690 /// will happen to be efficient.
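///
/// For example, { 0, 1, 4, 5 } takes its low half from one input and its
/// high half from the other, so a single SHUFPS suffices, whereas
/// { 0, 4, 1, 5 } needs both inputs in each half and is rejected.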
14691 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
14692   // This routine only handles 128-bit shufps.
14693   assert(Mask.size() == 4 && "Unsupported mask size!");
14694   assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
14695   assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
14696   assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
14697   assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
14698 
14699   // To lower with a single SHUFPS we need to have the low half and high half
14700   // each requiring a single input.
14701   if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
14702     return false;
14703   if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
14704     return false;
14705 
14706   return true;
14707 }
14708 
14709 /// Test whether the specified input (0 or 1) is in-place blended by the
14710 /// given mask.
14711 ///
14712 /// This returns true if the elements from a particular input are already in the
14713 /// slot required by the given mask and require no permutation.
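///
/// E.g. for Input == 0, the mask { 0, 1, 6, 7 } keeps the first input's
/// elements in their original slots and returns true, whereas
/// { 1, 0, 6, 7 } moves them and returns false.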
14714 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
14715   assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
14716   int Size = Mask.size();
14717   for (int i = 0; i < Size; ++i)
14718     if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
14719       return false;
14720 
14721   return true;
14722 }
14723 
14724 /// If we are extracting two 128-bit halves of a vector and shuffling the
14725 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
14726 /// multi-shuffle lowering.
14727 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
14728                                              SDValue N1, ArrayRef<int> Mask,
14729                                              SelectionDAG &DAG) {
14730   MVT VT = N0.getSimpleValueType();
14731   assert((VT.is128BitVector() &&
14732           (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
14733          "VPERM* family of shuffles requires 32-bit or 64-bit elements");
14734 
14735   // Check that both sources are extracts of the same source vector.
14736   if (!N0.hasOneUse() || !N1.hasOneUse() ||
14737       N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
14738       N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
14739       N0.getOperand(0) != N1.getOperand(0))
14740     return SDValue();
14741 
14742   SDValue WideVec = N0.getOperand(0);
14743   MVT WideVT = WideVec.getSimpleValueType();
14744   if (!WideVT.is256BitVector())
14745     return SDValue();
14746 
14747   // Match extracts of each half of the wide source vector. Commute the shuffle
14748   // if the extract of the low half is N1.
14749   unsigned NumElts = VT.getVectorNumElements();
14750   SmallVector<int, 4> NewMask(Mask);
14751   const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
14752   const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
14753   if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
14754     ShuffleVectorSDNode::commuteMask(NewMask);
14755   else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
14756     return SDValue();
14757 
14758   // Final bailout: if the mask is simple, we are better off using an extract
14759   // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
14760   // because that avoids a constant load from memory.
14761   if (NumElts == 4 &&
14762       (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
14763     return SDValue();
14764 
14765   // Extend the shuffle mask with undef elements.
14766   NewMask.append(NumElts, -1);
14767 
14768   // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
14769   SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
14770                                       NewMask);
14771   // This is free: ymm -> xmm.
14772   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
14773                      DAG.getIntPtrConstant(0, DL));
14774 }
14775 
14776 /// Try to lower broadcast of a single element.
14777 ///
14778 /// For convenience, this code also bundles all of the subtarget feature set
14779 /// filtering. While a little annoying to re-dispatch on type here, there isn't
14780 /// a convenient way to factor it out.
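///
/// As the checks below encode: pre-AVX2 only the v2f64 MOVDDUP form can
/// broadcast from a register, other AVX floating-point broadcasts must fold
/// a scalar load, and with AVX2 broadcasts from a register are available for
/// all supported types.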
14781 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
14782                                        SDValue V2, ArrayRef<int> Mask,
14783                                        const X86Subtarget &Subtarget,
14784                                        SelectionDAG &DAG) {
14785   if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
14786         (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
14787         (Subtarget.hasAVX2() && VT.isInteger())))
14788     return SDValue();
14789 
14790   // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
14791   // we can only broadcast from a register with AVX2.
14792   unsigned NumEltBits = VT.getScalarSizeInBits();
14793   unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
14794                         ? X86ISD::MOVDDUP
14795                         : X86ISD::VBROADCAST;
14796   bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
14797 
14798   // Check that the mask is a broadcast.
14799   int BroadcastIdx = getSplatIndex(Mask);
14800   if (BroadcastIdx < 0)
14801     return SDValue();
14802   assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
14803                                             "a sorted mask where the broadcast "
14804                                             "comes from V1.");
14805 
14806   // Go up the chain of (vector) values to find a scalar load that we can
14807   // combine with the broadcast.
14808   // TODO: Combine this logic with findEltLoadSrc() used by
14809   //       EltsFromConsecutiveLoads().
14810   int BitOffset = BroadcastIdx * NumEltBits;
14811   SDValue V = V1;
14812   for (;;) {
14813     switch (V.getOpcode()) {
14814     case ISD::BITCAST: {
14815       V = V.getOperand(0);
14816       continue;
14817     }
14818     case ISD::CONCAT_VECTORS: {
14819       int OpBitWidth = V.getOperand(0).getValueSizeInBits();
14820       int OpIdx = BitOffset / OpBitWidth;
14821       V = V.getOperand(OpIdx);
14822       BitOffset %= OpBitWidth;
14823       continue;
14824     }
14825     case ISD::EXTRACT_SUBVECTOR: {
14826       // The extraction index adds to the existing offset.
14827       unsigned EltBitWidth = V.getScalarValueSizeInBits();
14828       unsigned Idx = V.getConstantOperandVal(1);
14829       unsigned BeginOffset = Idx * EltBitWidth;
14830       BitOffset += BeginOffset;
14831       V = V.getOperand(0);
14832       continue;
14833     }
14834     case ISD::INSERT_SUBVECTOR: {
14835       SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
14836       int EltBitWidth = VOuter.getScalarValueSizeInBits();
14837       int Idx = (int)V.getConstantOperandVal(2);
14838       int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
14839       int BeginOffset = Idx * EltBitWidth;
14840       int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
14841       if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
14842         BitOffset -= BeginOffset;
14843         V = VInner;
14844       } else {
14845         V = VOuter;
14846       }
14847       continue;
14848     }
14849     }
14850     break;
14851   }
14852   assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
14853   BroadcastIdx = BitOffset / NumEltBits;
14854 
14855   // Do we need to bitcast the source to retrieve the original broadcast index?
14856   bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
14857 
14858   // Check if this is a broadcast of a scalar. We special case lowering
14859   // for scalars so that we can more effectively fold with loads.
14860   // If the original value has a larger element type than the shuffle, the
14861   // broadcast element is in essence truncated. Make that explicit to ease
14862   // folding.
14863   if (BitCastSrc && VT.isInteger())
14864     if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
14865             DL, VT, V, BroadcastIdx, Subtarget, DAG))
14866       return TruncBroadcast;
14867 
14868   // Also check the simpler case, where we can directly reuse the scalar.
14869   if (!BitCastSrc &&
14870       ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
14871        (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
14872     V = V.getOperand(BroadcastIdx);
14873 
14874     // If we can't broadcast from a register, check that the input is a load.
14875     if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
14876       return SDValue();
14877   } else if (ISD::isNormalLoad(V.getNode()) &&
14878              cast<LoadSDNode>(V)->isSimple()) {
14879     // We do not check for one-use of the vector load because a broadcast load
14880     // is expected to be a win for code size, register pressure, and possibly
14881     // uops even if the original vector load is not eliminated.
14882 
14883     // Reduce the vector load and shuffle to a broadcasted scalar load.
14884     LoadSDNode *Ld = cast<LoadSDNode>(V);
14885     SDValue BaseAddr = Ld->getOperand(1);
14886     MVT SVT = VT.getScalarType();
14887     unsigned Offset = BroadcastIdx * SVT.getStoreSize();
14888     assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
14889     SDValue NewAddr =
14890         DAG.getMemBasePlusOffset(BaseAddr, TypeSize::Fixed(Offset), DL);
14891 
14892     // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
14893     // than MOVDDUP.
14894     // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
14895     if (Opcode == X86ISD::VBROADCAST) {
14896       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
14897       SDValue Ops[] = {Ld->getChain(), NewAddr};
14898       V = DAG.getMemIntrinsicNode(
14899           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
14900           DAG.getMachineFunction().getMachineMemOperand(
14901               Ld->getMemOperand(), Offset, SVT.getStoreSize()));
14902       DAG.makeEquivalentMemoryOrdering(Ld, V);
14903       return DAG.getBitcast(VT, V);
14904     }
14905     assert(SVT == MVT::f64 && "Unexpected VT!");
14906     V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
14907                     DAG.getMachineFunction().getMachineMemOperand(
14908                         Ld->getMemOperand(), Offset, SVT.getStoreSize()));
14909     DAG.makeEquivalentMemoryOrdering(Ld, V);
14910   } else if (!BroadcastFromReg) {
14911     // We can't broadcast from a vector register.
14912     return SDValue();
14913   } else if (BitOffset != 0) {
14914     // We can only broadcast from the zero-element of a vector register,
14915     // but it can be advantageous to broadcast from the zero-element of a
14916     // subvector.
14917     if (!VT.is256BitVector() && !VT.is512BitVector())
14918       return SDValue();
14919 
14920     // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
14921     if (VT == MVT::v4f64 || VT == MVT::v4i64)
14922       return SDValue();
14923 
14924     // Only broadcast the zero-element of a 128-bit subvector.
14925     if ((BitOffset % 128) != 0)
14926       return SDValue();
14927 
14928     assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
14929            "Unexpected bit-offset");
14930     assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
14931            "Unexpected vector size");
14932     unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
14933     V = extract128BitVector(V, ExtractIdx, DAG, DL);
14934   }
14935 
14936   // On AVX we can use VBROADCAST directly for scalar sources.
14937   if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
14938     V = DAG.getBitcast(MVT::f64, V);
14939     if (Subtarget.hasAVX()) {
14940       V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
14941       return DAG.getBitcast(VT, V);
14942     }
14943     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
14944   }
14945 
14946   // If this is a scalar, do the broadcast on this type and bitcast.
14947   if (!V.getValueType().isVector()) {
14948     assert(V.getScalarValueSizeInBits() == NumEltBits &&
14949            "Unexpected scalar size");
14950     MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
14951                                        VT.getVectorNumElements());
14952     return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
14953   }
14954 
14955   // We only support broadcasting from 128-bit vectors to minimize the
14956   // number of patterns we need to deal with in isel. So extract down to
14957   // 128-bits, removing as many bitcasts as possible.
14958   if (V.getValueSizeInBits() > 128)
14959     V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
14960 
14961   // Otherwise cast V to a vector with the same element type as VT, but
14962   // possibly narrower than VT. Then perform the broadcast.
14963   unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
14964   MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
14965   return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
14966 }
14967 
14968 // Check for whether we can use INSERTPS to perform the shuffle. We only use
14969 // INSERTPS when the V1 elements are already in the correct locations
14970 // because otherwise we can just always use two SHUFPS instructions which
14971 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
14972 // perform INSERTPS if a single V1 element is out of place and all V2
14973 // elements are zeroable.
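// The INSERTPS immediate built below packs the V2 source lane in bits [7:6],
// the destination lane in bits [5:4] and the zero mask in bits [3:0]; e.g.
// inserting element 2 of V2 into lane 1 of the result while zeroing lane 3
// uses the immediate 0x98.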
14974 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
14975                                    unsigned &InsertPSMask,
14976                                    const APInt &Zeroable,
14977                                    ArrayRef<int> Mask, SelectionDAG &DAG) {
14978   assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
14979   assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
14980   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
14981 
14982   // Attempt to match INSERTPS with one element from VA or VB being
14983   // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
14984   // are updated.
14985   auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
14986                              ArrayRef<int> CandidateMask) {
14987     unsigned ZMask = 0;
14988     int VADstIndex = -1;
14989     int VBDstIndex = -1;
14990     bool VAUsedInPlace = false;
14991 
14992     for (int i = 0; i < 4; ++i) {
14993       // Synthesize a zero mask from the zeroable elements (includes undefs).
14994       if (Zeroable[i]) {
14995         ZMask |= 1 << i;
14996         continue;
14997       }
14998 
14999       // Flag if we use any VA inputs in place.
15000       if (i == CandidateMask[i]) {
15001         VAUsedInPlace = true;
15002         continue;
15003       }
15004 
15005       // We can only insert a single non-zeroable element.
15006       if (VADstIndex >= 0 || VBDstIndex >= 0)
15007         return false;
15008 
15009       if (CandidateMask[i] < 4) {
15010         // VA input out of place for insertion.
15011         VADstIndex = i;
15012       } else {
15013         // VB input for insertion.
15014         VBDstIndex = i;
15015       }
15016     }
15017 
15018     // Don't bother if we have no (non-zeroable) element for insertion.
15019     if (VADstIndex < 0 && VBDstIndex < 0)
15020       return false;
15021 
15022     // Determine element insertion src/dst indices. The src index is from the
15023     // start of the inserted vector, not the start of the concatenated vector.
15024     unsigned VBSrcIndex = 0;
15025     if (VADstIndex >= 0) {
15026       // If we have a VA input out of place, we use VA as the V2 element
15027       // insertion and don't use the original V2 at all.
15028       VBSrcIndex = CandidateMask[VADstIndex];
15029       VBDstIndex = VADstIndex;
15030       VB = VA;
15031     } else {
15032       VBSrcIndex = CandidateMask[VBDstIndex] - 4;
15033     }
15034 
15035     // If no V1 inputs are used in place, then the result is created only from
15036     // the zero mask and the V2 insertion - so remove V1 dependency.
15037     if (!VAUsedInPlace)
15038       VA = DAG.getUNDEF(MVT::v4f32);
15039 
15040     // Update V1, V2 and InsertPSMask accordingly.
15041     V1 = VA;
15042     V2 = VB;
15043 
15044     // Insert the V2 element into the desired position.
15045     InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
15046     assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
15047     return true;
15048   };
15049 
15050   if (matchAsInsertPS(V1, V2, Mask))
15051     return true;
15052 
15053   // Commute and try again.
15054   SmallVector<int, 4> CommutedMask(Mask);
15055   ShuffleVectorSDNode::commuteMask(CommutedMask);
15056   if (matchAsInsertPS(V2, V1, CommutedMask))
15057     return true;
15058 
15059   return false;
15060 }
15061 
15062 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
15063                                       ArrayRef<int> Mask, const APInt &Zeroable,
15064                                       SelectionDAG &DAG) {
15065   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15066   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15067 
15068   // Attempt to match the insertps pattern.
15069   unsigned InsertPSMask = 0;
15070   if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
15071     return SDValue();
15072 
15073   // Insert the V2 element into the desired position.
15074   return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
15075                      DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
15076 }
15077 
15078 /// Handle lowering of 2-lane 64-bit floating point shuffles.
15079 ///
15080 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
15081 /// support for floating point shuffles but not integer shuffles. These
15082 /// instructions will incur a domain crossing penalty on some chips though so
15083 /// it is better to avoid lowering through this for integer vectors where
15084 /// possible.
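///
/// For the single-input case the SHUFPD immediate simply selects which input
/// double feeds each result lane; e.g. Mask == { 1, 1 } yields an immediate
/// of 0x3, duplicating the high double into both lanes.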
15085 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15086                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15087                                  const X86Subtarget &Subtarget,
15088                                  SelectionDAG &DAG) {
15089   assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
15090   assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
15091   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
15092 
15093   if (V2.isUndef()) {
15094     // Check for being able to broadcast a single element.
15095     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
15096                                                     Mask, Subtarget, DAG))
15097       return Broadcast;
15098 
15099     // Straight shuffle of a single input vector. Simulate this by using the
15100     // single input as both of the "inputs" to this instruction.
15101     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
15102 
15103     if (Subtarget.hasAVX()) {
15104       // If we have AVX, we can use VPERMILPD which will allow folding a load
15105       // into the shuffle.
15106       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
15107                          DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15108     }
15109 
15110     return DAG.getNode(
15111         X86ISD::SHUFP, DL, MVT::v2f64,
15112         Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
15113         Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
15114         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15115   }
15116   assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
15117   assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
15118   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
15119   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
15120 
15121   if (Subtarget.hasAVX2())
15122     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15123       return Extract;
15124 
15125   // When loading a scalar and then shuffling it into a vector we can often do
15126   // the insertion cheaply.
15127   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15128           DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
15129     return Insertion;
15130   // Try inverting the insertion since for v2 masks it is easy to do and we
15131   // can't reliably sort the mask one way or the other.
15132   int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
15133                         Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
15134   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15135           DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
15136     return Insertion;
15137 
15138   // Try to use one of the special instruction patterns to handle two common
15139   // blend patterns if a zero-blend above didn't work.
15140   if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
15141       isShuffleEquivalent(Mask, {1, 3}, V1, V2))
15142     if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
15143       // We can either use a special instruction to load over the low double or
15144       // to move just the low double.
15145       return DAG.getNode(
15146           X86ISD::MOVSD, DL, MVT::v2f64, V2,
15147           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
15148 
15149   if (Subtarget.hasSSE41())
15150     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
15151                                             Zeroable, Subtarget, DAG))
15152       return Blend;
15153 
15154   // Use dedicated unpack instructions for masks that match their pattern.
15155   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
15156     return V;
15157 
15158   unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
15159   return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
15160                      DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15161 }
15162 
15163 /// Handle lowering of 2-lane 64-bit integer shuffles.
15164 ///
15165 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
15166 /// the integer unit to minimize domain crossing penalties. However, for blends
15167 /// it falls back to the floating point shuffle operation with appropriate bit
15168 /// casting.
15169 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15170                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15171                                  const X86Subtarget &Subtarget,
15172                                  SelectionDAG &DAG) {
15173   assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
15174   assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
15175   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
15176 
15177   if (V2.isUndef()) {
15178     // Check for being able to broadcast a single element.
15179     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
15180                                                     Mask, Subtarget, DAG))
15181       return Broadcast;
15182 
15183     // Straight shuffle of a single input vector. For everything from SSE2
15184     // onward this has a single fast instruction with no scary immediates.
15185     // We have to map the mask as it is actually a v4i32 shuffle instruction.
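    // E.g. a v2i64 mask of { 1, 0 } becomes the v4i32 PSHUFD mask
    // { 2, 3, 0, 1 }.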
15186     V1 = DAG.getBitcast(MVT::v4i32, V1);
15187     int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
15188                           Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
15189                           Mask[1] < 0 ? -1 : (Mask[1] * 2),
15190                           Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
15191     return DAG.getBitcast(
15192         MVT::v2i64,
15193         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
15194                     getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
15195   }
15196   assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
15197   assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
15198   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
15199   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
15200 
15201   if (Subtarget.hasAVX2())
15202     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15203       return Extract;
15204 
15205   // Try to use shift instructions.
15206   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
15207                                           Zeroable, Subtarget, DAG))
15208     return Shift;
15209 
15210   // When loading a scalar and then shuffling it into a vector we can often do
15211   // the insertion cheaply.
15212   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15213           DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
15214     return Insertion;
15215   // Try inverting the insertion since for v2 masks it is easy to do and we
15216   // can't reliably sort the mask one way or the other.
15217   int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
15218   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15219           DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
15220     return Insertion;
15221 
15222   // We have different paths for blend lowering, but they all must use the
15223   // *exact* same predicate.
15224   bool IsBlendSupported = Subtarget.hasSSE41();
15225   if (IsBlendSupported)
15226     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
15227                                             Zeroable, Subtarget, DAG))
15228       return Blend;
15229 
15230   // Use dedicated unpack instructions for masks that match their pattern.
15231   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
15232     return V;
15233 
15234   // Try to use byte rotation instructions.
15235   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
15236   if (Subtarget.hasSSSE3()) {
15237     if (Subtarget.hasVLX())
15238       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
15239                                                 Subtarget, DAG))
15240         return Rotate;
15241 
15242     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
15243                                                   Subtarget, DAG))
15244       return Rotate;
15245   }
15246 
15247   // If we have direct support for blends, we should lower by decomposing into
15248   // a permute. That will be faster than the domain cross.
15249   if (IsBlendSupported)
15250     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
15251                                                 Subtarget, DAG);
15252 
15253   // We implement this with SHUFPD which is pretty lame because it will likely
15254   // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
15255   // However, all the alternatives are still more cycles and newer chips don't
15256   // have this problem. It would be really nice if x86 had better shuffles here.
15257   V1 = DAG.getBitcast(MVT::v2f64, V1);
15258   V2 = DAG.getBitcast(MVT::v2f64, V2);
15259   return DAG.getBitcast(MVT::v2i64,
15260                         DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
15261 }
15262 
15263 /// Lower a vector shuffle using the SHUFPS instruction.
15264 ///
15265 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
15266 /// It makes no assumptions about whether this is the *best* lowering, it simply
15267 /// uses it.
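///
/// A rough sketch of the strategy below: with exactly one V2 element we
/// either fold it in directly or pre-blend it next to the V1 element it will
/// pair with; with two V2 elements we arrange for each half of the result to
/// read from a single source (pre-blending if the halves are mixed); with
/// three V2 elements we commute the mask and recurse.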
15268 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
15269                                       ArrayRef<int> Mask, SDValue V1,
15270                                       SDValue V2, SelectionDAG &DAG) {
15271   SDValue LowV = V1, HighV = V2;
15272   SmallVector<int, 4> NewMask(Mask);
15273   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15274 
15275   if (NumV2Elements == 1) {
15276     int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
15277 
15278     // Compute the index adjacent to V2Index and in the same half by toggling
15279     // the low bit.
15280     int V2AdjIndex = V2Index ^ 1;
15281 
15282     if (Mask[V2AdjIndex] < 0) {
15283       // Handles all the cases where we have a single V2 element and an undef.
15284       // This will only ever happen in the high lanes because we commute the
15285       // vector otherwise.
15286       if (V2Index < 2)
15287         std::swap(LowV, HighV);
15288       NewMask[V2Index] -= 4;
15289     } else {
15290       // Handle the case where the V2 element ends up adjacent to a V1 element.
15291       // To make this work, blend them together as the first step.
15292       int V1Index = V2AdjIndex;
15293       int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
15294       V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
15295                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
15296 
15297       // Now proceed to reconstruct the final blend as we have the necessary
15298       // high or low half formed.
15299       if (V2Index < 2) {
15300         LowV = V2;
15301         HighV = V1;
15302       } else {
15303         HighV = V2;
15304       }
15305       NewMask[V1Index] = 2; // We put the V1 element in V2[2].
15306       NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
15307     }
15308   } else if (NumV2Elements == 2) {
15309     if (Mask[0] < 4 && Mask[1] < 4) {
15310       // Handle the easy case where we have V1 in the low lanes and V2 in the
15311       // high lanes.
15312       NewMask[2] -= 4;
15313       NewMask[3] -= 4;
15314     } else if (Mask[2] < 4 && Mask[3] < 4) {
15315       // We also handle the reversed case because this utility may get called
15316       // when we detect a SHUFPS pattern but can't easily commute the shuffle to
15317       // arrange things in the right direction.
15318       NewMask[0] -= 4;
15319       NewMask[1] -= 4;
15320       HighV = V1;
15321       LowV = V2;
15322     } else {
15323       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
15324       // trying to place elements directly, just blend them and set up the final
15325       // shuffle to place them.
15326 
15327       // The first two blend mask elements are for V1, the second two are for
15328       // V2.
15329       int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
15330                           Mask[2] < 4 ? Mask[2] : Mask[3],
15331                           (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
15332                           (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
15333       V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15334                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
15335 
15336       // Now we do a normal shuffle of V1 by giving V1 as both operands to
15337       // a blend.
15338       LowV = HighV = V1;
15339       NewMask[0] = Mask[0] < 4 ? 0 : 2;
15340       NewMask[1] = Mask[0] < 4 ? 2 : 0;
15341       NewMask[2] = Mask[2] < 4 ? 1 : 3;
15342       NewMask[3] = Mask[2] < 4 ? 3 : 1;
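      // Editorial worked example (not in the original source): for an
      // original mask {0, 5, 2, 7}, BlendMask is {0, 2, 1, 3}, so the SHUFP
      // above produces {V1[0], V1[2], V2[1], V2[3]}; the final NewMask
      // {0, 2, 1, 3} then reorders that blend into {V1[0], V2[1], V1[2],
      // V2[3]}, matching the requested shuffle.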
15343     }
15344   } else if (NumV2Elements == 3) {
15345     // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
15346     // we can get here via other paths (e.g. repeated mask matching) where we
15347     // don't want to do another round of lowerVECTOR_SHUFFLE.
15348     ShuffleVectorSDNode::commuteMask(NewMask);
15349     return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
15350   }
15351   return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
15352                      getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
15353 }
15354 
15355 /// Lower 4-lane 32-bit floating point shuffles.
15356 ///
15357 /// Uses instructions exclusively from the floating point unit to minimize
15358 /// domain crossing penalties, as these are sufficient to implement all v4f32
15359 /// shuffles.
15360 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15361                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15362                                  const X86Subtarget &Subtarget,
15363                                  SelectionDAG &DAG) {
15364   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15365   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15366   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15367 
15368   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15369 
15370   if (NumV2Elements == 0) {
15371     // Check for being able to broadcast a single element.
15372     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
15373                                                     Mask, Subtarget, DAG))
15374       return Broadcast;
15375 
15376     // Use even/odd duplicate instructions for masks that match their pattern.
15377     if (Subtarget.hasSSE3()) {
15378       if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
15379         return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
15380       if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
15381         return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
15382     }
15383 
15384     if (Subtarget.hasAVX()) {
15385       // If we have AVX, we can use VPERMILPS which will allow folding a load
15386       // into the shuffle.
15387       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
15388                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15389     }
15390 
15391     // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
15392     // in SSE1 because otherwise they are widened to v2f64 and never get here.
15393     if (!Subtarget.hasSSE2()) {
15394       if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
15395         return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
15396       if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
15397         return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
15398     }
15399 
15400     // Otherwise, use a straight shuffle of a single input vector. We pass the
15401     // input vector to both operands to simulate this with a SHUFPS.
15402     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
15403                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15404   }
15405 
15406   if (Subtarget.hasAVX2())
15407     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15408       return Extract;
15409 
15410   // There are special ways we can lower some single-element blends. However, we
15411   // have custom ways we can lower more complex single-element blends below that
15412   // we defer to if both this and BLENDPS fail to match, so restrict this to
15413   // when the V2 input is targeting element 0 of the mask -- that is the fast
15414   // case here.
15415   if (NumV2Elements == 1 && Mask[0] >= 4)
15416     if (SDValue V = lowerShuffleAsElementInsertion(
15417             DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
15418       return V;
15419 
15420   if (Subtarget.hasSSE41()) {
15421     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
15422                                             Zeroable, Subtarget, DAG))
15423       return Blend;
15424 
15425     // Use INSERTPS if we can complete the shuffle efficiently.
15426     if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
15427       return V;
15428 
15429     if (!isSingleSHUFPSMask(Mask))
15430       if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
15431                                                             V2, Mask, DAG))
15432         return BlendPerm;
15433   }
15434 
15435   // Use low/high mov instructions. These are only valid in SSE1 because
15436   // otherwise they are widened to v2f64 and never get here.
15437   if (!Subtarget.hasSSE2()) {
15438     if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
15439       return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
15440     if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
15441       return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
15442   }
15443 
15444   // Use dedicated unpack instructions for masks that match their pattern.
15445   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
15446     return V;
15447 
15448   // Otherwise fall back to a SHUFPS lowering strategy.
15449   return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
15450 }
15451 
15452 /// Lower 4-lane i32 vector shuffles.
15453 ///
15454 /// We try to handle these with integer-domain shuffles where we can, but for
15455 /// blends we use the floating point domain blend instructions.
15456 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15457                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15458                                  const X86Subtarget &Subtarget,
15459                                  SelectionDAG &DAG) {
15460   assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
15461   assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
15462   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15463 
15464   // Whenever we can lower this as a zext, that instruction is strictly faster
15465   // than any alternative. It also allows us to fold memory operands into the
15466   // shuffle in many cases.
15467   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
15468                                                    Zeroable, Subtarget, DAG))
15469     return ZExt;
15470 
15471   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15472 
15473   if (NumV2Elements == 0) {
15474     // Try to use broadcast unless the mask only has one non-undef element.
15475     if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
15476       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
15477                                                       Mask, Subtarget, DAG))
15478         return Broadcast;
15479     }
15480 
15481     // Straight shuffle of a single input vector. For everything from SSE2
15482     // onward this has a single fast instruction with no scary immediates.
15483     // We coerce the shuffle pattern to be compatible with UNPCK instructions
15484     // but we aren't actually going to use the UNPCK instruction because doing
15485     // so prevents folding a load into this instruction or making a copy.
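    // Editorial example: a {0, 0, 1, 1} request becomes PSHUFD with immediate
    // 0 | (0 << 2) | (1 << 4) | (1 << 6) = 0x50, the same result UNPCKLDQ of
    // V1 with itself would give, but kept in PSHUFD form for load folding.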
15486     const int UnpackLoMask[] = {0, 0, 1, 1};
15487     const int UnpackHiMask[] = {2, 2, 3, 3};
15488     if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
15489       Mask = UnpackLoMask;
15490     else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
15491       Mask = UnpackHiMask;
15492 
15493     return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
15494                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15495   }
15496 
15497   if (Subtarget.hasAVX2())
15498     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15499       return Extract;
15500 
15501   // Try to use shift instructions.
15502   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
15503                                           Zeroable, Subtarget, DAG))
15504     return Shift;
15505 
15506   // There are special ways we can lower some single-element blends.
15507   if (NumV2Elements == 1)
15508     if (SDValue V = lowerShuffleAsElementInsertion(
15509             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
15510       return V;
15511 
15512   // We have different paths for blend lowering, but they all must use the
15513   // *exact* same predicate.
15514   bool IsBlendSupported = Subtarget.hasSSE41();
15515   if (IsBlendSupported)
15516     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
15517                                             Zeroable, Subtarget, DAG))
15518       return Blend;
15519 
15520   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
15521                                              Zeroable, Subtarget, DAG))
15522     return Masked;
15523 
15524   // Use dedicated unpack instructions for masks that match their pattern.
15525   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
15526     return V;
15527 
15528   // Try to use byte rotation instructions.
15529   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
15530   if (Subtarget.hasSSSE3()) {
15531     if (Subtarget.hasVLX())
15532       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
15533                                                 Subtarget, DAG))
15534         return Rotate;
15535 
15536     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
15537                                                   Subtarget, DAG))
15538       return Rotate;
15539   }
15540 
15541   // Assume that a single SHUFPS is faster than an alternative sequence of
15542   // multiple instructions (even if the CPU has a domain penalty).
15543   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15544   if (!isSingleSHUFPSMask(Mask)) {
15545     // If we have direct support for blends, we should lower by decomposing into
15546     // a permute. That will be faster than the domain cross.
15547     if (IsBlendSupported)
15548       return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
15549                                                   Subtarget, DAG);
15550 
15551     // Try to lower by permuting the inputs into an unpack instruction.
15552     if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
15553                                                         Mask, Subtarget, DAG))
15554       return Unpack;
15555   }
15556 
15557   // We implement this with SHUFPS because it can blend from two vectors.
15558   // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
15559   // up the inputs, bypassing domain shift penalties that we would incur if we
15560   // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
15561   // relevant.
15562   SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
15563   SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
15564   SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
15565   return DAG.getBitcast(MVT::v4i32, ShufPS);
15566 }
15567 
15568 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
15569 /// shuffle lowering, and the most complex part.
15570 ///
15571 /// The lowering strategy is to try to form pairs of input lanes which are
15572 /// targeted at the same half of the final vector, and then use a dword shuffle
15573 /// to place them onto the right half, and finally unpack the paired lanes into
15574 /// their final position.
15575 ///
15576 /// The exact breakdown of how to form these dword pairs and align them on the
15577 /// correct sides is really tricky. See the comments within the function for
15578 /// more of the details.
15579 ///
15580 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
15581 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
15582 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
15583 /// vector, form the analogous 128-bit 8-element Mask.
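///
/// Rough illustration (editorial, not from the original comment): the mask
/// [4, 5, 0, 1, 6, 7, 2, 3] already groups words into dword pairs, so the
/// whole shuffle conceptually reduces to a dword shuffle of the i32 view with
/// mask [2, 0, 3, 1]; less regular masks first need PSHUFLW/PSHUFHW to form
/// such pairs before the PSHUFD can place them.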
15584 static SDValue lowerV8I16GeneralSingleInputShuffle(
15585     const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
15586     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15587   assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
15588   MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
15589 
15590   assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
15591   MutableArrayRef<int> LoMask = Mask.slice(0, 4);
15592   MutableArrayRef<int> HiMask = Mask.slice(4, 4);
15593 
15594   // Attempt to directly match PSHUFLW or PSHUFHW.
15595   if (isUndefOrInRange(LoMask, 0, 4) &&
15596       isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
15597     return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
15598                        getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
15599   }
15600   if (isUndefOrInRange(HiMask, 4, 8) &&
15601       isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
15602     for (int i = 0; i != 4; ++i)
15603       HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
15604     return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
15605                        getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
15606   }
15607 
15608   SmallVector<int, 4> LoInputs;
15609   copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
15610   array_pod_sort(LoInputs.begin(), LoInputs.end());
15611   LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
15612   SmallVector<int, 4> HiInputs;
15613   copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
15614   array_pod_sort(HiInputs.begin(), HiInputs.end());
15615   HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
15616   int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
15617   int NumHToL = LoInputs.size() - NumLToL;
15618   int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
15619   int NumHToH = HiInputs.size() - NumLToH;
15620   MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
15621   MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
15622   MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
15623   MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
15624 
15625   // If we are shuffling values from one half - check how many different DWORD
15626   // pairs we need to create. If only 1 or 2 then we can perform this as a
15627   // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
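  // Editorial example (not in the original source): for the low-half-only
  // mask [1, 0, 1, 0, 3, 2, 3, 2] the distinct dword pairs are (1, 0) and
  // (3, 2), so a PSHUFLW with [1, 0, 3, 2] followed by a PSHUFD with
  // [0, 0, 1, 1] reproduces the whole mask.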
15628   auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
15629                                ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
15630     V = DAG.getNode(ShufWOp, DL, VT, V,
15631                     getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
15632     V = DAG.getBitcast(PSHUFDVT, V);
15633     V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
15634                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
15635     return DAG.getBitcast(VT, V);
15636   };
15637 
15638   if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
15639     int PSHUFDMask[4] = { -1, -1, -1, -1 };
15640     SmallVector<std::pair<int, int>, 4> DWordPairs;
15641     int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
15642 
15643     // Collect the different DWORD pairs.
15644     for (int DWord = 0; DWord != 4; ++DWord) {
15645       int M0 = Mask[2 * DWord + 0];
15646       int M1 = Mask[2 * DWord + 1];
15647       M0 = (M0 >= 0 ? M0 % 4 : M0);
15648       M1 = (M1 >= 0 ? M1 % 4 : M1);
15649       if (M0 < 0 && M1 < 0)
15650         continue;
15651 
15652       bool Match = false;
15653       for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
15654         auto &DWordPair = DWordPairs[j];
15655         if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
15656             (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
15657           DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
15658           DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
15659           PSHUFDMask[DWord] = DOffset + j;
15660           Match = true;
15661           break;
15662         }
15663       }
15664       if (!Match) {
15665         PSHUFDMask[DWord] = DOffset + DWordPairs.size();
15666         DWordPairs.push_back(std::make_pair(M0, M1));
15667       }
15668     }
15669 
15670     if (DWordPairs.size() <= 2) {
15671       DWordPairs.resize(2, std::make_pair(-1, -1));
15672       int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
15673                               DWordPairs[1].first, DWordPairs[1].second};
15674       if ((NumHToL + NumHToH) == 0)
15675         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
15676       if ((NumLToL + NumLToH) == 0)
15677         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
15678     }
15679   }
15680 
15681   // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
15682   // such inputs we can swap two of the dwords across the half mark and end up
15683   // with <=2 inputs to each half in each half. Once there, we can fall through
15684   // to the generic code below. For example:
15685   //
15686   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
15687   // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
15688   //
15689   // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
15690   // and an existing 2-into-2 on the other half. In this case we may have to
15691   // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
15692   // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
15693   // Fortunately, we don't have to handle anything but a 2-into-2 pattern
15694   // because any other situation (including a 3-into-1 or 1-into-3 in the other
15695   // half than the one we target for fixing) will be fixed when we re-enter this
15696   // path. We will also combine away any sequence of PSHUFD instructions,
15697   // folding them into a single one. Here is an example of the tricky case:
15698   //
15699   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
15700   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
15701   //
15702   // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
15703   //
15704   // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
15705   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
15706   //
15707   // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
15708   // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
15709   //
15710   // The result is fine to be handled by the generic logic.
15711   auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
15712                           ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
15713                           int AOffset, int BOffset) {
15714     assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
15715            "Must call this with A having 3 or 1 inputs from the A half.");
15716     assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
15717            "Must call this with B having 1 or 3 inputs from the B half.");
15718     assert(AToAInputs.size() + BToAInputs.size() == 4 &&
15719            "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
15720 
15721     bool ThreeAInputs = AToAInputs.size() == 3;
15722 
15723     // Compute the index of the dword with only one word among the three
15724     // inputs in a half by taking the sum of the half with three inputs and
15725     // subtracting the sum of the actual three inputs. The difference is the
15726     // remaining slot.
15727     int ADWord = 0, BDWord = 0;
15728     int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
15729     int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
15730     int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
15731     ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
15732     int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
15733     int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
15734     int TripleNonInputIdx =
15735         TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
15736     TripleDWord = TripleNonInputIdx / 2;
15737 
15738     // We use xor with one to compute the adjacent DWord to whichever one the
15739     // OneInput is in.
15740     OneInputDWord = (OneInput / 2) ^ 1;
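    // (Editorial example: if OneInput is word 5, it lives in dword 2, so the
    // adjacent dword, and thus OneInputDWord, is 3.)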
15741 
15742     // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
15743     // and BToA inputs. If there is also such a problem with the BToB and AToB
15744     // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
15745     // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
15746     // is essential that we don't *create* a 3<-1 as then we might oscillate.
15747     if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
15748       // Compute how many inputs will be flipped by swapping these DWords. We
15749       // need to balance this to ensure we don't form a 3-1 shuffle in the
15750       // other half.
15752       int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
15753                                  llvm::count(AToBInputs, 2 * ADWord + 1);
15754       int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
15755                                  llvm::count(BToBInputs, 2 * BDWord + 1);
15756       if ((NumFlippedAToBInputs == 1 &&
15757            (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
15758           (NumFlippedBToBInputs == 1 &&
15759            (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
15760         // We choose whether to fix the A half or B half based on whether that
15761         // half has zero flipped inputs. At zero, we may not be able to fix it
15762         // with that half. We also bias towards fixing the B half because that
15763         // will more commonly be the high half, and we have to bias one way.
15764         auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
15765                                                        ArrayRef<int> Inputs) {
15766           int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
15767           bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
15768           // Determine whether the free index is in the flipped dword or the
15769           // unflipped dword based on where the pinned index is. We use this bit
15770           // in an xor to conditionally select the adjacent dword.
15771           int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
15772           bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
15773           if (IsFixIdxInput == IsFixFreeIdxInput)
15774             FixFreeIdx += 1;
15775           IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
15776           assert(IsFixIdxInput != IsFixFreeIdxInput &&
15777                  "We need to be changing the number of flipped inputs!");
15778           int PSHUFHalfMask[] = {0, 1, 2, 3};
15779           std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
15780           V = DAG.getNode(
15781               FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
15782               MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
15783               getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
15784 
15785           for (int &M : Mask)
15786             if (M >= 0 && M == FixIdx)
15787               M = FixFreeIdx;
15788             else if (M >= 0 && M == FixFreeIdx)
15789               M = FixIdx;
15790         };
15791         if (NumFlippedBToBInputs != 0) {
15792           int BPinnedIdx =
15793               BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
15794           FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
15795         } else {
15796           assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
15797           int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
15798           FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
15799         }
15800       }
15801     }
15802 
15803     int PSHUFDMask[] = {0, 1, 2, 3};
15804     PSHUFDMask[ADWord] = BDWord;
15805     PSHUFDMask[BDWord] = ADWord;
15806     V = DAG.getBitcast(
15807         VT,
15808         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
15809                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15810 
15811     // Adjust the mask to match the new locations of A and B.
15812     for (int &M : Mask)
15813       if (M >= 0 && M/2 == ADWord)
15814         M = 2 * BDWord + M % 2;
15815       else if (M >= 0 && M/2 == BDWord)
15816         M = 2 * ADWord + M % 2;
15817 
15818     // Recurse back into this routine to re-compute state now that this isn't
15819     // a 3 and 1 problem.
15820     return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
15821   };
15822   if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
15823     return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
15824   if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
15825     return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
15826 
15827   // At this point there are at most two inputs to the low and high halves from
15828   // each half. That means the inputs can always be grouped into dwords and
15829   // those dwords can then be moved to the correct half with a dword shuffle.
15830   // We use at most one low and one high word shuffle to collect these paired
15831   // inputs into dwords, and finally a dword shuffle to place them.
15832   int PSHUFLMask[4] = {-1, -1, -1, -1};
15833   int PSHUFHMask[4] = {-1, -1, -1, -1};
15834   int PSHUFDMask[4] = {-1, -1, -1, -1};
15835 
15836   // First fix the masks for all the inputs that are staying in their
15837   // original halves. This will then dictate the targets of the cross-half
15838   // shuffles.
15839   auto fixInPlaceInputs =
15840       [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
15841                     MutableArrayRef<int> SourceHalfMask,
15842                     MutableArrayRef<int> HalfMask, int HalfOffset) {
15843     if (InPlaceInputs.empty())
15844       return;
15845     if (InPlaceInputs.size() == 1) {
15846       SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
15847           InPlaceInputs[0] - HalfOffset;
15848       PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
15849       return;
15850     }
15851     if (IncomingInputs.empty()) {
15852       // Just fix all of the in place inputs.
15853       for (int Input : InPlaceInputs) {
15854         SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
15855         PSHUFDMask[Input / 2] = Input / 2;
15856       }
15857       return;
15858     }
15859 
15860     assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
15861     SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
15862         InPlaceInputs[0] - HalfOffset;
15863     // Put the second input next to the first so that they are packed into
15864     // a dword. We find the adjacent index by toggling the low bit.
15865     int AdjIndex = InPlaceInputs[0] ^ 1;
15866     SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
15867     std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
15868     PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
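    // (Editorial example: with in-place inputs {0, 3} and HalfOffset 0, word 0
    // stays put, word 3 is pulled into the adjacent slot 1 so the pair shares
    // dword 0, and PSHUFDMask keeps that dword in place.)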
15869   };
15870   fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
15871   fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
15872 
15873   // Now gather the cross-half inputs and place them into a free dword of
15874   // their target half.
15875   // FIXME: This operation could almost certainly be simplified dramatically to
15876   // look more like the 3-1 fixing operation.
15877   auto moveInputsToRightHalf = [&PSHUFDMask](
15878       MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
15879       MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
15880       MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
15881       int DestOffset) {
15882     auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
15883       return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
15884     };
15885     auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
15886                                                int Word) {
15887       int LowWord = Word & ~1;
15888       int HighWord = Word | 1;
15889       return isWordClobbered(SourceHalfMask, LowWord) ||
15890              isWordClobbered(SourceHalfMask, HighWord);
15891     };
15892 
15893     if (IncomingInputs.empty())
15894       return;
15895 
15896     if (ExistingInputs.empty()) {
15897       // Map any dwords with inputs from them into the right half.
15898       for (int Input : IncomingInputs) {
15899         // If the source half mask maps over the inputs, turn those into
15900         // swaps and use the swapped lane.
15901         if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
15902           if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
15903             SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
15904                 Input - SourceOffset;
15905             // We have to swap the uses in our half mask in one sweep.
15906             for (int &M : HalfMask)
15907               if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
15908                 M = Input;
15909               else if (M == Input)
15910                 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
15911           } else {
15912             assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
15913                        Input - SourceOffset &&
15914                    "Previous placement doesn't match!");
15915           }
15916           // Note that this correctly re-maps both when we do a swap and when
15917           // we observe the other side of the swap above. We rely on that to
15918           // avoid swapping the members of the input list directly.
15919           Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
15920         }
15921 
15922         // Map the input's dword into the correct half.
15923         if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
15924           PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
15925         else
15926           assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
15927                      Input / 2 &&
15928                  "Previous placement doesn't match!");
15929       }
15930 
15931       // And just directly shift any other-half mask elements to be same-half
15932       // as we will have mirrored the dword containing the element into the
15933       // same position within that half.
15934       for (int &M : HalfMask)
15935         if (M >= SourceOffset && M < SourceOffset + 4) {
15936           M = M - SourceOffset + DestOffset;
15937           assert(M >= 0 && "This should never wrap below zero!");
15938         }
15939       return;
15940     }
15941 
15942     // Ensure we have the input in a viable dword of its current half. This
15943     // is particularly tricky because the original position may be clobbered
15944     // by inputs being moved and *staying* in that half.
15945     if (IncomingInputs.size() == 1) {
15946       if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
15947         int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
15948                          SourceOffset;
15949         SourceHalfMask[InputFixed - SourceOffset] =
15950             IncomingInputs[0] - SourceOffset;
15951         std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
15952                      InputFixed);
15953         IncomingInputs[0] = InputFixed;
15954       }
15955     } else if (IncomingInputs.size() == 2) {
15956       if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
15957           isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
15958         // We have two non-adjacent or clobbered inputs we need to extract from
15959         // the source half. To do this, we need to map them into some adjacent
15960         // dword slot in the source mask.
15961         int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
15962                               IncomingInputs[1] - SourceOffset};
15963 
15964         // If there is a free slot in the source half mask adjacent to one of
15965         // the inputs, place the other input in it. We use (Index XOR 1) to
15966         // compute an adjacent index.
15967         if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
15968             SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
15969           SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
15970           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
15971           InputsFixed[1] = InputsFixed[0] ^ 1;
15972         } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
15973                    SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
15974           SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
15975           SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
15976           InputsFixed[0] = InputsFixed[1] ^ 1;
15977         } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
15978                    SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
15979           // The two inputs are in the same DWord but it is clobbered and the
15980           // adjacent DWord isn't used at all. Move both inputs to the free
15981           // slot.
15982           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
15983           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
15984           InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
15985           InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
15986         } else {
15987           // The only way we hit this point is if there is no clobbering
15988           // (because there are no off-half inputs to this half) and there is no
15989           // free slot adjacent to one of the inputs. In this case, we have to
15990           // swap an input with a non-input.
15991           for (int i = 0; i < 4; ++i)
15992             assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
15993                    "We can't handle any clobbers here!");
15994           assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
15995                  "Cannot have adjacent inputs here!");
15996 
15997           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
15998           SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
15999 
16000           // We also have to update the final source mask in this case because
16001           // it may need to undo the above swap.
16002           for (int &M : FinalSourceHalfMask)
16003             if (M == (InputsFixed[0] ^ 1) + SourceOffset)
16004               M = InputsFixed[1] + SourceOffset;
16005             else if (M == InputsFixed[1] + SourceOffset)
16006               M = (InputsFixed[0] ^ 1) + SourceOffset;
16007 
16008           InputsFixed[1] = InputsFixed[0] ^ 1;
16009         }
16010 
16011         // Point everything at the fixed inputs.
16012         for (int &M : HalfMask)
16013           if (M == IncomingInputs[0])
16014             M = InputsFixed[0] + SourceOffset;
16015           else if (M == IncomingInputs[1])
16016             M = InputsFixed[1] + SourceOffset;
16017 
16018         IncomingInputs[0] = InputsFixed[0] + SourceOffset;
16019         IncomingInputs[1] = InputsFixed[1] + SourceOffset;
16020       }
16021     } else {
16022       llvm_unreachable("Unhandled input size!");
16023     }
16024 
16025     // Now hoist the DWord down to the right half.
16026     int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
16027     assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
16028     PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
16029     for (int &M : HalfMask)
16030       for (int Input : IncomingInputs)
16031         if (M == Input)
16032           M = FreeDWord * 2 + Input % 2;
16033   };
16034   moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
16035                         /*SourceOffset*/ 4, /*DestOffset*/ 0);
16036   moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
16037                         /*SourceOffset*/ 0, /*DestOffset*/ 4);
16038 
16039   // Now enact all the shuffles we've computed to move the inputs into their
16040   // target half.
16041   if (!isNoopShuffleMask(PSHUFLMask))
16042     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
16043                     getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
16044   if (!isNoopShuffleMask(PSHUFHMask))
16045     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
16046                     getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
16047   if (!isNoopShuffleMask(PSHUFDMask))
16048     V = DAG.getBitcast(
16049         VT,
16050         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
16051                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16052 
16053   // At this point, each half should contain all its inputs, and we can then
16054   // just shuffle them into their final position.
16055   assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
16056          "Failed to lift all the high half inputs to the low mask!");
16057   assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
16058          "Failed to lift all the low half inputs to the high mask!");
16059 
16060   // Do a half shuffle for the low mask.
16061   if (!isNoopShuffleMask(LoMask))
16062     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
16063                     getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
16064 
16065   // Do a half shuffle with the high mask after shifting its values down.
16066   for (int &M : HiMask)
16067     if (M >= 0)
16068       M -= 4;
16069   if (!isNoopShuffleMask(HiMask))
16070     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
16071                     getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
16072 
16073   return V;
16074 }
16075 
16076 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
16077 /// blend if only one input is used.
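///
/// Illustrative sketch (editorial note): for a v16i8 element whose mask value
/// is 18, the V1 control byte is 0x80 (producing zero) and the V2 control
/// byte is 2 (byte 2 of V2); if both inputs end up used, the two PSHUFB
/// results are OR'd together.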
16078 static SDValue lowerShuffleAsBlendOfPSHUFBs(
16079     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16080     const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
16081   assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
16082          "Lane crossing shuffle masks not supported");
16083 
16084   int NumBytes = VT.getSizeInBits() / 8;
16085   int Size = Mask.size();
16086   int Scale = NumBytes / Size;
16087 
16088   SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
16089   SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
16090   V1InUse = false;
16091   V2InUse = false;
16092 
16093   for (int i = 0; i < NumBytes; ++i) {
16094     int M = Mask[i / Scale];
16095     if (M < 0)
16096       continue;
16097 
16098     const int ZeroMask = 0x80;
16099     int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
16100     int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
16101     if (Zeroable[i / Scale])
16102       V1Idx = V2Idx = ZeroMask;
16103 
16104     V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
16105     V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
16106     V1InUse |= (ZeroMask != V1Idx);
16107     V2InUse |= (ZeroMask != V2Idx);
16108   }
16109 
16110   MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
16111   if (V1InUse)
16112     V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
16113                      DAG.getBuildVector(ShufVT, DL, V1Mask));
16114   if (V2InUse)
16115     V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
16116                      DAG.getBuildVector(ShufVT, DL, V2Mask));
16117 
16118   // If we need shuffled inputs from both, blend the two.
16119   SDValue V;
16120   if (V1InUse && V2InUse)
16121     V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
16122   else
16123     V = V1InUse ? V1 : V2;
16124 
16125   // Cast the result back to the correct type.
16126   return DAG.getBitcast(VT, V);
16127 }
16128 
16129 /// Generic lowering of 8-lane i16 shuffles.
16130 ///
16131 /// This handles both single-input shuffles and combined shuffle/blends with
16132 /// two inputs. The single input shuffles are immediately delegated to
16133 /// a dedicated lowering routine.
16134 ///
16135 /// The blends are lowered in one of three fundamental ways. If there are few
16136 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
16137 /// of the input is significantly cheaper when lowered as an interleaving of
16138 /// the two inputs, try to interleave them. Otherwise, blend the low and high
16139 /// halves of the inputs separately (making them have relatively few inputs)
16140 /// and then concatenate them.
16141 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16142                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16143                                  const X86Subtarget &Subtarget,
16144                                  SelectionDAG &DAG) {
16145   assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
16146   assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
16147   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16148 
16149   // Whenever we can lower this as a zext, that instruction is strictly faster
16150   // than any alternative.
16151   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
16152                                                    Zeroable, Subtarget, DAG))
16153     return ZExt;
16154 
16155   // Try to lower using a truncation.
16156   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
16157                                         Subtarget, DAG))
16158     return V;
16159 
16160   int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
16161 
16162   if (NumV2Inputs == 0) {
16163     // Try to use shift instructions.
16164     if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
16165                                             Zeroable, Subtarget, DAG))
16166       return Shift;
16167 
16168     // Check for being able to broadcast a single element.
16169     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
16170                                                     Mask, Subtarget, DAG))
16171       return Broadcast;
16172 
16173     // Try to use bit rotation instructions.
16174     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
16175                                                  Subtarget, DAG))
16176       return Rotate;
16177 
16178     // Use dedicated unpack instructions for masks that match their pattern.
16179     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
16180       return V;
16181 
16182     // Use dedicated pack instructions for masks that match their pattern.
16183     if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
16184                                          Subtarget))
16185       return V;
16186 
16187     // Try to use byte rotation instructions.
16188     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
16189                                                   Subtarget, DAG))
16190       return Rotate;
16191 
16192     // Make a copy of the mask so it can be modified.
16193     SmallVector<int, 8> MutableMask(Mask);
16194     return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
16195                                                Subtarget, DAG);
16196   }
16197 
16198   assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
16199          "All single-input shuffles should be canonicalized to be V1-input "
16200          "shuffles.");
16201 
16202   // Try to use shift instructions.
16203   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
16204                                           Zeroable, Subtarget, DAG))
16205     return Shift;
16206 
16207   // See if we can use SSE4A Extraction / Insertion.
16208   if (Subtarget.hasSSE4A())
16209     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
16210                                           Zeroable, DAG))
16211       return V;
16212 
16213   // There are special ways we can lower some single-element blends.
16214   if (NumV2Inputs == 1)
16215     if (SDValue V = lowerShuffleAsElementInsertion(
16216             DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16217       return V;
16218 
16219   // We have different paths for blend lowering, but they all must use the
16220   // *exact* same predicate.
16221   bool IsBlendSupported = Subtarget.hasSSE41();
16222   if (IsBlendSupported)
16223     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
16224                                             Zeroable, Subtarget, DAG))
16225       return Blend;
16226 
16227   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
16228                                              Zeroable, Subtarget, DAG))
16229     return Masked;
16230 
16231   // Use dedicated unpack instructions for masks that match their pattern.
16232   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
16233     return V;
16234 
16235   // Use dedicated pack instructions for masks that match their pattern.
16236   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
16237                                        Subtarget))
16238     return V;
16239 
16240   // Try to lower using a truncation.
16241   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
16242                                        Subtarget, DAG))
16243     return V;
16244 
16245   // Try to use byte rotation instructions.
16246   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
16247                                                 Subtarget, DAG))
16248     return Rotate;
16249 
16250   if (SDValue BitBlend =
16251           lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
16252     return BitBlend;
16253 
16254   // Try to use byte shift instructions to mask.
16255   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
16256                                               Zeroable, Subtarget, DAG))
16257     return V;
16258 
16259   // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
16260   // We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
16261   // be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
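  // Editorial illustration (assumes the SSE41, non-VLX path below): for the
  // even-compaction mask [0, 2, 4, 6, 8, 10, 12, 14], each dword of V1 and V2
  // is ANDed down to its low 16 bits and PACKUSDW then packs those surviving
  // words back into a single v8i16.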
16262   int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
16263   if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
16264       !Subtarget.hasVLX()) {
16265     // Check if this is part of a 256-bit vector truncation.
16266     if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
16267         peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
16268         peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
16269       SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
16270       V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
16271                          getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
16272                          DAG.getTargetConstant(0xEE, DL, MVT::i8));
16273       V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
16274       V1 = extract128BitVector(V1V2, 0, DAG, DL);
16275       V2 = extract128BitVector(V1V2, 4, DAG, DL);
16276     } else {
16277       SmallVector<SDValue, 4> DWordClearOps(4,
16278                                             DAG.getConstant(0, DL, MVT::i32));
16279       for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
16280         DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
16281       SDValue DWordClearMask =
16282           DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
16283       V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
16284                        DWordClearMask);
16285       V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
16286                        DWordClearMask);
16287     }
16288     // Now pack things back together.
16289     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, V1, V2);
16290     if (NumEvenDrops == 2) {
16291       Result = DAG.getBitcast(MVT::v4i32, Result);
16292       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, Result, Result);
16293     }
16294     return Result;
16295   }
16296 
16297   // When compacting odd (upper) elements, use PACKSS pre-SSE41.
16298   int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
16299   if (NumOddDrops == 1) {
16300     bool HasSSE41 = Subtarget.hasSSE41();
16301     V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
16302                      DAG.getBitcast(MVT::v4i32, V1),
16303                      DAG.getTargetConstant(16, DL, MVT::i8));
16304     V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
16305                      DAG.getBitcast(MVT::v4i32, V2),
16306                      DAG.getTargetConstant(16, DL, MVT::i8));
16307     return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
16308                        MVT::v8i16, V1, V2);
16309   }
16310 
16311   // Try to lower by permuting the inputs into an unpack instruction.
16312   if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
16313                                                       Mask, Subtarget, DAG))
16314     return Unpack;
16315 
16316   // If we can't directly blend but can use PSHUFB, that will be better as it
16317   // can both shuffle and set up the inefficient blend.
16318   if (!IsBlendSupported && Subtarget.hasSSSE3()) {
16319     bool V1InUse, V2InUse;
16320     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
16321                                         Zeroable, DAG, V1InUse, V2InUse);
16322   }
16323 
16324   // We can always bit-blend if we have to, so the fallback strategy is to
16325   // decompose into single-input permutes and blends/unpacks.
16326   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
16327                                               Mask, Subtarget, DAG);
16328 }
16329 
16330 /// Lower 8-lane 16-bit floating point shuffles.
16331 static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16332                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16333                                  const X86Subtarget &Subtarget,
16334                                  SelectionDAG &DAG) {
16335   assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
16336   assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
16337   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16338   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
16339 
16340   if (Subtarget.hasFP16()) {
16341     if (NumV2Elements == 0) {
16342       // Check for being able to broadcast a single element.
16343       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
16344                                                       Mask, Subtarget, DAG))
16345         return Broadcast;
16346     }
16347     if (NumV2Elements == 1 && Mask[0] >= 8)
16348       if (SDValue V = lowerShuffleAsElementInsertion(
16349               DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16350         return V;
16351   }
16352 
16353   V1 = DAG.getBitcast(MVT::v8i16, V1);
16354   V2 = DAG.getBitcast(MVT::v8i16, V2);
16355   return DAG.getBitcast(MVT::v8f16,
16356                         DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
16357 }
16358 
16359 // Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
16360 // sub-512-bit shuffles are padded to 512 bits for the shuffle and then
16361 // the active subvector is extracted.
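// Editorial example (not in the original comment): on an AVX-512 target
// without VLX, a v8i32 shuffle is widened to v16i32, Scale is 512/256 = 2,
// and a mask element such as 9 (element 1 of V2) is bumped by
// (Scale - 1) * NumElts = 8 to 17 so that it still addresses element 1 of
// the widened V2 operand in the VPERMV3 index space.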
16362 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
16363                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
16364                                      const X86Subtarget &Subtarget,
16365                                      SelectionDAG &DAG) {
16366   MVT MaskVT = VT.changeTypeToInteger();
16367   SDValue MaskNode;
16368   MVT ShuffleVT = VT;
16369   if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
16370     V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
16371     V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
16372     ShuffleVT = V1.getSimpleValueType();
16373 
16374     // Adjust mask to correct indices for the second input.
16375     int NumElts = VT.getVectorNumElements();
16376     unsigned Scale = 512 / VT.getSizeInBits();
16377     SmallVector<int, 32> AdjustedMask(Mask);
16378     for (int &M : AdjustedMask)
16379       if (NumElts <= M)
16380         M += (Scale - 1) * NumElts;
16381     MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
16382     MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
16383   } else {
16384     MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
16385   }
16386 
16387   SDValue Result;
16388   if (V2.isUndef())
16389     Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
16390   else
16391     Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
16392 
16393   if (VT != ShuffleVT)
16394     Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());
16395 
16396   return Result;
16397 }
16398 
16399 /// Generic lowering of v16i8 shuffles.
16400 ///
16401 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
16402 /// detect any complexity reducing interleaving. If that doesn't help, it uses
16403 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
16404 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
16405 /// back together.
16406 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16407                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16408                                  const X86Subtarget &Subtarget,
16409                                  SelectionDAG &DAG) {
16410   assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
16411   assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
16412   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16413 
16414   // Try to use shift instructions.
16415   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
16416                                           Zeroable, Subtarget, DAG))
16417     return Shift;
16418 
16419   // Try to use byte rotation instructions.
16420   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
16421                                                 Subtarget, DAG))
16422     return Rotate;
16423 
16424   // Use dedicated pack instructions for masks that match their pattern.
16425   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
16426                                        Subtarget))
16427     return V;
16428 
16429   // Try to use a zext lowering.
16430   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
16431                                                    Zeroable, Subtarget, DAG))
16432     return ZExt;
16433 
16434   // Try to use lower using a truncation.
16435   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
16436                                         Subtarget, DAG))
16437     return V;
16438 
16439   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
16440                                        Subtarget, DAG))
16441     return V;
16442 
16443   // See if we can use SSE4A Extraction / Insertion.
16444   if (Subtarget.hasSSE4A())
16445     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
16446                                           Zeroable, DAG))
16447       return V;
16448 
16449   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
16450 
16451   // For single-input shuffles, there are some nicer lowering tricks we can use.
16452   if (NumV2Elements == 0) {
16453     // Check for being able to broadcast a single element.
16454     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
16455                                                     Mask, Subtarget, DAG))
16456       return Broadcast;
16457 
16458     // Try to use bit rotation instructions.
16459     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
16460                                                  Subtarget, DAG))
16461       return Rotate;
16462 
16463     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
16464       return V;
16465 
16466     // Check whether we can widen this to an i16 shuffle by duplicating bytes.
16467     // Notably, this handles splat and partial-splat shuffles more efficiently.
16468     // However, it only makes sense if the pre-duplication shuffle simplifies
16469     // things significantly. Currently, this means we need to be able to
16470     // express the pre-duplication shuffle as an i16 shuffle.
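          // For example, a mask that repeats each selected byte in adjacent pairs
          // can be lowered as an i16 shuffle that gathers the needed words, an
          // UNPCK of the result with itself to duplicate each byte, and a final
          // i16 shuffle that places the duplicated pairs.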
16471     //
16472     // FIXME: We should check for other patterns which can be widened into an
16473     // i16 shuffle as well.
16474     auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
16475       for (int i = 0; i < 16; i += 2)
16476         if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
16477           return false;
16478 
16479       return true;
16480     };
16481     auto tryToWidenViaDuplication = [&]() -> SDValue {
16482       if (!canWidenViaDuplication(Mask))
16483         return SDValue();
16484       SmallVector<int, 4> LoInputs;
16485       copy_if(Mask, std::back_inserter(LoInputs),
16486               [](int M) { return M >= 0 && M < 8; });
16487       array_pod_sort(LoInputs.begin(), LoInputs.end());
16488       LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
16489                      LoInputs.end());
16490       SmallVector<int, 4> HiInputs;
16491       copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
16492       array_pod_sort(HiInputs.begin(), HiInputs.end());
16493       HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
16494                      HiInputs.end());
16495 
16496       bool TargetLo = LoInputs.size() >= HiInputs.size();
16497       ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
16498       ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
16499 
16500       int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
16501       SmallDenseMap<int, int, 8> LaneMap;
16502       for (int I : InPlaceInputs) {
16503         PreDupI16Shuffle[I/2] = I/2;
16504         LaneMap[I] = I;
16505       }
16506       int j = TargetLo ? 0 : 4, je = j + 4;
16507       for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
16508         // Check if j is already a shuffle of this input. This happens when
16509         // there are two adjacent bytes after we move the low one.
16510         if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
16511           // If we haven't yet mapped the input, search for a slot into which
16512           // we can map it.
16513           while (j < je && PreDupI16Shuffle[j] >= 0)
16514             ++j;
16515 
16516           if (j == je)
16517             // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
16518             return SDValue();
16519 
16520           // Map this input with the i16 shuffle.
16521           PreDupI16Shuffle[j] = MovingInputs[i] / 2;
16522         }
16523 
16524         // Update the lane map based on the mapping we ended up with.
16525         LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
16526       }
16527       V1 = DAG.getBitcast(
16528           MVT::v16i8,
16529           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
16530                                DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
16531 
16532       // Unpack the bytes to form the i16s that will be shuffled into place.
16533       bool EvenInUse = false, OddInUse = false;
16534       for (int i = 0; i < 16; i += 2) {
16535         EvenInUse |= (Mask[i + 0] >= 0);
16536         OddInUse |= (Mask[i + 1] >= 0);
16537         if (EvenInUse && OddInUse)
16538           break;
16539       }
16540       V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
16541                        MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
16542                        OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
16543 
16544       int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
16545       for (int i = 0; i < 16; ++i)
16546         if (Mask[i] >= 0) {
16547           int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
16548           assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
16549           if (PostDupI16Shuffle[i / 2] < 0)
16550             PostDupI16Shuffle[i / 2] = MappedMask;
16551           else
16552             assert(PostDupI16Shuffle[i / 2] == MappedMask &&
16553                    "Conflicting entries in the original shuffle!");
16554         }
16555       return DAG.getBitcast(
16556           MVT::v16i8,
16557           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
16558                                DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
16559     };
16560     if (SDValue V = tryToWidenViaDuplication())
16561       return V;
16562   }
16563 
16564   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
16565                                              Zeroable, Subtarget, DAG))
16566     return Masked;
16567 
16568   // Use dedicated unpack instructions for masks that match their pattern.
16569   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
16570     return V;
16571 
16572   // Try to use byte shift instructions to mask.
16573   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
16574                                               Zeroable, Subtarget, DAG))
16575     return V;
16576 
16577   // Check for compaction patterns.
16578   bool IsSingleInput = V2.isUndef();
16579   int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
16580 
16581   // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
16582   // with PSHUFB. It is important to do this before we attempt to generate any
16583   // blends but after all of the single-input lowerings. If the single input
16584   // lowerings can find an instruction sequence that is faster than a PSHUFB, we
16585   // want to preserve that and we can DAG combine any longer sequences into
16586   // a PSHUFB in the end. But once we start blending from multiple inputs,
16587   // the complexity of DAG combining bad patterns back into PSHUFB is too high,
16588   // and there are *very* few patterns that would actually be faster than the
16589   // PSHUFB approach because of its ability to zero lanes.
16590   //
16591   // If the mask is a binary compaction, we can more efficiently perform this
16592   // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
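        // For example, the even-byte compaction <0,2,...,14,16,18,...,30> of two
        // inputs is handled below as PACKUS(AND(V1,0x00FF),AND(V2,0x00FF)) rather
        // than blending two PSHUFBs.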
16593   //
16594   // FIXME: The only exceptions to the above are blends which are exact
16595   // interleavings with direct instructions supporting them. We currently don't
16596   // handle those well here.
16597   if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
16598     bool V1InUse = false;
16599     bool V2InUse = false;
16600 
16601     SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
16602         DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
16603 
16604     // If both V1 and V2 are in use and we can use a direct blend or an unpack,
16605     // do so. This avoids using them to handle blends-with-zero which is
16606     // important as a single pshufb is significantly faster for that.
16607     if (V1InUse && V2InUse) {
16608       if (Subtarget.hasSSE41())
16609         if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
16610                                                 Zeroable, Subtarget, DAG))
16611           return Blend;
16612 
16613       // We can use an unpack to do the blending rather than an or in some
16614       // cases. Even though the or may be (very minorly) more efficient, we
16615       // prefer this lowering because there are common cases where part of
16616       // the complexity of the shuffles goes away when we do the final blend as
16617       // an unpack.
16618       // FIXME: It might be worth trying to detect if the unpack-feeding
16619       // shuffles will both be pshufb, in which case we shouldn't bother with
16620       // this.
16621       if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
16622               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
16623         return Unpack;
16624 
16625       // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
16626       if (Subtarget.hasVBMI())
16627         return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
16628                                      DAG);
16629 
16630       // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
16631       if (Subtarget.hasXOP()) {
16632         SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
16633         return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
16634       }
16635 
16636       // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
16637       // PALIGNR will be cheaper than the second PSHUFB+OR.
16638       if (SDValue V = lowerShuffleAsByteRotateAndPermute(
16639               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
16640         return V;
16641     }
16642 
16643     return PSHUFB;
16644   }
16645 
16646   // There are special ways we can lower some single-element blends.
16647   if (NumV2Elements == 1)
16648     if (SDValue V = lowerShuffleAsElementInsertion(
16649             DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16650       return V;
16651 
16652   if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
16653     return Blend;
16654 
16655   // Check whether a compaction lowering can be done. This handles shuffles
16656   // which take every Nth element for some even N. See the helper function for
16657   // details.
16658   //
16659   // We special case these as they can be particularly efficiently handled with
16660   // the PACKUSWB instruction on x86 and they show up in common patterns of
16661   // rearranging bytes to truncate wide elements.
16662   if (NumEvenDrops) {
16663     // NumEvenDrops is the log2 of the power-of-two stride of the elements,
16664     // i.e. the number of times every other element must be dropped from the
16665     // input to produce this mask.
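          // For example, with NumEvenDrops == 1 we AND both inputs with a v8i16
          // splat of 0x00FF and emit a single PACKUS; with NumEvenDrops == 2 we
          // keep only every other word's low byte and emit a second PACKUS.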
16666 
16667     // First we need to zero all the dropped bytes.
16668     assert(NumEvenDrops <= 3 &&
16669            "No support for dropping even elements more than 3 times.");
16670     SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
16671     for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
16672       WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
16673     SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
16674     V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
16675                      WordClearMask);
16676     if (!IsSingleInput)
16677       V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
16678                        WordClearMask);
16679 
16680     // Now pack things back together.
16681     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
16682                                  IsSingleInput ? V1 : V2);
16683     for (int i = 1; i < NumEvenDrops; ++i) {
16684       Result = DAG.getBitcast(MVT::v8i16, Result);
16685       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
16686     }
16687     return Result;
16688   }
16689 
16690   int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
16691   if (NumOddDrops == 1) {
16692     V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
16693                      DAG.getBitcast(MVT::v8i16, V1),
16694                      DAG.getTargetConstant(8, DL, MVT::i8));
16695     if (!IsSingleInput)
16696       V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
16697                        DAG.getBitcast(MVT::v8i16, V2),
16698                        DAG.getTargetConstant(8, DL, MVT::i8));
16699     return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
16700                        IsSingleInput ? V1 : V2);
16701   }
16702 
16703   // Handle multi-input cases by blending/unpacking single-input shuffles.
16704   if (NumV2Elements > 0)
16705     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
16706                                                 Subtarget, DAG);
16707 
16708   // The fallback path for single-input shuffles widens this into two v8i16
16709   // vectors with unpacks, shuffles those, and then pulls them back together
16710   // with a pack.
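        // Concretely: V is either masked with 0x00FF (when only even source bytes
        // are used) or unpacked against zero into low/high i16 halves, each blend
        // mask is lowered as a v8i16 shuffle, and a final PACKUS truncates the two
        // halves back to v16i8.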
16711   SDValue V = V1;
16712 
16713   std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
16714   std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
16715   for (int i = 0; i < 16; ++i)
16716     if (Mask[i] >= 0)
16717       (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
16718 
16719   SDValue VLoHalf, VHiHalf;
16720   // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
16721   // them out and avoid using UNPCK{L,H} to extract the elements of V as
16722   // i16s.
16723   if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
16724       none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
16725     // Use a mask to drop the high bytes.
16726     VLoHalf = DAG.getBitcast(MVT::v8i16, V);
16727     VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
16728                           DAG.getConstant(0x00FF, DL, MVT::v8i16));
16729 
16730     // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
16731     VHiHalf = DAG.getUNDEF(MVT::v8i16);
16732 
16733     // Squash the masks to point directly into VLoHalf.
16734     for (int &M : LoBlendMask)
16735       if (M >= 0)
16736         M /= 2;
16737     for (int &M : HiBlendMask)
16738       if (M >= 0)
16739         M /= 2;
16740   } else {
16741     // Otherwise just unpack the low half of V into VLoHalf and the high half into
16742     // VHiHalf so that we can blend them as i16s.
16743     SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
16744 
16745     VLoHalf = DAG.getBitcast(
16746         MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
16747     VHiHalf = DAG.getBitcast(
16748         MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
16749   }
16750 
16751   SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
16752   SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
16753 
16754   return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
16755 }
16756 
16757 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
16758 ///
16759 /// This routine breaks down the specific type of 128-bit shuffle and
16760 /// dispatches to the lowering routines accordingly.
16761 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16762                                   MVT VT, SDValue V1, SDValue V2,
16763                                   const APInt &Zeroable,
16764                                   const X86Subtarget &Subtarget,
16765                                   SelectionDAG &DAG) {
16766   switch (VT.SimpleTy) {
16767   case MVT::v2i64:
16768     return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16769   case MVT::v2f64:
16770     return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16771   case MVT::v4i32:
16772     return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16773   case MVT::v4f32:
16774     return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16775   case MVT::v8i16:
16776     return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16777   case MVT::v8f16:
16778     return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16779   case MVT::v16i8:
16780     return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16781 
16782   default:
16783     llvm_unreachable("Unimplemented!");
16784   }
16785 }
16786 
16787 /// Generic routine to split vector shuffle into half-sized shuffles.
16788 ///
16789 /// This routine just extracts two subvectors, shuffles them independently, and
16790 /// then concatenates them back together. This should work effectively with all
16791 /// AVX vector shuffle types.
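      /// For example, a v8f32 shuffle is lowered as two v4f32 shuffles (one per
      /// half of the result mask) whose results are rejoined with CONCAT_VECTORS.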
16792 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
16793                                     SDValue V2, ArrayRef<int> Mask,
16794                                     SelectionDAG &DAG) {
16795   assert(VT.getSizeInBits() >= 256 &&
16796          "Only for 256-bit or wider vector shuffles!");
16797   assert(V1.getSimpleValueType() == VT && "Bad operand type!");
16798   assert(V2.getSimpleValueType() == VT && "Bad operand type!");
16799 
16800   ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
16801   ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
16802 
16803   int NumElements = VT.getVectorNumElements();
16804   int SplitNumElements = NumElements / 2;
16805   MVT ScalarVT = VT.getVectorElementType();
16806   MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
16807 
16808   // Use splitVector/extractSubVector so that split build-vectors just build two
16809   // narrower build vectors. This helps shuffling with splats and zeros.
16810   auto SplitVector = [&](SDValue V) {
16811     SDValue LoV, HiV;
16812     std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
16813     return std::make_pair(DAG.getBitcast(SplitVT, LoV),
16814                           DAG.getBitcast(SplitVT, HiV));
16815   };
16816 
16817   SDValue LoV1, HiV1, LoV2, HiV2;
16818   std::tie(LoV1, HiV1) = SplitVector(V1);
16819   std::tie(LoV2, HiV2) = SplitVector(V2);
16820 
16821   // Now create two 4-way blends of these half-width vectors.
16822   auto HalfBlend = [&](ArrayRef<int> HalfMask) {
16823     bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
16824     SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
16825     SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
16826     SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
16827     for (int i = 0; i < SplitNumElements; ++i) {
16828       int M = HalfMask[i];
16829       if (M >= NumElements) {
16830         if (M >= NumElements + SplitNumElements)
16831           UseHiV2 = true;
16832         else
16833           UseLoV2 = true;
16834         V2BlendMask[i] = M - NumElements;
16835         BlendMask[i] = SplitNumElements + i;
16836       } else if (M >= 0) {
16837         if (M >= SplitNumElements)
16838           UseHiV1 = true;
16839         else
16840           UseLoV1 = true;
16841         V1BlendMask[i] = M;
16842         BlendMask[i] = i;
16843       }
16844     }
16845 
16846     // Because the lowering happens after all combining takes place, we need to
16847     // manually combine these blend masks as much as possible so that we create
16848     // a minimal number of high-level vector shuffle nodes.
16849 
16850     // First try just blending the halves of V1 or V2.
16851     if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
16852       return DAG.getUNDEF(SplitVT);
16853     if (!UseLoV2 && !UseHiV2)
16854       return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16855     if (!UseLoV1 && !UseHiV1)
16856       return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16857 
16858     SDValue V1Blend, V2Blend;
16859     if (UseLoV1 && UseHiV1) {
16860       V1Blend =
16861         DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16862     } else {
16863       // We only use half of V1 so map the usage down into the final blend mask.
16864       V1Blend = UseLoV1 ? LoV1 : HiV1;
16865       for (int i = 0; i < SplitNumElements; ++i)
16866         if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
16867           BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
16868     }
16869     if (UseLoV2 && UseHiV2) {
16870       V2Blend =
16871         DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16872     } else {
16873       // We only use half of V2 so map the usage down into the final blend mask.
16874       V2Blend = UseLoV2 ? LoV2 : HiV2;
16875       for (int i = 0; i < SplitNumElements; ++i)
16876         if (BlendMask[i] >= SplitNumElements)
16877           BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
16878     }
16879     return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
16880   };
16881   SDValue Lo = HalfBlend(LoMask);
16882   SDValue Hi = HalfBlend(HiMask);
16883   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
16884 }
16885 
16886 /// Either split a vector in halves or decompose the shuffles and the
16887 /// blend/unpack.
16888 ///
16889 /// This is provided as a good fallback for many lowerings of non-single-input
16890 /// shuffles with more than one 128-bit lane. In those cases, we want to select
16891 /// between splitting the shuffle into 128-bit components and stitching those
16892 /// back together vs. extracting the single-input shuffles and blending those
16893 /// results.
16894 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
16895                                           SDValue V2, ArrayRef<int> Mask,
16896                                           const X86Subtarget &Subtarget,
16897                                           SelectionDAG &DAG) {
16898   assert(!V2.isUndef() && "This routine must not be used to lower single-input "
16899          "shuffles as it could then recurse on itself.");
16900   int Size = Mask.size();
16901 
16902   // If this can be modeled as a broadcast of two elements followed by a blend,
16903   // prefer that lowering. This is especially important because broadcasts can
16904   // often fold with memory operands.
16905   auto DoBothBroadcast = [&] {
16906     int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
16907     for (int M : Mask)
16908       if (M >= Size) {
16909         if (V2BroadcastIdx < 0)
16910           V2BroadcastIdx = M - Size;
16911         else if (M - Size != V2BroadcastIdx)
16912           return false;
16913       } else if (M >= 0) {
16914         if (V1BroadcastIdx < 0)
16915           V1BroadcastIdx = M;
16916         else if (M != V1BroadcastIdx)
16917           return false;
16918       }
16919     return true;
16920   };
16921   if (DoBothBroadcast())
16922     return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16923                                                 DAG);
16924 
16925   // If the inputs all stem from a single 128-bit lane of each input, then we
16926   // split them rather than blending because the split will decompose to
16927   // unusually few instructions.
16928   int LaneCount = VT.getSizeInBits() / 128;
16929   int LaneSize = Size / LaneCount;
16930   SmallBitVector LaneInputs[2];
16931   LaneInputs[0].resize(LaneCount, false);
16932   LaneInputs[1].resize(LaneCount, false);
16933   for (int i = 0; i < Size; ++i)
16934     if (Mask[i] >= 0)
16935       LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
16936   if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
16937     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16938 
16939   // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
16940   // requires that the decomposed single-input shuffles don't end up here.
16941   return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16942                                               DAG);
16943 }
16944 
16945 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
16946 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
16947 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
16948                                                  SDValue V1, SDValue V2,
16949                                                  ArrayRef<int> Mask,
16950                                                  SelectionDAG &DAG) {
16951   assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
16952 
16953   int LHSMask[4] = {-1, -1, -1, -1};
16954   int RHSMask[4] = {-1, -1, -1, -1};
16955   unsigned SHUFPMask = 0;
16956 
16957   // As SHUFPD uses a single LHS/RHS element per lane, we can always
16958   // perform the shuffle once the lanes have been shuffled in place.
16959   for (int i = 0; i != 4; ++i) {
16960     int M = Mask[i];
16961     if (M < 0)
16962       continue;
16963     int LaneBase = i & ~1;
16964     auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
16965     LaneMask[LaneBase + (M & 1)] = M;
16966     SHUFPMask |= (M & 1) << i;
16967   }
16968 
16969   SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
16970   SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
16971   return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
16972                      DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
16973 }
16974 
16975 /// Lower a vector shuffle crossing multiple 128-bit lanes as
16976 /// a lane permutation followed by a per-lane permutation.
16977 ///
16978 /// This is mainly for cases where we can have non-repeating permutes
16979 /// in each lane.
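      /// For example, a v8f32 reversal <7,6,5,4,3,2,1,0> becomes the cross-lane
      /// shuffle <4,5,6,7,0,1,2,3> (swapping the 128-bit lanes) followed by the
      /// in-lane shuffle <3,2,1,0,7,6,5,4>.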
16980 ///
16981 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
16982 /// we should investigate merging them.
16983 static SDValue lowerShuffleAsLanePermuteAndPermute(
16984     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16985     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
16986   int NumElts = VT.getVectorNumElements();
16987   int NumLanes = VT.getSizeInBits() / 128;
16988   int NumEltsPerLane = NumElts / NumLanes;
16989   bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
16990 
16991   /// Attempts to find a sublane permute with the given size
16992   /// that gets all elements into their target lanes.
16993   ///
16994   /// If successful, fills CrossLaneMask and InLaneMask and returns the shuffle.
16995   /// If unsuccessful, returns an empty SDValue and may overwrite InLaneMask.
16996   auto getSublanePermute = [&](int NumSublanes) -> SDValue {
16997     int NumSublanesPerLane = NumSublanes / NumLanes;
16998     int NumEltsPerSublane = NumElts / NumSublanes;
16999 
17000     SmallVector<int, 16> CrossLaneMask;
17001     SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
17002     // CrossLaneMask but one entry == one sublane.
17003     SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
17004 
17005     for (int i = 0; i != NumElts; ++i) {
17006       int M = Mask[i];
17007       if (M < 0)
17008         continue;
17009 
17010       int SrcSublane = M / NumEltsPerSublane;
17011       int DstLane = i / NumEltsPerLane;
17012 
17013       // We only need to get the elements into the right lane, not sublane.
17014       // So search all sublanes that make up the destination lane.
17015       bool Found = false;
17016       int DstSubStart = DstLane * NumSublanesPerLane;
17017       int DstSubEnd = DstSubStart + NumSublanesPerLane;
17018       for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
17019         if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
17020           continue;
17021 
17022         Found = true;
17023         CrossLaneMaskLarge[DstSublane] = SrcSublane;
17024         int DstSublaneOffset = DstSublane * NumEltsPerSublane;
17025         InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
17026         break;
17027       }
17028       if (!Found)
17029         return SDValue();
17030     }
17031 
17032     // Fill CrossLaneMask using CrossLaneMaskLarge.
17033     narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
17034 
17035     if (!CanUseSublanes) {
17036       // If we're only shuffling a single lowest lane and the rest are identity
17037       // then don't bother.
17038       // TODO - isShuffleMaskInputInPlace could be extended to something like
17039       // this.
17040       int NumIdentityLanes = 0;
17041       bool OnlyShuffleLowestLane = true;
17042       for (int i = 0; i != NumLanes; ++i) {
17043         int LaneOffset = i * NumEltsPerLane;
17044         if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
17045                                        i * NumEltsPerLane))
17046           NumIdentityLanes++;
17047         else if (CrossLaneMask[LaneOffset] != 0)
17048           OnlyShuffleLowestLane = false;
17049       }
17050       if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
17051         return SDValue();
17052     }
17053 
17054     // Avoid returning the same shuffle operation. For example,
17055     // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
17056     //                             undef:v16i16
17057     if (CrossLaneMask == Mask || InLaneMask == Mask)
17058       return SDValue();
17059 
17060     SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
17061     return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
17062                                 InLaneMask);
17063   };
17064 
17065   // First attempt a solution with full lanes.
17066   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
17067     return V;
17068 
17069   // The rest of the solutions use sublanes.
17070   if (!CanUseSublanes)
17071     return SDValue();
17072 
17073   // Then attempt a solution with 64-bit sublanes (vpermq).
17074   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
17075     return V;
17076 
17077   // If that doesn't work and we have fast variable cross-lane shuffle,
17078   // attempt 32-bit sublanes (vpermd).
17079   if (!Subtarget.hasFastVariableCrossLaneShuffle())
17080     return SDValue();
17081 
17082   return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
17083 }
17084 
17085 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
17086 /// source with a lane permutation.
17087 ///
17088 /// This lowering strategy results in four instructions in the worst case for a
17089 /// single-input cross lane shuffle which is lower than any other fully general
17090 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
17091 /// shuffle pattern should be handled prior to trying this lowering.
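      /// The lowering below flips the two 128-bit lanes of V1 with a single
      /// v4i64/v4f64 lane permute and then performs an in-lane shuffle of V1
      /// against the flipped copy.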
17092 static SDValue lowerShuffleAsLanePermuteAndShuffle(
17093     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17094     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
17095   // FIXME: This should probably be generalized for 512-bit vectors as well.
17096   assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
17097   int Size = Mask.size();
17098   int LaneSize = Size / 2;
17099 
17100   // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
17101   // Only do this if the elements aren't all from the lower lane,
17102   // otherwise we're (probably) better off doing a split.
17103   if (VT == MVT::v4f64 &&
17104       !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
17105     return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
17106 
17107   // If there are only inputs from one 128-bit lane, splitting will in fact be
17108   // less expensive. The flags track whether the given lane contains an element
17109   // that crosses to another lane.
17110   bool AllLanes;
17111   if (!Subtarget.hasAVX2()) {
17112     bool LaneCrossing[2] = {false, false};
17113     for (int i = 0; i < Size; ++i)
17114       if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
17115         LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
17116     AllLanes = LaneCrossing[0] && LaneCrossing[1];
17117   } else {
17118     bool LaneUsed[2] = {false, false};
17119     for (int i = 0; i < Size; ++i)
17120       if (Mask[i] >= 0)
17121         LaneUsed[(Mask[i] % Size) / LaneSize] = true;
17122     AllLanes = LaneUsed[0] && LaneUsed[1];
17123   }
17124 
17125   // TODO - we could support shuffling V2 in the Flipped input.
17126   assert(V2.isUndef() &&
17127          "This last part of this routine only works on single input shuffles");
17128 
17129   SmallVector<int, 32> InLaneMask(Mask);
17130   for (int i = 0; i < Size; ++i) {
17131     int &M = InLaneMask[i];
17132     if (M < 0)
17133       continue;
17134     if (((M % Size) / LaneSize) != (i / LaneSize))
17135       M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
17136   }
17137   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
17138          "In-lane shuffle mask expected");
17139 
17140   // If we're not using both lanes and the in-lane mask is not repeating,
17141   // then we're better off splitting.
17142   if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
17143     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
17144 
17145   // Flip the lanes, and shuffle the results which should now be in-lane.
17146   MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
17147   SDValue Flipped = DAG.getBitcast(PVT, V1);
17148   Flipped =
17149       DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
17150   Flipped = DAG.getBitcast(VT, Flipped);
17151   return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
17152 }
17153 
17154 /// Handle lowering 2-lane 128-bit shuffles.
17155 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
17156                                   SDValue V2, ArrayRef<int> Mask,
17157                                   const APInt &Zeroable,
17158                                   const X86Subtarget &Subtarget,
17159                                   SelectionDAG &DAG) {
17160   if (V2.isUndef()) {
17161     // Attempt to match VBROADCAST*128 subvector broadcast load.
17162     bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
17163     bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
17164     if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
17165         X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
17166       MVT MemVT = VT.getHalfNumVectorElementsVT();
17167       unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
17168       auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
17169       if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
17170                                              VT, MemVT, Ld, Ofs, DAG))
17171         return BcstLd;
17172     }
17173 
17174     // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
17175     if (Subtarget.hasAVX2())
17176       return SDValue();
17177   }
17178 
17179   bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
17180 
17181   SmallVector<int, 4> WidenedMask;
17182   if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
17183     return SDValue();
17184 
17185   bool IsLowZero = (Zeroable & 0x3) == 0x3;
17186   bool IsHighZero = (Zeroable & 0xc) == 0xc;
17187 
17188   // Try to use an insert into a zero vector.
17189   if (WidenedMask[0] == 0 && IsHighZero) {
17190     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
17191     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
17192                               DAG.getIntPtrConstant(0, DL));
17193     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17194                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
17195                        DAG.getIntPtrConstant(0, DL));
17196   }
17197 
17198   // TODO: If minimizing size and one of the inputs is a zero vector and the
17199   // zero vector has only one use, we could use a VPERM2X128 to save the
17200   // instruction bytes needed to explicitly generate the zero vector.
17201 
17202   // Blends are faster and handle all the non-lane-crossing cases.
17203   if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
17204                                           Subtarget, DAG))
17205     return Blend;
17206 
17207   // If either input operand is a zero vector, use VPERM2X128 because its mask
17208   // allows us to replace the zero input with an implicit zero.
17209   if (!IsLowZero && !IsHighZero) {
17210     // Check for patterns which can be matched with a single insert of a 128-bit
17211     // subvector.
17212     bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
17213     if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
17214 
17215       // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
17216       // this will likely become vinsertf128 which can't fold a 256-bit memop.
17217       if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
17218         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
17219         SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
17220                                      OnlyUsesV1 ? V1 : V2,
17221                                      DAG.getIntPtrConstant(0, DL));
17222         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
17223                            DAG.getIntPtrConstant(2, DL));
17224       }
17225     }
17226 
17227     // Try to use SHUF128 if possible.
17228     if (Subtarget.hasVLX()) {
17229       if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
17230         unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
17231                             ((WidenedMask[1] % 2) << 1);
17232         return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
17233                            DAG.getTargetConstant(PermMask, DL, MVT::i8));
17234       }
17235     }
17236   }
17237 
17238   // Otherwise form a 128-bit permutation. After accounting for undefs,
17239   // convert the 64-bit shuffle mask selection values into 128-bit
17240   // selection bits by dividing the indexes by 2 and shifting into positions
17241   // defined by a vperm2*128 instruction's immediate control byte.
17242 
17243   // The immediate permute control byte looks like this:
17244   //    [1:0] - select 128 bits from sources for low half of destination
17245   //    [2]   - ignore
17246   //    [3]   - zero low half of destination
17247   //    [5:4] - select 128 bits from sources for high half of destination
17248   //    [6]   - ignore
17249   //    [7]   - zero high half of destination
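        // For example, WidenedMask = <1, 2> selects the high half of V1 for the
        // low half of the result and the low half of V2 for the high half,
        // giving PermMask = 0x21.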
17250 
17251   assert((WidenedMask[0] >= 0 || IsLowZero) &&
17252          (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
17253 
17254   unsigned PermMask = 0;
17255   PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
17256   PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
17257 
17258   // Check the immediate mask and replace unused sources with undef.
17259   if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
17260     V1 = DAG.getUNDEF(VT);
17261   if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
17262     V2 = DAG.getUNDEF(VT);
17263 
17264   return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
17265                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
17266 }
17267 
17268 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
17269 /// shuffling each lane.
17270 ///
17271 /// This attempts to create a repeated lane shuffle where each lane uses one
17272 /// or two of the lanes of the inputs. The lanes of the input vectors are
17273 /// shuffled in one or two independent shuffles to get the lanes into the
17274 /// position needed by the final shuffle.
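      /// In other words, the result is built as a repeated-lane shuffle of two new
      /// vectors, each of which gathers the required source lane for every
      /// destination lane.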
17275 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
17276     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17277     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17278   assert(!V2.isUndef() && "This is only useful with multiple inputs.");
17279 
17280   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17281     return SDValue();
17282 
17283   int NumElts = Mask.size();
17284   int NumLanes = VT.getSizeInBits() / 128;
17285   int NumLaneElts = 128 / VT.getScalarSizeInBits();
17286   SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
17287   SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
17288 
17289   // First pass will try to fill in the RepeatMask from lanes that need two
17290   // sources.
17291   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17292     int Srcs[2] = {-1, -1};
17293     SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
17294     for (int i = 0; i != NumLaneElts; ++i) {
17295       int M = Mask[(Lane * NumLaneElts) + i];
17296       if (M < 0)
17297         continue;
17298       // Determine which of the possible input lanes (NumLanes from each source)
17299       // this element comes from. Assign that as one of the sources for this
17300       // lane. We can assign up to 2 sources for this lane. If we run out
17301       // of sources we can't do anything.
17302       int LaneSrc = M / NumLaneElts;
17303       int Src;
17304       if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
17305         Src = 0;
17306       else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
17307         Src = 1;
17308       else
17309         return SDValue();
17310 
17311       Srcs[Src] = LaneSrc;
17312       InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
17313     }
17314 
17315     // If this lane has two sources, see if it fits with the repeat mask so far.
17316     if (Srcs[1] < 0)
17317       continue;
17318 
17319     LaneSrcs[Lane][0] = Srcs[0];
17320     LaneSrcs[Lane][1] = Srcs[1];
17321 
17322     auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
17323       assert(M1.size() == M2.size() && "Unexpected mask size");
17324       for (int i = 0, e = M1.size(); i != e; ++i)
17325         if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
17326           return false;
17327       return true;
17328     };
17329 
17330     auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
17331       assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
17332       for (int i = 0, e = MergedMask.size(); i != e; ++i) {
17333         int M = Mask[i];
17334         if (M < 0)
17335           continue;
17336         assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
17337                "Unexpected mask element");
17338         MergedMask[i] = M;
17339       }
17340     };
17341 
17342     if (MatchMasks(InLaneMask, RepeatMask)) {
17343       // Merge this lane mask into the final repeat mask.
17344       MergeMasks(InLaneMask, RepeatMask);
17345       continue;
17346     }
17347 
17348     // Didn't find a match. Swap the operands and try again.
17349     std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
17350     ShuffleVectorSDNode::commuteMask(InLaneMask);
17351 
17352     if (MatchMasks(InLaneMask, RepeatMask)) {
17353       // Merge this lane mask into the final repeat mask.
17354       MergeMasks(InLaneMask, RepeatMask);
17355       continue;
17356     }
17357 
17358     // Couldn't find a match with the operands in either order.
17359     return SDValue();
17360   }
17361 
17362   // Now handle any lanes with only one source.
17363   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17364     // If this lane has already been processed, skip it.
17365     if (LaneSrcs[Lane][0] >= 0)
17366       continue;
17367 
17368     for (int i = 0; i != NumLaneElts; ++i) {
17369       int M = Mask[(Lane * NumLaneElts) + i];
17370       if (M < 0)
17371         continue;
17372 
17373       // If RepeatMask isn't defined yet we can define it ourself.
17374       if (RepeatMask[i] < 0)
17375         RepeatMask[i] = M % NumLaneElts;
17376 
17377       if (RepeatMask[i] < NumElts) {
17378         if (RepeatMask[i] != M % NumLaneElts)
17379           return SDValue();
17380         LaneSrcs[Lane][0] = M / NumLaneElts;
17381       } else {
17382         if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
17383           return SDValue();
17384         LaneSrcs[Lane][1] = M / NumLaneElts;
17385       }
17386     }
17387 
17388     if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
17389       return SDValue();
17390   }
17391 
17392   SmallVector<int, 16> NewMask(NumElts, -1);
17393   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17394     int Src = LaneSrcs[Lane][0];
17395     for (int i = 0; i != NumLaneElts; ++i) {
17396       int M = -1;
17397       if (Src >= 0)
17398         M = Src * NumLaneElts + i;
17399       NewMask[Lane * NumLaneElts + i] = M;
17400     }
17401   }
17402   SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17403   // Ensure we didn't get back the shuffle we started with.
17404   // FIXME: This is a hack to make up for some splat handling code in
17405   // getVectorShuffle.
17406   if (isa<ShuffleVectorSDNode>(NewV1) &&
17407       cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
17408     return SDValue();
17409 
17410   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17411     int Src = LaneSrcs[Lane][1];
17412     for (int i = 0; i != NumLaneElts; ++i) {
17413       int M = -1;
17414       if (Src >= 0)
17415         M = Src * NumLaneElts + i;
17416       NewMask[Lane * NumLaneElts + i] = M;
17417     }
17418   }
17419   SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17420   // Ensure we didn't get back the shuffle we started with.
17421   // FIXME: This is a hack to make up for some splat handling code in
17422   // getVectorShuffle.
17423   if (isa<ShuffleVectorSDNode>(NewV2) &&
17424       cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
17425     return SDValue();
17426 
17427   for (int i = 0; i != NumElts; ++i) {
17428     NewMask[i] = RepeatMask[i % NumLaneElts];
17429     if (NewMask[i] < 0)
17430       continue;
17431 
17432     NewMask[i] += (i / NumLaneElts) * NumLaneElts;
17433   }
17434   return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
17435 }
17436 
17437 /// If the input shuffle mask results in a vector that is undefined in all upper
17438 /// or lower half elements and that mask accesses only 2 halves of the
17439 /// shuffle's operands, return true. A mask of half the width with mask indexes
17440 /// adjusted to access the extracted halves of the original shuffle operands is
17441 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
17442 /// lower half of each input operand is accessed.
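      /// For example, the v8i32 mask <u,u,u,u,1,9,3,11> has an undef lower half;
      /// HalfMask becomes <1,5,3,7> with HalfIdx1 = 0 (lower half of V1) and
      /// HalfIdx2 = 2 (lower half of V2).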
17443 static bool
17444 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
17445                    int &HalfIdx1, int &HalfIdx2) {
17446   assert((Mask.size() == HalfMask.size() * 2) &&
17447          "Expected input mask to be twice as long as output");
17448 
17449   // Exactly one half of the result must be undef to allow narrowing.
17450   bool UndefLower = isUndefLowerHalf(Mask);
17451   bool UndefUpper = isUndefUpperHalf(Mask);
17452   if (UndefLower == UndefUpper)
17453     return false;
17454 
17455   unsigned HalfNumElts = HalfMask.size();
17456   unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
17457   HalfIdx1 = -1;
17458   HalfIdx2 = -1;
17459   for (unsigned i = 0; i != HalfNumElts; ++i) {
17460     int M = Mask[i + MaskIndexOffset];
17461     if (M < 0) {
17462       HalfMask[i] = M;
17463       continue;
17464     }
17465 
17466     // Determine which of the 4 half vectors this element is from.
17467     // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
17468     int HalfIdx = M / HalfNumElts;
17469 
17470     // Determine the element index into its half vector source.
17471     int HalfElt = M % HalfNumElts;
17472 
17473     // We can shuffle with up to 2 half vectors, set the new 'half'
17474     // shuffle mask accordingly.
17475     if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
17476       HalfMask[i] = HalfElt;
17477       HalfIdx1 = HalfIdx;
17478       continue;
17479     }
17480     if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
17481       HalfMask[i] = HalfElt + HalfNumElts;
17482       HalfIdx2 = HalfIdx;
17483       continue;
17484     }
17485 
17486     // Too many half vectors referenced.
17487     return false;
17488   }
17489 
17490   return true;
17491 }
17492 
17493 /// Given the output values from getHalfShuffleMask(), create a half width
17494 /// shuffle of extracted vectors followed by an insert back to full width.
17495 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
17496                                      ArrayRef<int> HalfMask, int HalfIdx1,
17497                                      int HalfIdx2, bool UndefLower,
17498                                      SelectionDAG &DAG, bool UseConcat = false) {
17499   assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
17500   assert(V1.getValueType().isSimple() && "Expecting only simple types");
17501 
17502   MVT VT = V1.getSimpleValueType();
17503   MVT HalfVT = VT.getHalfNumVectorElementsVT();
17504   unsigned HalfNumElts = HalfVT.getVectorNumElements();
17505 
17506   auto getHalfVector = [&](int HalfIdx) {
17507     if (HalfIdx < 0)
17508       return DAG.getUNDEF(HalfVT);
17509     SDValue V = (HalfIdx < 2 ? V1 : V2);
17510     HalfIdx = (HalfIdx % 2) * HalfNumElts;
17511     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
17512                        DAG.getIntPtrConstant(HalfIdx, DL));
17513   };
17514 
17515   // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
17516   SDValue Half1 = getHalfVector(HalfIdx1);
17517   SDValue Half2 = getHalfVector(HalfIdx2);
17518   SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
17519   if (UseConcat) {
17520     SDValue Op0 = V;
17521     SDValue Op1 = DAG.getUNDEF(HalfVT);
17522     if (UndefLower)
17523       std::swap(Op0, Op1);
17524     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
17525   }
17526 
17527   unsigned Offset = UndefLower ? HalfNumElts : 0;
17528   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
17529                      DAG.getIntPtrConstant(Offset, DL));
17530 }
17531 
17532 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
17533 /// This allows for fast cases such as subvector extraction/insertion
17534 /// or shuffling smaller vector types which can lower more efficiently.
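      /// For example, the v8f32 mask <u,u,u,u,0,1,2,3> is lowered as an insertion
      /// of the low v4f32 subvector of V1 into the upper half of an undef vector.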
17535 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
17536                                          SDValue V2, ArrayRef<int> Mask,
17537                                          const X86Subtarget &Subtarget,
17538                                          SelectionDAG &DAG) {
17539   assert((VT.is256BitVector() || VT.is512BitVector()) &&
17540          "Expected 256-bit or 512-bit vector");
17541 
17542   bool UndefLower = isUndefLowerHalf(Mask);
17543   if (!UndefLower && !isUndefUpperHalf(Mask))
17544     return SDValue();
17545 
17546   assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
17547          "Completely undef shuffle mask should have been simplified already");
17548 
17549   // Upper half is undef and lower half is whole upper subvector.
17550   // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
17551   MVT HalfVT = VT.getHalfNumVectorElementsVT();
17552   unsigned HalfNumElts = HalfVT.getVectorNumElements();
17553   if (!UndefLower &&
17554       isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
17555     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17556                              DAG.getIntPtrConstant(HalfNumElts, DL));
17557     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17558                        DAG.getIntPtrConstant(0, DL));
17559   }
17560 
17561   // Lower half is undef and upper half is whole lower subvector.
17562   // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
17563   if (UndefLower &&
17564       isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
17565     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17566                              DAG.getIntPtrConstant(0, DL));
17567     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17568                        DAG.getIntPtrConstant(HalfNumElts, DL));
17569   }
17570 
17571   int HalfIdx1, HalfIdx2;
17572   SmallVector<int, 8> HalfMask(HalfNumElts);
17573   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
17574     return SDValue();
17575 
17576   assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
17577 
17578   // Only shuffle the halves of the inputs when useful.
17579   unsigned NumLowerHalves =
17580       (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
17581   unsigned NumUpperHalves =
17582       (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
17583   assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
17584 
17585   // Determine the larger pattern of undef/halves, then decide if it's worth
17586   // splitting the shuffle based on subtarget capabilities and types.
17587   unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
17588   if (!UndefLower) {
17589     // XXXXuuuu: no insert is needed.
17590     // Always extract lowers when setting lower - these are all free subreg ops.
17591     if (NumUpperHalves == 0)
17592       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17593                                    UndefLower, DAG);
17594 
17595     if (NumUpperHalves == 1) {
17596       // AVX2 has efficient 32/64-bit element cross-lane shuffles.
17597       if (Subtarget.hasAVX2()) {
17598         // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
17599         if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
17600             !is128BitUnpackShuffleMask(HalfMask, DAG) &&
17601             (!isSingleSHUFPSMask(HalfMask) ||
17602              Subtarget.hasFastVariableCrossLaneShuffle()))
17603           return SDValue();
17604         // If this is a unary shuffle (assume that the 2nd operand is
17605         // canonicalized to undef), then we can use vpermpd. Otherwise, we
17606         // are better off extracting the upper half of 1 operand and using a
17607         // narrow shuffle.
17608         if (EltWidth == 64 && V2.isUndef())
17609           return SDValue();
17610       }
17611       // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17612       if (Subtarget.hasAVX512() && VT.is512BitVector())
17613         return SDValue();
17614       // Extract + narrow shuffle is better than the wide alternative.
17615       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17616                                    UndefLower, DAG);
17617     }
17618 
17619     // Don't extract both uppers; instead, shuffle and then extract.
17620     assert(NumUpperHalves == 2 && "Half vector count went wrong");
17621     return SDValue();
17622   }
17623 
17624   // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
17625   if (NumUpperHalves == 0) {
17626     // AVX2 has efficient 64-bit element cross-lane shuffles.
17627     // TODO: Refine to account for unary shuffle, splat, and other masks?
17628     if (Subtarget.hasAVX2() && EltWidth == 64)
17629       return SDValue();
17630     // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17631     if (Subtarget.hasAVX512() && VT.is512BitVector())
17632       return SDValue();
17633     // Narrow shuffle + insert is better than the wide alternative.
17634     return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17635                                  UndefLower, DAG);
17636   }
17637 
17638   // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
17639   return SDValue();
17640 }
17641 
17642 /// Handle case where shuffle sources are coming from the same 128-bit lane and
17643 /// every lane can be represented as the same repeating mask - allowing us to
17644 /// shuffle the sources with the repeating shuffle and then permute the result
17645 /// to the destination lanes.
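/// For example (illustrative): a v8i32 mask <6,7,4,5, 2,3,0,1>, where each
/// destination 128-bit lane reads from a single source lane, becomes the
/// in-lane repeating shuffle <2,3,0,1, 6,7,4,5> followed by the lane permute
/// <4,5,6,7, 0,1,2,3>.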
17646 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
17647     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17648     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17649   int NumElts = VT.getVectorNumElements();
17650   int NumLanes = VT.getSizeInBits() / 128;
17651   int NumLaneElts = NumElts / NumLanes;
17652 
17653   // On AVX2 we may be able to just shuffle the lowest elements and then
17654   // broadcast the result.
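  // e.g. (illustrative) a v8i32 mask <3,2,3,2,3,2,3,2> is handled by first
  // shuffling <3,2> into the lowest 64 bits and then repeating that pair with
  // the broadcast mask <0,1,0,1,0,1,0,1>.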
17655   if (Subtarget.hasAVX2()) {
17656     for (unsigned BroadcastSize : {16, 32, 64}) {
17657       if (BroadcastSize <= VT.getScalarSizeInBits())
17658         continue;
17659       int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
17660 
17661       // Attempt to match a repeating pattern every NumBroadcastElts,
17662       // accounting for UNDEFs, but only referencing the lowest 128-bit
17663       // lane of the inputs.
17664       auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
17665         for (int i = 0; i != NumElts; i += NumBroadcastElts)
17666           for (int j = 0; j != NumBroadcastElts; ++j) {
17667             int M = Mask[i + j];
17668             if (M < 0)
17669               continue;
17670             int &R = RepeatMask[j];
17671             if (0 != ((M % NumElts) / NumLaneElts))
17672               return false;
17673             if (0 <= R && R != M)
17674               return false;
17675             R = M;
17676           }
17677         return true;
17678       };
17679 
17680       SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
17681       if (!FindRepeatingBroadcastMask(RepeatMask))
17682         continue;
17683 
17684       // Shuffle the (lowest) repeated elements in place for broadcast.
17685       SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
17686 
17687       // Shuffle the actual broadcast.
17688       SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
17689       for (int i = 0; i != NumElts; i += NumBroadcastElts)
17690         for (int j = 0; j != NumBroadcastElts; ++j)
17691           BroadcastMask[i + j] = j;
17692       return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
17693                                   BroadcastMask);
17694     }
17695   }
17696 
17697   // Bail if the shuffle mask doesn't cross 128-bit lanes.
17698   if (!is128BitLaneCrossingShuffleMask(VT, Mask))
17699     return SDValue();
17700 
17701   // Bail if we already have a repeated lane shuffle mask.
17702   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17703     return SDValue();
17704 
17705   // Helper to look for a repeated mask in each split sub-lane, and to check
17706   // that those sub-lanes can then be permuted into place.
17707   auto ShuffleSubLanes = [&](int SubLaneScale) {
17708     int NumSubLanes = NumLanes * SubLaneScale;
17709     int NumSubLaneElts = NumLaneElts / SubLaneScale;
17710 
17711     // Check that all the sources are coming from the same lane and see if we
17712     // can form a repeating shuffle mask (local to each sub-lane). At the same
17713     // time, determine the source sub-lane for each destination sub-lane.
17714     int TopSrcSubLane = -1;
17715     SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
17716     SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
17717         SubLaneScale,
17718         SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
17719 
17720     for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
17721       // Extract the sub-lane mask, check that it all comes from the same lane
17722       // and normalize the mask entries to come from the first lane.
17723       int SrcLane = -1;
17724       SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
17725       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17726         int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
17727         if (M < 0)
17728           continue;
17729         int Lane = (M % NumElts) / NumLaneElts;
17730         if ((0 <= SrcLane) && (SrcLane != Lane))
17731           return SDValue();
17732         SrcLane = Lane;
17733         int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
17734         SubLaneMask[Elt] = LocalM;
17735       }
17736 
17737       // Whole sub-lane is UNDEF.
17738       if (SrcLane < 0)
17739         continue;
17740 
17741       // Attempt to match against the candidate repeated sub-lane masks.
17742       for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
17743         auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
17744           for (int i = 0; i != NumSubLaneElts; ++i) {
17745             if (M1[i] < 0 || M2[i] < 0)
17746               continue;
17747             if (M1[i] != M2[i])
17748               return false;
17749           }
17750           return true;
17751         };
17752 
17753         auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
17754         if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
17755           continue;
17756 
17757         // Merge the sub-lane mask into the matching repeated sub-lane mask.
17758         for (int i = 0; i != NumSubLaneElts; ++i) {
17759           int M = SubLaneMask[i];
17760           if (M < 0)
17761             continue;
17762           assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
17763                  "Unexpected mask element");
17764           RepeatedSubLaneMask[i] = M;
17765         }
17766 
17767         // Track the topmost source sub-lane - by setting the remaining to
17768         // UNDEF we can greatly simplify shuffle matching.
17769         int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
17770         TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
17771         Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
17772         break;
17773       }
17774 
17775       // Bail if we failed to find a matching repeated sub-lane mask.
17776       if (Dst2SrcSubLanes[DstSubLane] < 0)
17777         return SDValue();
17778     }
17779     assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
17780            "Unexpected source lane");
17781 
17782     // Create a repeating shuffle mask for the entire vector.
17783     SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
17784     for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
17785       int Lane = SubLane / SubLaneScale;
17786       auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
17787       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17788         int M = RepeatedSubLaneMask[Elt];
17789         if (M < 0)
17790           continue;
17791         int Idx = (SubLane * NumSubLaneElts) + Elt;
17792         RepeatedMask[Idx] = M + (Lane * NumLaneElts);
17793       }
17794     }
17795 
17796     // Shuffle each source sub-lane to its destination.
17797     SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
17798     for (int i = 0; i != NumElts; i += NumSubLaneElts) {
17799       int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
17800       if (SrcSubLane < 0)
17801         continue;
17802       for (int j = 0; j != NumSubLaneElts; ++j)
17803         SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
17804     }
17805 
17806     // Avoid returning the same shuffle operation.
17807     // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
17808     if (RepeatedMask == Mask || SubLaneMask == Mask)
17809       return SDValue();
17810 
17811     SDValue RepeatedShuffle =
17812         DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
17813 
17814     return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
17815                                 SubLaneMask);
17816   };
17817 
17818   // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
17819   // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
17820   // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
17821   // Otherwise we can only permute whole 128-bit lanes.
17822   int MinSubLaneScale = 1, MaxSubLaneScale = 1;
17823   if (Subtarget.hasAVX2() && VT.is256BitVector()) {
17824     bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
17825     MinSubLaneScale = 2;
17826     MaxSubLaneScale =
17827         (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
17828   }
17829   if (Subtarget.hasBWI() && VT == MVT::v64i8)
17830     MinSubLaneScale = MaxSubLaneScale = 4;
17831 
17832   for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
17833     if (SDValue Shuffle = ShuffleSubLanes(Scale))
17834       return Shuffle;
17835 
17836   return SDValue();
17837 }
17838 
17839 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
17840                                    bool &ForceV1Zero, bool &ForceV2Zero,
17841                                    unsigned &ShuffleImm, ArrayRef<int> Mask,
17842                                    const APInt &Zeroable) {
17843   int NumElts = VT.getVectorNumElements();
17844   assert(VT.getScalarSizeInBits() == 64 &&
17845          (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
17846          "Unexpected data type for VSHUFPD");
17847   assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
17848          "Illegal shuffle mask");
17849 
17850   bool ZeroLane[2] = { true, true };
17851   for (int i = 0; i < NumElts; ++i)
17852     ZeroLane[i & 1] &= Zeroable[i];
17853 
17854   // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
17855   // Mask for V4F64: 0/1,  4/5,  2/3,  6/7, ..
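  // e.g. (illustrative) the v4f64 mask <1, 5, 2, 7> matches with
  // ShuffleImm = 0b1011.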
17856   ShuffleImm = 0;
17857   bool ShufpdMask = true;
17858   bool CommutableMask = true;
17859   for (int i = 0; i < NumElts; ++i) {
17860     if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
17861       continue;
17862     if (Mask[i] < 0)
17863       return false;
17864     int Val = (i & 6) + NumElts * (i & 1);
17865     int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
17866     if (Mask[i] < Val || Mask[i] > Val + 1)
17867       ShufpdMask = false;
17868     if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
17869       CommutableMask = false;
17870     ShuffleImm |= (Mask[i] % 2) << i;
17871   }
17872 
17873   if (!ShufpdMask && !CommutableMask)
17874     return false;
17875 
17876   if (!ShufpdMask && CommutableMask)
17877     std::swap(V1, V2);
17878 
17879   ForceV1Zero = ZeroLane[0];
17880   ForceV2Zero = ZeroLane[1];
17881   return true;
17882 }
17883 
17884 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
17885                                       SDValue V2, ArrayRef<int> Mask,
17886                                       const APInt &Zeroable,
17887                                       const X86Subtarget &Subtarget,
17888                                       SelectionDAG &DAG) {
17889   assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
17890          "Unexpected data type for VSHUFPD");
17891 
17892   unsigned Immediate = 0;
17893   bool ForceV1Zero = false, ForceV2Zero = false;
17894   if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
17895                               Mask, Zeroable))
17896     return SDValue();
17897 
17898   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
17899   if (ForceV1Zero)
17900     V1 = getZeroVector(VT, Subtarget, DAG, DL);
17901   if (ForceV2Zero)
17902     V2 = getZeroVector(VT, Subtarget, DAG, DL);
17903 
17904   return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
17905                      DAG.getTargetConstant(Immediate, DL, MVT::i8));
17906 }
17907 
17908 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
17909 // by zeroable elements in the remaining 24 elements. Turn this into two
17910 // vmovqb instructions shuffled together.
17911 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
17912                                              SDValue V1, SDValue V2,
17913                                              ArrayRef<int> Mask,
17914                                              const APInt &Zeroable,
17915                                              SelectionDAG &DAG) {
17916   assert(VT == MVT::v32i8 && "Unexpected type!");
17917 
17918   // The first 8 indices should be every 8th element.
17919   if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
17920     return SDValue();
17921 
17922   // Remaining elements need to be zeroable.
17923   if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
17924     return SDValue();
17925 
17926   V1 = DAG.getBitcast(MVT::v4i64, V1);
17927   V2 = DAG.getBitcast(MVT::v4i64, V2);
17928 
17929   V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
17930   V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
17931 
17932   // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
17933   // the upper bits of the result using an unpckldq.
17934   SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
17935                                         { 0, 1, 2, 3, 16, 17, 18, 19,
17936                                           4, 5, 6, 7, 20, 21, 22, 23 });
17937   // Insert the unpckldq into a zero vector to widen to v32i8.
17938   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
17939                      DAG.getConstant(0, DL, MVT::v32i8), Unpack,
17940                      DAG.getIntPtrConstant(0, DL));
17941 }
17942 
17943 // a = shuffle v1, v2, mask1    ; interleaving lower lanes of v1 and v2
17944 // b = shuffle v1, v2, mask2    ; interleaving higher lanes of v1 and v2
17945 //     =>
17946 // ul = unpckl v1, v2
17947 // uh = unpckh v1, v2
17948 // a = vperm ul, uh
17949 // b = vperm ul, uh
17950 //
17951 // Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
17952 // and permute. We cannot directly match v3 because it is split into two
17953 // 256-bit vectors in earlier isel stages. Therefore, this function matches a
17954 // pair of 256-bit shuffles and makes sure the masks are consecutive.
17955 //
17956 // Once unpck and permute nodes are created, the permute corresponding to this
17957 // shuffle is returned, while the other permute replaces the other half of the
17958 // shuffle in the selection dag.
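// For example (illustrative): for v8i32 the two half shuffles carry the masks
// <0,8,1,9,2,10,3,11> and <4,12,5,13,6,14,7,15>; they are rewritten as
//   ul = unpckl v1, v2
//   uh = unpckh v1, v2
//   a  = vperm2x128 ul, uh, 0x20
//   b  = vperm2x128 ul, uh, 0x31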
17959 static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
17960                                                  SDValue V1, SDValue V2,
17961                                                  ArrayRef<int> Mask,
17962                                                  SelectionDAG &DAG) {
17963   if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
17964       VT != MVT::v32i8)
17965     return SDValue();
17966   // <B0, B1, B0+1, B1+1, ...>
17967   auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
17968                                    unsigned Begin1) {
17969     size_t Size = Mask.size();
17970     assert(Size % 2 == 0 && "Expected even mask size");
17971     for (unsigned I = 0; I < Size; I += 2) {
17972       if (Mask[I] != (int)(Begin0 + I / 2) ||
17973           Mask[I + 1] != (int)(Begin1 + I / 2))
17974         return false;
17975     }
17976     return true;
17977   };
17978   // Check which half this shuffle node is.
17979   int NumElts = VT.getVectorNumElements();
17980   size_t FirstQtr = NumElts / 2;
17981   size_t ThirdQtr = NumElts + NumElts / 2;
17982   bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
17983   bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
17984   if (!IsFirstHalf && !IsSecondHalf)
17985     return SDValue();
17986 
17987   // Find the intersection between shuffle users of V1 and V2.
17988   SmallVector<SDNode *, 2> Shuffles;
17989   for (SDNode *User : V1->uses())
17990     if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
17991         User->getOperand(1) == V2)
17992       Shuffles.push_back(User);
17993   // Limit user size to two for now.
17994   if (Shuffles.size() != 2)
17995     return SDValue();
17996   // Find out which half of the 512-bit shuffle each smaller shuffle is.
17997   auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
17998   auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
17999   SDNode *FirstHalf;
18000   SDNode *SecondHalf;
18001   if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
18002       IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
18003     FirstHalf = Shuffles[0];
18004     SecondHalf = Shuffles[1];
18005   } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
18006              IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
18007     FirstHalf = Shuffles[1];
18008     SecondHalf = Shuffles[0];
18009   } else {
18010     return SDValue();
18011   }
18012   // Lower into unpck and perm. Return the perm of this shuffle and replace
18013   // the other.
18014   SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
18015   SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
18016   SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
18017                               DAG.getTargetConstant(0x20, DL, MVT::i8));
18018   SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
18019                               DAG.getTargetConstant(0x31, DL, MVT::i8));
18020   if (IsFirstHalf) {
18021     DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
18022     return Perm1;
18023   }
18024   DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
18025   return Perm2;
18026 }
18027 
18028 /// Handle lowering of 4-lane 64-bit floating point shuffles.
18029 ///
18030 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
18031 /// isn't available.
18032 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18033                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18034                                  const X86Subtarget &Subtarget,
18035                                  SelectionDAG &DAG) {
18036   assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
18037   assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
18038   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
18039 
18040   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
18041                                      Subtarget, DAG))
18042     return V;
18043 
18044   if (V2.isUndef()) {
18045     // Check for being able to broadcast a single element.
18046     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
18047                                                     Mask, Subtarget, DAG))
18048       return Broadcast;
18049 
18050     // Use low duplicate instructions for masks that match their pattern.
18051     if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
18052       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
18053 
18054     if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
18055       // Non-half-crossing single input shuffles can be lowered with an
18056       // interleaved permutation.
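      // e.g. (illustrative) the v4f64 mask <1,0,3,2> yields
      // VPERMILPMask = 0b0101.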
18057       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
18058                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
18059       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
18060                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
18061     }
18062 
18063     // With AVX2 we have direct support for this permutation.
18064     if (Subtarget.hasAVX2())
18065       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
18066                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
18067 
18068     // Try to create an in-lane repeating shuffle mask and then shuffle the
18069     // results into the target lanes.
18070     if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18071             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18072       return V;
18073 
18074     // Try to permute the lanes and then use a per-lane permute.
18075     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
18076                                                         Mask, DAG, Subtarget))
18077       return V;
18078 
18079     // Otherwise, fall back.
18080     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
18081                                                DAG, Subtarget);
18082   }
18083 
18084   // Use dedicated unpack instructions for masks that match their pattern.
18085   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
18086     return V;
18087 
18088   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
18089                                           Zeroable, Subtarget, DAG))
18090     return Blend;
18091 
18092   // Check if the blend happens to exactly fit that of SHUFPD.
18093   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
18094                                           Zeroable, Subtarget, DAG))
18095     return Op;
18096 
18097   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
18098   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
18099 
18100   // If we have lane crossing shuffles AND they don't all come from the lower
18101   // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
18102   // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
18103   // canonicalizes to a blend of splats, which isn't necessary for this combine.
18104   if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
18105       !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
18106       (V1.getOpcode() != ISD::BUILD_VECTOR) &&
18107       (V2.getOpcode() != ISD::BUILD_VECTOR))
18108     return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
18109 
18110   // If we have one input in place, then we can permute the other input and
18111   // blend the result.
18112   if (V1IsInPlace || V2IsInPlace)
18113     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
18114                                                 Subtarget, DAG);
18115 
18116   // Try to create an in-lane repeating shuffle mask and then shuffle the
18117   // results into the target lanes.
18118   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18119           DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18120     return V;
18121 
18122   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18123   // shuffle. However, if we have AVX2 and either input is already in place,
18124   // we will be able to shuffle the other input even across lanes in a single
18125   // instruction, so skip this pattern.
18126   if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
18127     if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
18128             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18129       return V;
18130 
18131   // If we have VLX support, we can use VEXPAND.
18132   if (Subtarget.hasVLX())
18133     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
18134                                          DAG, Subtarget))
18135       return V;
18136 
18137   // If we have AVX2 then we always want to lower with a blend because at v4 we
18138   // can fully permute the elements.
18139   if (Subtarget.hasAVX2())
18140     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
18141                                                 Subtarget, DAG);
18142 
18143   // Otherwise fall back on generic lowering.
18144   return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
18145                                     Subtarget, DAG);
18146 }
18147 
18148 /// Handle lowering of 4-lane 64-bit integer shuffles.
18149 ///
18150 /// This routine is only called when we have AVX2 and thus a reasonable
18151 /// instruction set for v4i64 shuffling.
18152 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18153                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18154                                  const X86Subtarget &Subtarget,
18155                                  SelectionDAG &DAG) {
18156   assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
18157   assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
18158   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
18159   assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
18160 
18161   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
18162                                      Subtarget, DAG))
18163     return V;
18164 
18165   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
18166                                           Zeroable, Subtarget, DAG))
18167     return Blend;
18168 
18169   // Check for being able to broadcast a single element.
18170   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
18171                                                   Subtarget, DAG))
18172     return Broadcast;
18173 
18174   if (V2.isUndef()) {
18175     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
18176     // can use lower latency instructions that will operate on both lanes.
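    // e.g. (illustrative) a v4i64 mask <1,0,3,2> has the repeated lane mask
    // <1,0>, which widens to the v8i32 PSHUFD mask <2,3,0,1>.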
18177     SmallVector<int, 2> RepeatedMask;
18178     if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
18179       SmallVector<int, 4> PSHUFDMask;
18180       narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
18181       return DAG.getBitcast(
18182           MVT::v4i64,
18183           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
18184                       DAG.getBitcast(MVT::v8i32, V1),
18185                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
18186     }
18187 
18188     // AVX2 provides a direct instruction for permuting a single input across
18189     // lanes.
18190     return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
18191                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
18192   }
18193 
18194   // Try to use shift instructions.
18195   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
18196                                           Zeroable, Subtarget, DAG))
18197     return Shift;
18198 
18199   // If we have VLX support, we can use VALIGN or VEXPAND.
18200   if (Subtarget.hasVLX()) {
18201     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
18202                                               Subtarget, DAG))
18203       return Rotate;
18204 
18205     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
18206                                          DAG, Subtarget))
18207       return V;
18208   }
18209 
18210   // Try to use PALIGNR.
18211   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
18212                                                 Subtarget, DAG))
18213     return Rotate;
18214 
18215   // Use dedicated unpack instructions for masks that match their pattern.
18216   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
18217     return V;
18218 
18219   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
18220   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
18221 
18222   // If we have one input in place, then we can permute the other input and
18223   // blend the result.
18224   if (V1IsInPlace || V2IsInPlace)
18225     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
18226                                                 Subtarget, DAG);
18227 
18228   // Try to create an in-lane repeating shuffle mask and then shuffle the
18229   // results into the target lanes.
18230   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18231           DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
18232     return V;
18233 
18234   // Try to lower to PERMQ(BLENDD(V1,V2)).
18235   if (SDValue V =
18236           lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
18237     return V;
18238 
18239   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18240   // shuffle. However, if we have AVX2 and either input is already in place,
18241   // we will be able to shuffle the other input even across lanes in a single
18242   // instruction, so skip this pattern.
18243   if (!V1IsInPlace && !V2IsInPlace)
18244     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18245             DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
18246       return Result;
18247 
18248   // Otherwise fall back on generic blend lowering.
18249   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
18250                                               Subtarget, DAG);
18251 }
18252 
18253 /// Handle lowering of 8-lane 32-bit floating point shuffles.
18254 ///
18255 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
18256 /// isn't available.
18257 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18258                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18259                                  const X86Subtarget &Subtarget,
18260                                  SelectionDAG &DAG) {
18261   assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
18262   assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
18263   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18264 
18265   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
18266                                           Zeroable, Subtarget, DAG))
18267     return Blend;
18268 
18269   // Check for being able to broadcast a single element.
18270   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
18271                                                   Subtarget, DAG))
18272     return Broadcast;
18273 
18274   // If the shuffle mask is repeated in each 128-bit lane, we have many more
18275   // options to efficiently lower the shuffle.
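  // e.g. (illustrative) a v8f32 mask <1,1,3,3, 5,5,7,7> repeats per lane as
  // <1,1,3,3> and lowers to a single MOVSHDUP.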
18276   SmallVector<int, 4> RepeatedMask;
18277   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
18278     assert(RepeatedMask.size() == 4 &&
18279            "Repeated masks must be half the mask width!");
18280 
18281     // Use even/odd duplicate instructions for masks that match their pattern.
18282     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
18283       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
18284     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
18285       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
18286 
18287     if (V2.isUndef())
18288       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
18289                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18290 
18291     // Use dedicated unpack instructions for masks that match their pattern.
18292     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
18293       return V;
18294 
18295     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
18296     // have already handled any direct blends.
18297     return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
18298   }
18299 
18300   // Try to create an in-lane repeating shuffle mask and then shuffle the
18301   // results into the target lanes.
18302   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18303           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
18304     return V;
18305 
18306   // If we have a single input shuffle with different shuffle patterns in the
18307   // two 128-bit lanes use the variable mask to VPERMILPS.
18308   if (V2.isUndef()) {
18309     if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
18310       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18311       return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
18312     }
18313     if (Subtarget.hasAVX2()) {
18314       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18315       return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
18316     }
18317     // Otherwise, fall back.
18318     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
18319                                                DAG, Subtarget);
18320   }
18321 
18322   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18323   // shuffle.
18324   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18325           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
18326     return Result;
18327 
18328   // If we have VLX support, we can use VEXPAND.
18329   if (Subtarget.hasVLX())
18330     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
18331                                          DAG, Subtarget))
18332       return V;
18333 
18334   // Try to match an interleave of two v8f32s and lower them as unpck and
18335   // permutes using ymms. This needs to go before we try to split the vectors.
18336   //
18337   // TODO: Expand this to AVX1. Currently v8i32 is casted to v8f32 and hits
18338   // this path inadvertently.
18339   if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
18340     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
18341                                                       Mask, DAG))
18342       return V;
18343 
18344   // For non-AVX512, if the mask is of 16-bit elements in-lane then try to split,
18345   // since after the split we get more efficient code using vpunpcklwd and
18346   // vpunpckhwd instructions than with vblend.
18347   if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
18348     return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
18349                                       DAG);
18350 
18351   // If we have AVX2 then we always want to lower with a blend because at v8 we
18352   // can fully permute the elements.
18353   if (Subtarget.hasAVX2())
18354     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
18355                                                 Subtarget, DAG);
18356 
18357   // Otherwise fall back on generic lowering.
18358   return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
18359                                     Subtarget, DAG);
18360 }
18361 
18362 /// Handle lowering of 8-lane 32-bit integer shuffles.
18363 ///
18364 /// This routine is only called when we have AVX2 and thus a reasonable
18365 /// instruction set for v8i32 shuffling.
18366 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18367                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18368                                  const X86Subtarget &Subtarget,
18369                                  SelectionDAG &DAG) {
18370   assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18371   assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18372   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18373   assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
18374 
18375   // Whenever we can lower this as a zext, that instruction is strictly faster
18376   // than any alternative. It also allows us to fold memory operands into the
18377   // shuffle in many cases.
18378   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
18379                                                    Zeroable, Subtarget, DAG))
18380     return ZExt;
18381 
18382   // Try to match an interleave of two v8i32s and lower them as unpck and
18383   // permutes using ymms. This needs to go before we try to split the vectors.
18384   if (!Subtarget.hasAVX512())
18385     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
18386                                                       Mask, DAG))
18387       return V;
18388 
18389   // For non-AVX512, if the mask is of 16-bit elements in-lane then try to split,
18390   // since after the split we get more efficient code than vblend by using
18391   // vpunpcklwd and vpunpckhwd instructions.
18392   if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
18393       !Subtarget.hasAVX512())
18394     return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
18395                                       DAG);
18396 
18397   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
18398                                           Zeroable, Subtarget, DAG))
18399     return Blend;
18400 
18401   // Check for being able to broadcast a single element.
18402   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
18403                                                   Subtarget, DAG))
18404     return Broadcast;
18405 
18406   // If the shuffle mask is repeated in each 128-bit lane we can use more
18407   // efficient instructions that mirror the shuffles across the two 128-bit
18408   // lanes.
18409   SmallVector<int, 4> RepeatedMask;
18410   bool Is128BitLaneRepeatedShuffle =
18411       is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
18412   if (Is128BitLaneRepeatedShuffle) {
18413     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
18414     if (V2.isUndef())
18415       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
18416                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18417 
18418     // Use dedicated unpack instructions for masks that match their pattern.
18419     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
18420       return V;
18421   }
18422 
18423   // Try to use shift instructions.
18424   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
18425                                           Zeroable, Subtarget, DAG))
18426     return Shift;
18427 
18428   // If we have VLX support, we can use VALIGN or EXPAND.
18429   if (Subtarget.hasVLX()) {
18430     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
18431                                               Subtarget, DAG))
18432       return Rotate;
18433 
18434     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
18435                                          DAG, Subtarget))
18436       return V;
18437   }
18438 
18439   // Try to use byte rotation instructions.
18440   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
18441                                                 Subtarget, DAG))
18442     return Rotate;
18443 
18444   // Try to create an in-lane repeating shuffle mask and then shuffle the
18445   // results into the target lanes.
18446   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18447           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18448     return V;
18449 
18450   if (V2.isUndef()) {
18451     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18452     // because that should be faster than the variable permute alternatives.
18453     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
18454       return V;
18455 
18456     // If the shuffle patterns aren't repeated but it's a single input, directly
18457     // generate a cross-lane VPERMD instruction.
18458     SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18459     return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
18460   }
18461 
18462   // Assume that a single SHUFPS is faster than an alternative sequence of
18463   // multiple instructions (even if the CPU has a domain penalty).
18464   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
18465   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
18466     SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
18467     SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
18468     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
18469                                             CastV1, CastV2, DAG);
18470     return DAG.getBitcast(MVT::v8i32, ShufPS);
18471   }
18472 
18473   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18474   // shuffle.
18475   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18476           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18477     return Result;
18478 
18479   // Otherwise fall back on generic blend lowering.
18480   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
18481                                               Subtarget, DAG);
18482 }
18483 
18484 /// Handle lowering of 16-lane 16-bit integer shuffles.
18485 ///
18486 /// This routine is only called when we have AVX2 and thus a reasonable
18487 /// instruction set for v16i16 shuffling.
18488 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18489                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18490                                   const X86Subtarget &Subtarget,
18491                                   SelectionDAG &DAG) {
18492   assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18493   assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18494   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
18495   assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
18496 
18497   // Whenever we can lower this as a zext, that instruction is strictly faster
18498   // than any alternative. It also allows us to fold memory operands into the
18499   // shuffle in many cases.
18500   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
18501           DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
18502     return ZExt;
18503 
18504   // Check for being able to broadcast a single element.
18505   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
18506                                                   Subtarget, DAG))
18507     return Broadcast;
18508 
18509   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
18510                                           Zeroable, Subtarget, DAG))
18511     return Blend;
18512 
18513   // Use dedicated unpack instructions for masks that match their pattern.
18514   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
18515     return V;
18516 
18517   // Use dedicated pack instructions for masks that match their pattern.
18518   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
18519                                        Subtarget))
18520     return V;
18521 
18522   // Try to lower using a truncation.
18523   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
18524                                        Subtarget, DAG))
18525     return V;
18526 
18527   // Try to use shift instructions.
18528   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
18529                                           Zeroable, Subtarget, DAG))
18530     return Shift;
18531 
18532   // Try to use byte rotation instructions.
18533   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
18534                                                 Subtarget, DAG))
18535     return Rotate;
18536 
18537   // Try to create an in-lane repeating shuffle mask and then shuffle the
18538   // results into the target lanes.
18539   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18540           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
18541     return V;
18542 
18543   if (V2.isUndef()) {
18544     // Try to use bit rotation instructions.
18545     if (SDValue Rotate =
18546             lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
18547       return Rotate;
18548 
18549     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18550     // because that should be faster than the variable permute alternatives.
18551     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
18552       return V;
18553 
18554     // There are no generalized cross-lane shuffle operations available on i16
18555     // element types.
18556     if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
18557       if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18558               DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
18559         return V;
18560 
18561       return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
18562                                                  DAG, Subtarget);
18563     }
18564 
18565     SmallVector<int, 8> RepeatedMask;
18566     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
18567       // As this is a single-input shuffle, the repeated mask should be
18568       // a strictly valid v8i16 mask that we can pass through to the v8i16
18569       // lowering to handle even the v16 case.
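      // e.g. (illustrative) a v16i16 mask <0,2,1,3,4,6,5,7, 8,10,9,11,12,14,13,15>
      // repeats per 128-bit lane as the v8i16 mask <0,2,1,3,4,6,5,7>.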
18570       return lowerV8I16GeneralSingleInputShuffle(
18571           DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
18572     }
18573   }
18574 
18575   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
18576                                               Zeroable, Subtarget, DAG))
18577     return PSHUFB;
18578 
18579   // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
18580   if (Subtarget.hasBWI())
18581     return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);
18582 
18583   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18584   // shuffle.
18585   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18586           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
18587     return Result;
18588 
18589   // Try to permute the lanes and then use a per-lane permute.
18590   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18591           DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
18592     return V;
18593 
18594   // Try to match an interleave of two v16i16s and lower them as unpck and
18595   // permutes using ymms.
18596   if (!Subtarget.hasAVX512())
18597     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
18598                                                       Mask, DAG))
18599       return V;
18600 
18601   // Otherwise fall back on generic lowering.
18602   return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
18603                                     Subtarget, DAG);
18604 }
18605 
18606 /// Handle lowering of 32-lane 8-bit integer shuffles.
18607 ///
18608 /// This routine is only called when we have AVX2 and thus a reasonable
18609 /// instruction set for v32i8 shuffling.
18610 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18611                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18612                                  const X86Subtarget &Subtarget,
18613                                  SelectionDAG &DAG) {
18614   assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
18615   assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
18616   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
18617   assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
18618 
18619   // Whenever we can lower this as a zext, that instruction is strictly faster
18620   // than any alternative. It also allows us to fold memory operands into the
18621   // shuffle in many cases.
18622   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
18623                                                    Zeroable, Subtarget, DAG))
18624     return ZExt;
18625 
18626   // Check for being able to broadcast a single element.
18627   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
18628                                                   Subtarget, DAG))
18629     return Broadcast;
18630 
18631   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
18632                                           Zeroable, Subtarget, DAG))
18633     return Blend;
18634 
18635   // Use dedicated unpack instructions for masks that match their pattern.
18636   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
18637     return V;
18638 
18639   // Use dedicated pack instructions for masks that match their pattern.
18640   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
18641                                        Subtarget))
18642     return V;
18643 
18644   // Try to lower using a truncation.
18645   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
18646                                        Subtarget, DAG))
18647     return V;
18648 
18649   // Try to use shift instructions.
18650   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
18651                                           Zeroable, Subtarget, DAG))
18652     return Shift;
18653 
18654   // Try to use byte rotation instructions.
18655   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
18656                                                 Subtarget, DAG))
18657     return Rotate;
18658 
18659   // Try to use bit rotation instructions.
18660   if (V2.isUndef())
18661     if (SDValue Rotate =
18662             lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
18663       return Rotate;
18664 
18665   // Try to create an in-lane repeating shuffle mask and then shuffle the
18666   // results into the target lanes.
18667   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18668           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
18669     return V;
18670 
18671   // There are no generalized cross-lane shuffle operations available on i8
18672   // element types.
18673   if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
18674     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18675     // because that should be faster than the variable permute alternatives.
18676     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
18677       return V;
18678 
18679     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18680             DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
18681       return V;
18682 
18683     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
18684                                                DAG, Subtarget);
18685   }
18686 
18687   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
18688                                               Zeroable, Subtarget, DAG))
18689     return PSHUFB;
18690 
18691   // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
18692   if (Subtarget.hasVBMI())
18693     return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);
18694 
18695   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18696   // shuffle.
18697   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18698           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
18699     return Result;
18700 
18701   // Try to permute the lanes and then use a per-lane permute.
18702   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18703           DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
18704     return V;
18705 
18706   // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
18707   // by zeroable elements in the remaining 24 elements. Turn this into two
18708   // vmovqb instructions shuffled together.
18709   if (Subtarget.hasVLX())
18710     if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
18711                                                   Mask, Zeroable, DAG))
18712       return V;
18713 
18714   // Try to match an interleave of two v32i8s and lower them as unpck and
18715   // permutes using ymms.
18716   if (!Subtarget.hasAVX512())
18717     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
18718                                                       Mask, DAG))
18719       return V;
18720 
18721   // Otherwise fall back on generic lowering.
18722   return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
18723                                     Subtarget, DAG);
18724 }
18725 
18726 /// High-level routine to lower various 256-bit x86 vector shuffles.
18727 ///
18728 /// This routine either breaks down the specific type of a 256-bit x86 vector
18729 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
18730 /// together based on the available instructions.
18731 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
18732                                   SDValue V1, SDValue V2, const APInt &Zeroable,
18733                                   const X86Subtarget &Subtarget,
18734                                   SelectionDAG &DAG) {
18735   // If we have a single input to the zero element, insert that into V1 if we
18736   // can do so cheaply.
18737   int NumElts = VT.getVectorNumElements();
18738   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
18739 
18740   if (NumV2Elements == 1 && Mask[0] >= NumElts)
18741     if (SDValue Insertion = lowerShuffleAsElementInsertion(
18742             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
18743       return Insertion;
18744 
18745   // Handle special cases where the lower or upper half is UNDEF.
18746   if (SDValue V =
18747           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
18748     return V;
18749 
18750   // There is a really nice hard cut-over between AVX1 and AVX2 that means we
18751   // can check for those subtargets here and avoid much of the subtarget
18752   // querying in the per-vector-type lowering routines. With AVX1 we have
18753   // essentially *zero* ability to manipulate a 256-bit vector with integer
18754   // types. Since we'll use floating point types there eventually, just
18755   // immediately cast everything to a float and operate entirely in that domain.
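  // Editorial note (not in the original source): as an illustration, with AVX1
  // but not AVX2, a v8i32 shuffle such as <0, 8, 1, 9, 2, 10, 3, 11> is bitcast
  // to v8f32 by the code below, shuffled in the floating-point domain, and then
  // bitcast back to v8i32.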
18756   if (VT.isInteger() && !Subtarget.hasAVX2()) {
18757     int ElementBits = VT.getScalarSizeInBits();
18758     if (ElementBits < 32) {
18759       // No floating point type available; if we can't use the bit operations
18760       // for masking/blending then decompose into 128-bit vectors.
18761       if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
18762                                             Subtarget, DAG))
18763         return V;
18764       if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
18765         return V;
18766       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
18767     }
18768 
18769     MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
18770                                 VT.getVectorNumElements());
18771     V1 = DAG.getBitcast(FpVT, V1);
18772     V2 = DAG.getBitcast(FpVT, V2);
18773     return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
18774   }
18775 
18776   if (VT == MVT::v16f16) {
18777     V1 = DAG.getBitcast(MVT::v16i16, V1);
18778     V2 = DAG.getBitcast(MVT::v16i16, V2);
18779     return DAG.getBitcast(MVT::v16f16,
18780                           DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
18781   }
18782 
18783   switch (VT.SimpleTy) {
18784   case MVT::v4f64:
18785     return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18786   case MVT::v4i64:
18787     return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18788   case MVT::v8f32:
18789     return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18790   case MVT::v8i32:
18791     return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18792   case MVT::v16i16:
18793     return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18794   case MVT::v32i8:
18795     return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18796 
18797   default:
18798     llvm_unreachable("Not a valid 256-bit x86 vector type!");
18799   }
18800 }
18801 
18802 /// Try to lower a vector shuffle as a 128-bit shuffles.
18803 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
18804                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18805                                   const X86Subtarget &Subtarget,
18806                                   SelectionDAG &DAG) {
18807   assert(VT.getScalarSizeInBits() == 64 &&
18808          "Unexpected element type size for 128bit shuffle.");
18809 
18810   // Handling a 256-bit vector would require VLX, and in that case
18811   // lowerV2X128VectorShuffle() is most probably the better solution.
18812   assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
18813 
18814   // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
18815   SmallVector<int, 4> Widened128Mask;
18816   if (!canWidenShuffleElements(Mask, Widened128Mask))
18817     return SDValue();
18818   assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
18819 
18820   // Try to use an insert into a zero vector.
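  // Editorial note (not in the original source): as an illustration, a v8i64
  // shuffle whose widened 128-bit mask keeps V1's lanes {0, 1} in place while
  // the upper four 64-bit elements are zeroable ((Zeroable & 0xf0) == 0xf0)
  // extracts the low 256 bits of V1 and inserts them into an all-zero vector.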
18821   if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
18822       (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
18823     unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
18824     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
18825     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
18826                               DAG.getIntPtrConstant(0, DL));
18827     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
18828                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
18829                        DAG.getIntPtrConstant(0, DL));
18830   }
18831 
18832   // Check for patterns which can be matched with a single insert of a 256-bit
18833   // subvector.
18834   bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
18835   if (OnlyUsesV1 ||
18836       isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
18837     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
18838     SDValue SubVec =
18839         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
18840                     DAG.getIntPtrConstant(0, DL));
18841     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
18842                        DAG.getIntPtrConstant(4, DL));
18843   }
18844 
18845   // See if this is an insertion of the lower 128-bits of V2 into V1.
18846   bool IsInsert = true;
18847   int V2Index = -1;
18848   for (int i = 0; i < 4; ++i) {
18849     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
18850     if (Widened128Mask[i] < 0)
18851       continue;
18852 
18853     // Make sure all V1 subvectors are in place.
18854     if (Widened128Mask[i] < 4) {
18855       if (Widened128Mask[i] != i) {
18856         IsInsert = false;
18857         break;
18858       }
18859     } else {
18860       // Make sure we only have a single V2 index and it's the lowest 128 bits.
18861       if (V2Index >= 0 || Widened128Mask[i] != 4) {
18862         IsInsert = false;
18863         break;
18864       }
18865       V2Index = i;
18866     }
18867   }
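  // Editorial note (not in the original source): as an illustration,
  // Widened128Mask = {0, 1, 4, 3} keeps V1's lanes 0, 1 and 3 in place and only
  // pulls in V2's lowest 128-bit lane at position 2, so IsInsert remains true
  // with V2Index == 2 and the code below inserts V2's low 128 bits at element
  // offset 4.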
18868   if (IsInsert && V2Index >= 0) {
18869     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
18870     SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
18871                                  DAG.getIntPtrConstant(0, DL));
18872     return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
18873   }
18874 
18875   // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-lane
18876   // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
18877   // possible we at least ensure the lanes stay sequential to help later
18878   // combines.
18879   SmallVector<int, 2> Widened256Mask;
18880   if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
18881     Widened128Mask.clear();
18882     narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
18883   }
18884 
18885   // Try to lower to vshuf64x2/vshuf32x4.
18886   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
18887   unsigned PermMask = 0;
18888   // Ensure elements came from the same Op.
18889   for (int i = 0; i < 4; ++i) {
18890     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
18891     if (Widened128Mask[i] < 0)
18892       continue;
18893 
18894     SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
18895     unsigned OpIndex = i / 2;
18896     if (Ops[OpIndex].isUndef())
18897       Ops[OpIndex] = Op;
18898     else if (Ops[OpIndex] != Op)
18899       return SDValue();
18900 
18901     // Convert the 128-bit shuffle mask selection values into 128-bit selection
18902     // bits defined by a vshuf64x2 instruction's immediate control byte.
18903     PermMask |= (Widened128Mask[i] % 4) << (i * 2);
18904   }
18905 
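  // Editorial note (not in the original source): as an illustration, for
  // Widened128Mask = {2, 3, 4, 5} the loop above selects Ops = {V1, V2} and
  // builds PermMask = (2 << 0) | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E, so the
  // SHUF128 below concatenates the high 256 bits of V1 with the low 256 bits
  // of V2.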
18906   return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
18907                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
18908 }
18909 
18910 /// Handle lowering of 8-lane 64-bit floating point shuffles.
18911 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18912                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18913                                  const X86Subtarget &Subtarget,
18914                                  SelectionDAG &DAG) {
18915   assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
18916   assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
18917   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18918 
18919   if (V2.isUndef()) {
18920     // Use low duplicate instructions for masks that match their pattern.
18921     if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
18922       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
18923 
18924     if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
18925       // Non-half-crossing single input shuffles can be lowered with an
18926       // interleaved permutation.
18927       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
18928                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
18929                               ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
18930                               ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
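      // Editorial note (not in the original source): as an illustration, the
      // single-input mask <1, 0, 3, 2, 5, 4, 7, 6> swaps each pair of adjacent
      // elements, so the expression above yields VPERMILPMask = 0b01010101 =
      // 0x55.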
18931       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
18932                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
18933     }
18934 
18935     SmallVector<int, 4> RepeatedMask;
18936     if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
18937       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
18938                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18939   }
18940 
18941   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
18942                                            V2, Subtarget, DAG))
18943     return Shuf128;
18944 
18945   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
18946     return Unpck;
18947 
18948   // Check if the blend happens to exactly fit the pattern of SHUFPD.
18949   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
18950                                           Zeroable, Subtarget, DAG))
18951     return Op;
18952 
18953   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
18954                                        DAG, Subtarget))
18955     return V;
18956 
18957   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
18958                                           Zeroable, Subtarget, DAG))
18959     return Blend;
18960 
18961   return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
18962 }
18963 
18964 /// Handle lowering of 16-lane 32-bit floating point shuffles.
18965 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18966                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18967                                   const X86Subtarget &Subtarget,
18968                                   SelectionDAG &DAG) {
18969   assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
18970   assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
18971   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
18972 
18973   // If the shuffle mask is repeated in each 128-bit lane, we have many more
18974   // options to efficiently lower the shuffle.
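  // Editorial note (not in the original source): as an illustration, the
  // v16f32 mask <0,0,2,2, 4,4,6,6, 8,8,10,10, 12,12,14,14> repeats {0, 0, 2, 2}
  // in every 128-bit lane and is therefore lowered below to a single MOVSLDUP.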
18975   SmallVector<int, 4> RepeatedMask;
18976   if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
18977     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
18978 
18979     // Use even/odd duplicate instructions for masks that match their pattern.
18980     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
18981       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
18982     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
18983       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
18984 
18985     if (V2.isUndef())
18986       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
18987                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18988 
18989     // Use dedicated unpack instructions for masks that match their pattern.
18990     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
18991       return V;
18992 
18993     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
18994                                             Zeroable, Subtarget, DAG))
18995       return Blend;
18996 
18997     // Otherwise, fall back to a SHUFPS sequence.
18998     return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
18999   }
19000 
19001   // Try to create an in-lane repeating shuffle mask and then shuffle the
19002   // results into the target lanes.
19003   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19004           DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
19005     return V;
19006 
19007   // If we have a single input shuffle with different shuffle patterns in the
19008   // 128-bit lanes and no lane crossing, use a variable mask VPERMILPS.
19009   if (V2.isUndef() &&
19010       !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
19011     SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
19012     return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
19013   }
19014 
19015   // If we have AVX512F support, we can use VEXPAND.
19016   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
19017                                              V1, V2, DAG, Subtarget))
19018     return V;
19019 
19020   return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
19021 }
19022 
19023 /// Handle lowering of 8-lane 64-bit integer shuffles.
19024 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19025                                  const APInt &Zeroable, SDValue V1, SDValue V2,
19026                                  const X86Subtarget &Subtarget,
19027                                  SelectionDAG &DAG) {
19028   assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
19029   assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
19030   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
19031 
19032   if (V2.isUndef()) {
19033     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
19034     // can use lower latency instructions that will operate on all four
19035     // 128-bit lanes.
19036     SmallVector<int, 2> Repeated128Mask;
19037     if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
19038       SmallVector<int, 4> PSHUFDMask;
19039       narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
19040       return DAG.getBitcast(
19041           MVT::v8i64,
19042           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
19043                       DAG.getBitcast(MVT::v16i32, V1),
19044                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
19045     }
19046 
19047     SmallVector<int, 4> Repeated256Mask;
19048     if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
19049       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
19050                          getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
19051   }
19052 
19053   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
19054                                            V2, Subtarget, DAG))
19055     return Shuf128;
19056 
19057   // Try to use shift instructions.
19058   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
19059                                           Zeroable, Subtarget, DAG))
19060     return Shift;
19061 
19062   // Try to use VALIGN.
19063   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
19064                                             Subtarget, DAG))
19065     return Rotate;
19066 
19067   // Try to use PALIGNR.
19068   if (Subtarget.hasBWI())
19069     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
19070                                                   Subtarget, DAG))
19071       return Rotate;
19072 
19073   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
19074     return Unpck;
19075 
19076   // If we have AVX512F support, we can use VEXPAND.
19077   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
19078                                        DAG, Subtarget))
19079     return V;
19080 
19081   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
19082                                           Zeroable, Subtarget, DAG))
19083     return Blend;
19084 
19085   return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
19086 }
19087 
19088 /// Handle lowering of 16-lane 32-bit integer shuffles.
19089 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19090                                   const APInt &Zeroable, SDValue V1, SDValue V2,
19091                                   const X86Subtarget &Subtarget,
19092                                   SelectionDAG &DAG) {
19093   assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
19094   assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
19095   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
19096 
19097   // Whenever we can lower this as a zext, that instruction is strictly faster
19098   // than any alternative. It also allows us to fold memory operands into the
19099   // shuffle in many cases.
19100   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19101           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
19102     return ZExt;
19103 
19104   // If the shuffle mask is repeated in each 128-bit lane we can use more
19105   // efficient instructions that mirror the shuffles across the four 128-bit
19106   // lanes.
19107   SmallVector<int, 4> RepeatedMask;
19108   bool Is128BitLaneRepeatedShuffle =
19109       is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
19110   if (Is128BitLaneRepeatedShuffle) {
19111     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
19112     if (V2.isUndef())
19113       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
19114                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
19115 
19116     // Use dedicated unpack instructions for masks that match their pattern.
19117     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
19118       return V;
19119   }
19120 
19121   // Try to use shift instructions.
19122   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
19123                                           Zeroable, Subtarget, DAG))
19124     return Shift;
19125 
19126   // Try to use VALIGN.
19127   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
19128                                             Subtarget, DAG))
19129     return Rotate;
19130 
19131   // Try to use byte rotation instructions.
19132   if (Subtarget.hasBWI())
19133     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
19134                                                   Subtarget, DAG))
19135       return Rotate;
19136 
19137   // Assume that a single SHUFPS is faster than using a permv shuffle.
19138   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
19139   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
19140     SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
19141     SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
19142     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
19143                                             CastV1, CastV2, DAG);
19144     return DAG.getBitcast(MVT::v16i32, ShufPS);
19145   }
19146 
19147   // Try to create an in-lane repeating shuffle mask and then shuffle the
19148   // results into the target lanes.
19149   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19150           DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
19151     return V;
19152 
19153   // If we have AVX512F support, we can use VEXPAND.
19154   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
19155                                        DAG, Subtarget))
19156     return V;
19157 
19158   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
19159                                           Zeroable, Subtarget, DAG))
19160     return Blend;
19161 
19162   return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
19163 }
19164 
19165 /// Handle lowering of 32-lane 16-bit integer shuffles.
19166 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19167                                   const APInt &Zeroable, SDValue V1, SDValue V2,
19168                                   const X86Subtarget &Subtarget,
19169                                   SelectionDAG &DAG) {
19170   assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
19171   assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
19172   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
19173   assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
19174 
19175   // Whenever we can lower this as a zext, that instruction is strictly faster
19176   // than any alternative. It also allows us to fold memory operands into the
19177   // shuffle in many cases.
19178   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19179           DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
19180     return ZExt;
19181 
19182   // Use dedicated unpack instructions for masks that match their pattern.
19183   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
19184     return V;
19185 
19186   // Use dedicated pack instructions for masks that match their pattern.
19187   if (SDValue V =
19188           lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
19189     return V;
19190 
19191   // Try to use shift instructions.
19192   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
19193                                           Zeroable, Subtarget, DAG))
19194     return Shift;
19195 
19196   // Try to use byte rotation instructions.
19197   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
19198                                                 Subtarget, DAG))
19199     return Rotate;
19200 
19201   if (V2.isUndef()) {
19202     // Try to use bit rotation instructions.
19203     if (SDValue Rotate =
19204             lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
19205       return Rotate;
19206 
19207     SmallVector<int, 8> RepeatedMask;
19208     if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
19209       // As this is a single-input shuffle, the repeated mask should be
19210       // a strictly valid v8i16 mask that we can pass through to the v8i16
19211       // lowering to handle even the v32 case.
19212       return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
19213                                                  RepeatedMask, Subtarget, DAG);
19214     }
19215   }
19216 
19217   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
19218                                           Zeroable, Subtarget, DAG))
19219     return Blend;
19220 
19221   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
19222                                               Zeroable, Subtarget, DAG))
19223     return PSHUFB;
19224 
19225   return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
19226 }
19227 
19228 /// Handle lowering of 64-lane 8-bit integer shuffles.
19229 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19230                                  const APInt &Zeroable, SDValue V1, SDValue V2,
19231                                  const X86Subtarget &Subtarget,
19232                                  SelectionDAG &DAG) {
19233   assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19234   assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19235   assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
19236   assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
19237 
19238   // Whenever we can lower this as a zext, that instruction is strictly faster
19239   // than any alternative. It also allows us to fold memory operands into the
19240   // shuffle in many cases.
19241   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19242           DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
19243     return ZExt;
19244 
19245   // Use dedicated unpack instructions for masks that match their pattern.
19246   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
19247     return V;
19248 
19249   // Use dedicated pack instructions for masks that match their pattern.
19250   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
19251                                        Subtarget))
19252     return V;
19253 
19254   // Try to use shift instructions.
19255   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
19256                                           Zeroable, Subtarget, DAG))
19257     return Shift;
19258 
19259   // Try to use byte rotation instructions.
19260   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
19261                                                 Subtarget, DAG))
19262     return Rotate;
19263 
19264   // Try to use bit rotation instructions.
19265   if (V2.isUndef())
19266     if (SDValue Rotate =
19267             lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
19268       return Rotate;
19269 
19270   // Lower as AND if possible.
19271   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
19272                                              Zeroable, Subtarget, DAG))
19273     return Masked;
19274 
19275   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
19276                                               Zeroable, Subtarget, DAG))
19277     return PSHUFB;
19278 
19279   // Try to create an in-lane repeating shuffle mask and then shuffle the
19280   // results into the target lanes.
19281   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19282           DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19283     return V;
19284 
19285   if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
19286           DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
19287     return Result;
19288 
19289   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
19290                                           Zeroable, Subtarget, DAG))
19291     return Blend;
19292 
19293   if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
19294     // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
19295     // PALIGNR will be cheaper than the second PSHUFB+OR.
19296     if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
19297                                                        Mask, Subtarget, DAG))
19298       return V;
19299 
19300     // If we can't directly blend but can use PSHUFB, that will be better as it
19301     // can both shuffle and set up the inefficient blend.
19302     bool V1InUse, V2InUse;
19303     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
19304                                         DAG, V1InUse, V2InUse);
19305   }
19306 
19307   // Try to simplify this by merging 128-bit lanes to enable a lane-based
19308   // shuffle.
19309   if (!V2.isUndef())
19310     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
19311             DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19312       return Result;
19313 
19314   // VBMI can use VPERMV/VPERMV3 byte shuffles.
19315   if (Subtarget.hasVBMI())
19316     return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
19317 
19318   return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
19319 }
19320 
19321 /// High-level routine to lower various 512-bit x86 vector shuffles.
19322 ///
19323 /// This routine either breaks down the specific type of a 512-bit x86 vector
19324 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
19325 /// together based on the available instructions.
19326 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19327                                   MVT VT, SDValue V1, SDValue V2,
19328                                   const APInt &Zeroable,
19329                                   const X86Subtarget &Subtarget,
19330                                   SelectionDAG &DAG) {
19331   assert(Subtarget.hasAVX512() &&
19332          "Cannot lower 512-bit vectors w/ basic ISA!");
19333 
19334   // If we have a single input to the zero element, insert that into V1 if we
19335   // can do so cheaply.
19336   int NumElts = Mask.size();
19337   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
19338 
19339   if (NumV2Elements == 1 && Mask[0] >= NumElts)
19340     if (SDValue Insertion = lowerShuffleAsElementInsertion(
19341             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
19342       return Insertion;
19343 
19344   // Handle special cases where the lower or upper half is UNDEF.
19345   if (SDValue V =
19346           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
19347     return V;
19348 
19349   // Check for being able to broadcast a single element.
19350   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
19351                                                   Subtarget, DAG))
19352     return Broadcast;
19353 
19354   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
19355     // Try using bit ops for masking and blending before falling back to
19356     // splitting.
19357     if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
19358                                           Subtarget, DAG))
19359       return V;
19360     if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
19361       return V;
19362 
19363     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
19364   }
19365 
19366   if (VT == MVT::v32f16) {
19367     V1 = DAG.getBitcast(MVT::v32i16, V1);
19368     V2 = DAG.getBitcast(MVT::v32i16, V2);
19369     return DAG.getBitcast(MVT::v32f16,
19370                           DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
19371   }
19372 
19373   // Dispatch to each element type for lowering. If we don't have support for
19374   // specific element type shuffles at 512 bits, immediately split them and
19375   // lower them. Each lowering routine of a given type is allowed to assume that
19376   // the requisite ISA extensions for that element type are available.
19377   switch (VT.SimpleTy) {
19378   case MVT::v8f64:
19379     return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19380   case MVT::v16f32:
19381     return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19382   case MVT::v8i64:
19383     return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19384   case MVT::v16i32:
19385     return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19386   case MVT::v32i16:
19387     return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19388   case MVT::v64i8:
19389     return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19390 
19391   default:
19392     llvm_unreachable("Not a valid 512-bit x86 vector type!");
19393   }
19394 }
19395 
19396 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
19397                                          MVT VT, SDValue V1, SDValue V2,
19398                                          const X86Subtarget &Subtarget,
19399                                          SelectionDAG &DAG) {
19400   // Shuffle should be unary.
19401   if (!V2.isUndef())
19402     return SDValue();
19403 
19404   int ShiftAmt = -1;
19405   int NumElts = Mask.size();
19406   for (int i = 0; i != NumElts; ++i) {
19407     int M = Mask[i];
19408     assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
19409            "Unexpected mask index.");
19410     if (M < 0)
19411       continue;
19412 
19413     // The first non-undef element determines our shift amount.
19414     if (ShiftAmt < 0) {
19415       ShiftAmt = M - i;
19416       // Need to be shifting right.
19417       if (ShiftAmt <= 0)
19418         return SDValue();
19419     }
19420     // All non-undef elements must shift by the same amount.
19421     if (ShiftAmt != M - i)
19422       return SDValue();
19423   }
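  // Editorial note (not in the original source): as an illustration, the v8i1
  // mask <2, 3, 4, 5, 6, 7, -1, -1> gives M - i == 2 for every defined element,
  // so ShiftAmt == 2 and the whole mask is treated as a right shift by 2, with
  // the shifted-in bits only landing in undef positions.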
19424   assert(ShiftAmt >= 0 && "All undef?");
19425 
19426   // Great, we found a shift right.
19427   MVT WideVT = VT;
19428   if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19429     WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19430   SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19431                             DAG.getUNDEF(WideVT), V1,
19432                             DAG.getIntPtrConstant(0, DL));
19433   Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
19434                     DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19435   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19436                      DAG.getIntPtrConstant(0, DL));
19437 }
19438 
19439 // Determine if this shuffle can be implemented with a KSHIFT instruction.
19440 // Returns the shift amount if possible or -1 if not. This is a simplified
19441 // version of matchShuffleAsShift.
19442 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
19443                                     int MaskOffset, const APInt &Zeroable) {
19444   int Size = Mask.size();
19445 
19446   auto CheckZeros = [&](int Shift, bool Left) {
19447     for (int j = 0; j < Shift; ++j)
19448       if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
19449         return false;
19450 
19451     return true;
19452   };
19453 
19454   auto MatchShift = [&](int Shift, bool Left) {
19455     unsigned Pos = Left ? Shift : 0;
19456     unsigned Low = Left ? 0 : Shift;
19457     unsigned Len = Size - Shift;
19458     return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
19459   };
19460 
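  // Editorial note (not in the original source): as an illustration, with
  // MaskOffset == 0 and Size == 8, a mask whose first four elements are
  // <4, 5, 6, 7> and whose last four elements are zeroable satisfies
  // MatchShift(4, /*Left=*/false) and CheckZeros(4, /*Left=*/false), so the
  // search below returns 4 with Opcode == X86ISD::KSHIFTR.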
19461   for (int Shift = 1; Shift != Size; ++Shift)
19462     for (bool Left : {true, false})
19463       if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
19464         Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
19465         return Shift;
19466       }
19467 
19468   return -1;
19469 }
19470 
19471 
19472 // Lower vXi1 vector shuffles.
19473 // There is no dedicated instruction on AVX-512 that shuffles the masks.
19474 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
19475 // vector, shuffle it, and then truncate it back.
19476 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19477                                 MVT VT, SDValue V1, SDValue V2,
19478                                 const APInt &Zeroable,
19479                                 const X86Subtarget &Subtarget,
19480                                 SelectionDAG &DAG) {
19481   assert(Subtarget.hasAVX512() &&
19482          "Cannot lower 512-bit vectors w/o basic ISA!");
19483 
19484   int NumElts = Mask.size();
19485 
19486   // Try to recognize shuffles that are just padding a subvector with zeros.
19487   int SubvecElts = 0;
19488   int Src = -1;
19489   for (int i = 0; i != NumElts; ++i) {
19490     if (Mask[i] >= 0) {
19491       // Grab the source from the first valid mask. All subsequent elements need
19492       // to use this same source.
19493       if (Src < 0)
19494         Src = Mask[i] / NumElts;
19495       if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
19496         break;
19497     }
19498 
19499     ++SubvecElts;
19500   }
19501   assert(SubvecElts != NumElts && "Identity shuffle?");
19502 
19503   // Clip to a power of 2.
19504   SubvecElts = PowerOf2Floor(SubvecElts);
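  // Editorial note (not in the original source): as an illustration, a v8i1
  // shuffle with mask <0, 1, 2, 3, 8, 9, 10, 11> where V2 is known to be all
  // zeros stops the scan above at SubvecElts == 4; the upper four elements are
  // zeroable, so the code below extracts the low v4i1 of V1 and inserts it into
  // a zero vector.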
19505 
19506   // Make sure the number of zeroable bits in the top at least covers the bits
19507   // not covered by the subvector.
19508   if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
19509     assert(Src >= 0 && "Expected a source!");
19510     MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
19511     SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
19512                                   Src == 0 ? V1 : V2,
19513                                   DAG.getIntPtrConstant(0, DL));
19514     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
19515                        DAG.getConstant(0, DL, VT),
19516                        Extract, DAG.getIntPtrConstant(0, DL));
19517   }
19518 
19519   // Try a simple shift right with undef elements. Later we'll try with zeros.
19520   if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
19521                                                 DAG))
19522     return Shift;
19523 
19524   // Try to match KSHIFTs.
19525   unsigned Offset = 0;
19526   for (SDValue V : { V1, V2 }) {
19527     unsigned Opcode;
19528     int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
19529     if (ShiftAmt >= 0) {
19530       MVT WideVT = VT;
19531       if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19532         WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19533       SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19534                                 DAG.getUNDEF(WideVT), V,
19535                                 DAG.getIntPtrConstant(0, DL));
19536       // Widened right shifts need two shifts to ensure we shift in zeroes.
19537       if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
19538         int WideElts = WideVT.getVectorNumElements();
19539         // Shift left to put the original vector in the MSBs of the new size.
19540         Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
19541                           DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
19542         // Increase the shift amount to account for the left shift.
19543         ShiftAmt += WideElts - NumElts;
19544       }
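      // Editorial note (not in the original source): as an illustration, a
      // right shift by 1 of a v4i1 mask widened to v16i1 first shifts the four
      // live bits left by WideElts - NumElts == 12 and bumps ShiftAmt to 13, so
      // the KSHIFTR emitted below shifts zeros rather than stale widened bits
      // into the low four lanes.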
19545 
19546       Res = DAG.getNode(Opcode, DL, WideVT, Res,
19547                         DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19548       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19549                          DAG.getIntPtrConstant(0, DL));
19550     }
19551     Offset += NumElts; // Increment for next iteration.
19552   }
19553 
19554   // If we're broadcasting a SETCC result, try to broadcast the ops instead.
19555   // TODO: What other unary shuffles would benefit from this?
19556   if (isBroadcastShuffleMask(Mask) && V1.getOpcode() == ISD::SETCC &&
19557       V1->hasOneUse()) {
19558     SDValue Op0 = V1.getOperand(0);
19559     SDValue Op1 = V1.getOperand(1);
19560     ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
19561     EVT OpVT = Op0.getValueType();
19562     return DAG.getSetCC(
19563         DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
19564         DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
19565   }
19566 
19567   MVT ExtVT;
19568   switch (VT.SimpleTy) {
19569   default:
19570     llvm_unreachable("Expected a vector of i1 elements");
19571   case MVT::v2i1:
19572     ExtVT = MVT::v2i64;
19573     break;
19574   case MVT::v4i1:
19575     ExtVT = MVT::v4i32;
19576     break;
19577   case MVT::v8i1:
19578     // Take a 512-bit type (more shuffles on KNL). If we have VLX, use a
19579     // 256-bit shuffle.
19580     ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
19581     break;
19582   case MVT::v16i1:
19583     // Take 512-bit type, unless we are avoiding 512-bit types and have the
19584     // 256-bit operation available.
19585     ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
19586     break;
19587   case MVT::v32i1:
19588     // Take 512-bit type, unless we are avoiding 512-bit types and have the
19589     // 256-bit operation available.
19590     assert(Subtarget.hasBWI() && "Expected AVX512BW support");
19591     ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
19592     break;
19593   case MVT::v64i1:
19594     // Fall back to scalarization. FIXME: We can do better if the shuffle
19595     // can be partitioned cleanly.
19596     if (!Subtarget.useBWIRegs())
19597       return SDValue();
19598     ExtVT = MVT::v64i8;
19599     break;
19600   }
19601 
19602   V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
19603   V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
19604 
19605   SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
19606   // i1 was sign-extended, so we can use X86ISD::CVT2MASK.
19607   int NumElems = VT.getVectorNumElements();
19608   if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
19609       (Subtarget.hasDQI() && (NumElems < 32)))
19610     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
19611                        Shuffle, ISD::SETGT);
19612 
19613   return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
19614 }
19615 
19616 /// Helper function that returns true if the shuffle mask should be
19617 /// commuted to improve canonicalization.
19618 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
19619   int NumElements = Mask.size();
19620 
19621   int NumV1Elements = 0, NumV2Elements = 0;
19622   for (int M : Mask)
19623     if (M < 0)
19624       continue;
19625     else if (M < NumElements)
19626       ++NumV1Elements;
19627     else
19628       ++NumV2Elements;
19629 
19630   // Commute the shuffle as needed such that more elements come from V1 than
19631   // V2. This allows us to match the shuffle pattern strictly on how many
19632   // elements come from V1 without handling the symmetric cases.
19633   if (NumV2Elements > NumV1Elements)
19634     return true;
19635 
19636   assert(NumV1Elements > 0 && "No V1 indices");
19637 
19638   if (NumV2Elements == 0)
19639     return false;
19640 
19641   // When the number of V1 and V2 elements are the same, try to minimize the
19642   // number of uses of V2 in the low half of the vector. When that is tied,
19643   // ensure that the sum of indices for V1 is equal to or lower than the sum
19644   // of indices for V2. When those are equal, try to ensure that the number of odd
19645   // indices for V1 is lower than the number of odd indices for V2.
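  // Editorial note (not in the original source): as an illustration, for the
  // four-element mask <4, 1, 6, 3>, V1 and V2 each contribute two elements and
  // one element apiece to the low half, but the sum of V2's positions (0 + 2)
  // is lower than the sum of V1's positions (1 + 3), so the shuffle is
  // commuted.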
19646   if (NumV1Elements == NumV2Elements) {
19647     int LowV1Elements = 0, LowV2Elements = 0;
19648     for (int M : Mask.slice(0, NumElements / 2))
19649       if (M >= NumElements)
19650         ++LowV2Elements;
19651       else if (M >= 0)
19652         ++LowV1Elements;
19653     if (LowV2Elements > LowV1Elements)
19654       return true;
19655     if (LowV2Elements == LowV1Elements) {
19656       int SumV1Indices = 0, SumV2Indices = 0;
19657       for (int i = 0, Size = Mask.size(); i < Size; ++i)
19658         if (Mask[i] >= NumElements)
19659           SumV2Indices += i;
19660         else if (Mask[i] >= 0)
19661           SumV1Indices += i;
19662       if (SumV2Indices < SumV1Indices)
19663         return true;
19664       if (SumV2Indices == SumV1Indices) {
19665         int NumV1OddIndices = 0, NumV2OddIndices = 0;
19666         for (int i = 0, Size = Mask.size(); i < Size; ++i)
19667           if (Mask[i] >= NumElements)
19668             NumV2OddIndices += i % 2;
19669           else if (Mask[i] >= 0)
19670             NumV1OddIndices += i % 2;
19671         if (NumV2OddIndices < NumV1OddIndices)
19672           return true;
19673       }
19674     }
19675   }
19676 
19677   return false;
19678 }
19679 
19680 static bool canCombineAsMaskOperation(SDValue V1, SDValue V2,
19681                                       const X86Subtarget &Subtarget) {
19682   if (!Subtarget.hasAVX512())
19683     return false;
19684 
19685   MVT VT = V1.getSimpleValueType().getScalarType();
19686   if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
19687     return false;
19688 
19689   // i8 is better widened to i16, because there is PBLENDW for vXi16
19690   // when the vector bit size is 128 or 256.
19691   if (VT == MVT::i8 && V1.getSimpleValueType().getSizeInBits() < 512)
19692     return false;
19693 
19694   auto HasMaskOperation = [&](SDValue V) {
19695     // TODO: Currently we only check a limited set of opcodes. We could
19696     // probably extend this to all binary operations by checking TLI.isBinOp().
19697     switch (V->getOpcode()) {
19698     default:
19699       return false;
19700     case ISD::ADD:
19701     case ISD::SUB:
19702     case ISD::AND:
19703     case ISD::XOR:
19704       break;
19705     }
19706     if (!V->hasOneUse())
19707       return false;
19708 
19709     return true;
19710   };
19711 
19712   if (HasMaskOperation(V1) || HasMaskOperation(V2))
19713     return true;
19714 
19715   return false;
19716 }
19717 
19718 // Forward declaration.
19719 static SDValue canonicalizeShuffleMaskWithHorizOp(
19720     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
19721     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
19722     const X86Subtarget &Subtarget);
19723 
19724 /// Top-level lowering for x86 vector shuffles.
19725 ///
19726 /// This handles decomposition, canonicalization, and lowering of all x86
19727 /// vector shuffles. Most of the specific lowering strategies are encapsulated
19728 /// above in helper routines. The canonicalization attempts to widen shuffles
19729 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
19730 /// s.t. only one of the two inputs needs to be tested, etc.
19731 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
19732                                    SelectionDAG &DAG) {
19733   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
19734   ArrayRef<int> OrigMask = SVOp->getMask();
19735   SDValue V1 = Op.getOperand(0);
19736   SDValue V2 = Op.getOperand(1);
19737   MVT VT = Op.getSimpleValueType();
19738   int NumElements = VT.getVectorNumElements();
19739   SDLoc DL(Op);
19740   bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
19741 
19742   assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
19743          "Can't lower MMX shuffles");
19744 
19745   bool V1IsUndef = V1.isUndef();
19746   bool V2IsUndef = V2.isUndef();
19747   if (V1IsUndef && V2IsUndef)
19748     return DAG.getUNDEF(VT);
19749 
19750   // When we create a shuffle node we put the UNDEF node in the second operand,
19751   // but in some cases the first operand may be transformed to UNDEF.
19752   // In this case we should just commute the node.
19753   if (V1IsUndef)
19754     return DAG.getCommutedVectorShuffle(*SVOp);
19755 
19756   // Check for non-undef masks pointing at an undef vector and make the masks
19757   // undef as well. This makes it easier to match the shuffle based solely on
19758   // the mask.
19759   if (V2IsUndef &&
19760       any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
19761     SmallVector<int, 8> NewMask(OrigMask);
19762     for (int &M : NewMask)
19763       if (M >= NumElements)
19764         M = -1;
19765     return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
19766   }
19767 
19768   // Check for illegal shuffle mask element index values.
19769   int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
19770   (void)MaskUpperLimit;
19771   assert(llvm::all_of(OrigMask,
19772                       [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
19773          "Out of bounds shuffle index");
19774 
19775   // We actually see shuffles that are entirely re-arrangements of a set of
19776   // zero inputs. This mostly happens while decomposing complex shuffles into
19777   // simple ones. Directly lower these as a buildvector of zeros.
19778   APInt KnownUndef, KnownZero;
19779   computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
19780 
19781   APInt Zeroable = KnownUndef | KnownZero;
19782   if (Zeroable.isAllOnes())
19783     return getZeroVector(VT, Subtarget, DAG, DL);
19784 
19785   bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
19786 
19787   // Try to collapse shuffles into using a vector type with fewer elements but
19788   // wider element types. We cap this to not form integers or floating point
19789   // elements wider than 64 bits. It does not seem beneficial to form i128
19790   // integers to handle flipping the low and high halves of AVX 256-bit vectors.
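  // Editorial note (not in the original source): as an illustration, a v4i32
  // shuffle with mask <0, 1, 4, 5> widens to a v2i64 shuffle with mask <0, 2>,
  // halving the number of elements the per-type lowering routines must handle.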
19791   SmallVector<int, 16> WidenedMask;
19792   if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
19793       !canCombineAsMaskOperation(V1, V2, Subtarget) &&
19794       canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
19795     // Shuffle mask widening should not interfere with a broadcast opportunity
19796     // by obfuscating the operands with bitcasts.
19797     // TODO: Avoid lowering directly from this top-level function: make this
19798     // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
19799     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
19800                                                     Subtarget, DAG))
19801       return Broadcast;
19802 
19803     MVT NewEltVT = VT.isFloatingPoint()
19804                        ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
19805                        : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
19806     int NewNumElts = NumElements / 2;
19807     MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
19808     // Make sure that the new vector type is legal. For example, v2f64 isn't
19809     // legal on SSE1.
19810     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
19811       if (V2IsZero) {
19812         // Modify the new Mask to take all zeros from the all-zero vector.
19813         // Choose indices that are blend-friendly.
19814         bool UsedZeroVector = false;
19815         assert(is_contained(WidenedMask, SM_SentinelZero) &&
19816                "V2's non-undef elements are used?!");
19817         for (int i = 0; i != NewNumElts; ++i)
19818           if (WidenedMask[i] == SM_SentinelZero) {
19819             WidenedMask[i] = i + NewNumElts;
19820             UsedZeroVector = true;
19821           }
19822         // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
19823         // some elements to be undef.
19824         if (UsedZeroVector)
19825           V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
19826       }
19827       V1 = DAG.getBitcast(NewVT, V1);
19828       V2 = DAG.getBitcast(NewVT, V2);
19829       return DAG.getBitcast(
19830           VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
19831     }
19832   }
19833 
19834   SmallVector<SDValue> Ops = {V1, V2};
19835   SmallVector<int> Mask(OrigMask);
19836 
19837   // Canonicalize the shuffle with any horizontal ops inputs.
19838   // NOTE: This may update Ops and Mask.
19839   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
19840           Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
19841     return DAG.getBitcast(VT, HOp);
19842 
19843   V1 = DAG.getBitcast(VT, Ops[0]);
19844   V2 = DAG.getBitcast(VT, Ops[1]);
19845   assert(NumElements == (int)Mask.size() &&
19846          "canonicalizeShuffleMaskWithHorizOp "
19847          "shouldn't alter the shuffle mask size");
19848 
19849   // Commute the shuffle if it will improve canonicalization.
19850   if (canonicalizeShuffleMaskWithCommute(Mask)) {
19851     ShuffleVectorSDNode::commuteMask(Mask);
19852     std::swap(V1, V2);
19853   }
19854 
19855   // For each vector width, delegate to a specialized lowering routine.
19856   if (VT.is128BitVector())
19857     return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19858 
19859   if (VT.is256BitVector())
19860     return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19861 
19862   if (VT.is512BitVector())
19863     return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19864 
19865   if (Is1BitVector)
19866     return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19867 
19868   llvm_unreachable("Unimplemented!");
19869 }
19870 
19871 /// Try to lower a VSELECT instruction to a vector shuffle.
19872 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
19873                                            const X86Subtarget &Subtarget,
19874                                            SelectionDAG &DAG) {
19875   SDValue Cond = Op.getOperand(0);
19876   SDValue LHS = Op.getOperand(1);
19877   SDValue RHS = Op.getOperand(2);
19878   MVT VT = Op.getSimpleValueType();
19879 
19880   // Only non-legal VSELECTs reach this lowering; convert those into generic
19881   // shuffles and re-use the shuffle lowering path for blends.
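  // Rough example: (vselect <4 x i1> <1, 0, 0, 1>, %lhs, %rhs) corresponds to
  // a shuffle of %lhs and %rhs with mask <0, 5, 6, 3>, where indices >= 4
  // select from %rhs.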
19882   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
19883     SmallVector<int, 32> Mask;
19884     if (createShuffleMaskFromVSELECT(Mask, Cond))
19885       return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
19886   }
19887 
19888   return SDValue();
19889 }
19890 
19891 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
19892   SDValue Cond = Op.getOperand(0);
19893   SDValue LHS = Op.getOperand(1);
19894   SDValue RHS = Op.getOperand(2);
19895 
19896   SDLoc dl(Op);
19897   MVT VT = Op.getSimpleValueType();
19898   if (isSoftFP16(VT)) {
19899     MVT NVT = VT.changeVectorElementTypeToInteger();
19900     return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
19901                                           DAG.getBitcast(NVT, LHS),
19902                                           DAG.getBitcast(NVT, RHS)));
19903   }
19904 
19905   // A vselect where all conditions and data are constants can be optimized into
19906   // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
19907   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
19908       ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
19909       ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
19910     return SDValue();
19911 
19912   // Try to lower this to a blend-style vector shuffle. This can handle all
19913   // constant condition cases.
19914   if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
19915     return BlendOp;
19916 
19917   // If this VSELECT has a vector of i1 as a mask, it will be directly matched
19918   // with patterns on the mask registers on AVX-512.
19919   MVT CondVT = Cond.getSimpleValueType();
19920   unsigned CondEltSize = Cond.getScalarValueSizeInBits();
19921   if (CondEltSize == 1)
19922     return Op;
19923 
19924   // Variable blends are only legal from SSE4.1 onward.
19925   if (!Subtarget.hasSSE41())
19926     return SDValue();
19927 
19928   unsigned EltSize = VT.getScalarSizeInBits();
19929   unsigned NumElts = VT.getVectorNumElements();
19930 
19931   // Expand v32i16/v64i8 without BWI.
19932   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
19933     return SDValue();
19934 
19935   // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
19936   // into an i1 condition so that we can use the mask-based 512-bit blend
19937   // instructions.
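  // Sketch of the transform below: a v16i32 condition becomes a v16i1 mask via
  // (setne Cond, 0), e.g.
  //   (vselect v16i32 %c, %a, %b) -> (vselect (setne %c, 0), %a, %b)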
19938   if (VT.getSizeInBits() == 512) {
19939     // Build a mask by testing the condition against zero.
19940     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
19941     SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
19942                                 DAG.getConstant(0, dl, CondVT),
19943                                 ISD::SETNE);
19944     // Now return a new VSELECT using the mask.
19945     return DAG.getSelect(dl, VT, Mask, LHS, RHS);
19946   }
19947 
19948   // SEXT/TRUNC cases where the mask doesn't match the destination size.
19949   if (CondEltSize != EltSize) {
19950     // If we don't have a sign splat, rely on the expansion.
19951     if (CondEltSize != DAG.ComputeNumSignBits(Cond))
19952       return SDValue();
19953 
19954     MVT NewCondSVT = MVT::getIntegerVT(EltSize);
19955     MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
19956     Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
19957     return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
19958   }
19959 
19960   // Only some types will be legal on some subtargets. If we can emit a legal
19961   // VSELECT-matching blend, return Op; but if we need to expand, return
19962   // a null value.
19963   switch (VT.SimpleTy) {
19964   default:
19965     // Most of the vector types have blends past SSE4.1.
19966     return Op;
19967 
19968   case MVT::v32i8:
19969     // The byte blends for AVX vectors were introduced only in AVX2.
19970     if (Subtarget.hasAVX2())
19971       return Op;
19972 
19973     return SDValue();
19974 
19975   case MVT::v8i16:
19976   case MVT::v16i16: {
19977     // Bitcast everything to the vXi8 type and use a vXi8 vselect.
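    // For example, a v8i16 vselect becomes a v16i8 vselect; since each i16
    // condition lane is expected to be all-ones or all-zeros, both bytes of a
    // lane carry the same mask value.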
19978     MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
19979     Cond = DAG.getBitcast(CastVT, Cond);
19980     LHS = DAG.getBitcast(CastVT, LHS);
19981     RHS = DAG.getBitcast(CastVT, RHS);
19982     SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
19983     return DAG.getBitcast(VT, Select);
19984   }
19985   }
19986 }
19987 
19988 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
19989   MVT VT = Op.getSimpleValueType();
19990   SDValue Vec = Op.getOperand(0);
19991   SDValue Idx = Op.getOperand(1);
19992   assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
19993   SDLoc dl(Op);
19994 
19995   if (!Vec.getSimpleValueType().is128BitVector())
19996     return SDValue();
19997 
19998   if (VT.getSizeInBits() == 8) {
19999     // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
20000     // we're going to zero extend the register or fold the store.
20001     if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
20002         !X86::mayFoldIntoStore(Op))
20003       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
20004                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20005                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
20006 
20007     unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
20008     SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
20009                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20010     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20011   }
20012 
20013   if (VT == MVT::f32) {
20014     // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
20015     // the result back to an FR32 register. It's only worth matching if the
20016     // result has a single use which is a store or a bitcast to i32.  And in
20017     // the case of a store, it's not worth it if the index is a constant 0,
20018     // because a MOVSSmr can be used instead, which is smaller and faster.
20019     if (!Op.hasOneUse())
20020       return SDValue();
20021     SDNode *User = *Op.getNode()->use_begin();
20022     if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
20023         (User->getOpcode() != ISD::BITCAST ||
20024          User->getValueType(0) != MVT::i32))
20025       return SDValue();
20026     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20027                                   DAG.getBitcast(MVT::v4i32, Vec), Idx);
20028     return DAG.getBitcast(MVT::f32, Extract);
20029   }
20030 
20031   if (VT == MVT::i32 || VT == MVT::i64)
20032       return Op;
20033 
20034   return SDValue();
20035 }
20036 
20037 /// Extract one bit from mask vector, like v16i1 or v8i1.
20038 /// AVX-512 feature.
20039 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
20040                                         const X86Subtarget &Subtarget) {
20041   SDValue Vec = Op.getOperand(0);
20042   SDLoc dl(Vec);
20043   MVT VecVT = Vec.getSimpleValueType();
20044   SDValue Idx = Op.getOperand(1);
20045   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20046   MVT EltVT = Op.getSimpleValueType();
20047 
20048   assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
20049          "Unexpected vector type in ExtractBitFromMaskVector");
20050 
20051   // A variable index can't be handled in mask registers;
20052   // extend the vector to VR512/VR128.
20053   if (!IdxC) {
20054     unsigned NumElts = VecVT.getVectorNumElements();
20055     // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
20056     // than extending to 128/256-bit.
20057     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20058     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20059     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
20060     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
20061     return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
20062   }
20063 
20064   unsigned IdxVal = IdxC->getZExtValue();
20065   if (IdxVal == 0) // the operation is legal
20066     return Op;
20067 
20068   // Extend to natively supported kshift.
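  // Sketch: extracting bit 3 of a v4i1 mask widens it to v16i1 (or v8i1 with
  // DQI), shifts right by 3 with KSHIFTR, and then reads lane 0.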
20069   unsigned NumElems = VecVT.getVectorNumElements();
20070   MVT WideVecVT = VecVT;
20071   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20072     WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20073     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20074                       DAG.getUNDEF(WideVecVT), Vec,
20075                       DAG.getIntPtrConstant(0, dl));
20076   }
20077 
20078   // Use kshiftr instruction to move to the lower element.
20079   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20080                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20081 
20082   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20083                      DAG.getIntPtrConstant(0, dl));
20084 }
20085 
20086 SDValue
20087 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
20088                                            SelectionDAG &DAG) const {
20089   SDLoc dl(Op);
20090   SDValue Vec = Op.getOperand(0);
20091   MVT VecVT = Vec.getSimpleValueType();
20092   SDValue Idx = Op.getOperand(1);
20093   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20094 
20095   if (VecVT.getVectorElementType() == MVT::i1)
20096     return ExtractBitFromMaskVector(Op, DAG, Subtarget);
20097 
20098   if (!IdxC) {
20099     // It's more profitable to go through memory (1 cycle throughput)
20100     // than using a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
20101     // The IACA tool was used to get the performance estimate
20102     // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
20103     //
20104     // example : extractelement <16 x i8> %a, i32 %i
20105     //
20106     // Block Throughput: 3.00 Cycles
20107     // Throughput Bottleneck: Port5
20108     //
20109     // | Num Of |   Ports pressure in cycles  |    |
20110     // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
20111     // ---------------------------------------------
20112     // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
20113     // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
20114     // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
20115     // Total Num Of Uops: 4
20116     //
20117     //
20118     // Block Throughput: 1.00 Cycles
20119     // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
20120     //
20121     // |    |  Ports pressure in cycles   |  |
20122     // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
20123     // ---------------------------------------------------------
20124     // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
20125     // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
20126     // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
20127     // Total Num Of Uops: 4
20128 
20129     return SDValue();
20130   }
20131 
20132   unsigned IdxVal = IdxC->getZExtValue();
20133 
20134   // If this is a 256-bit vector result, first extract the 128-bit vector and
20135   // If this is a 256-bit or 512-bit vector result, first extract the relevant
20136   // 128-bit subvector and then extract the element from that 128-bit vector.
20137     // Get the 128-bit vector.
20138     Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
20139     MVT EltVT = VecVT.getVectorElementType();
20140 
20141     unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
20142     assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
20143 
20144     // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
20145     // this can be done with a mask.
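    // For instance, extracting element 9 of a v16i32: each 128-bit chunk holds
    // 4 elements, so we use the chunk covering elements 8-11 and local index
    // 9 & 3 == 1.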
20146     IdxVal &= ElemsPerChunk - 1;
20147     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20148                        DAG.getIntPtrConstant(IdxVal, dl));
20149   }
20150 
20151   assert(VecVT.is128BitVector() && "Unexpected vector length");
20152 
20153   MVT VT = Op.getSimpleValueType();
20154 
20155   if (VT == MVT::i16) {
20156     // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
20157     // we're going to zero extend the register or fold the store (SSE41 only).
20158     if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
20159         !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
20160       if (Subtarget.hasFP16())
20161         return Op;
20162 
20163       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
20164                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20165                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
20166     }
20167 
20168     SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
20169                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20170     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20171   }
20172 
20173   if (Subtarget.hasSSE41())
20174     if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
20175       return Res;
20176 
20177   // TODO: We only extract a single element from v16i8, we can probably afford
20178   // to be more aggressive here before using the default approach of spilling to
20179   // stack.
20180   if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
20181     // Extract either the lowest i32 or any i16, and extract the sub-byte.
20182     int DWordIdx = IdxVal / 4;
20183     if (DWordIdx == 0) {
20184       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20185                                 DAG.getBitcast(MVT::v4i32, Vec),
20186                                 DAG.getIntPtrConstant(DWordIdx, dl));
20187       int ShiftVal = (IdxVal % 4) * 8;
20188       if (ShiftVal != 0)
20189         Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
20190                           DAG.getConstant(ShiftVal, dl, MVT::i8));
20191       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20192     }
20193 
20194     int WordIdx = IdxVal / 2;
20195     SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
20196                               DAG.getBitcast(MVT::v8i16, Vec),
20197                               DAG.getIntPtrConstant(WordIdx, dl));
20198     int ShiftVal = (IdxVal % 2) * 8;
20199     if (ShiftVal != 0)
20200       Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
20201                         DAG.getConstant(ShiftVal, dl, MVT::i8));
20202     return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20203   }
20204 
20205   if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
20206     if (IdxVal == 0)
20207       return Op;
20208 
20209     // Shuffle the element to the lowest element, then movss or movsh.
20210     SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
20211     Mask[0] = static_cast<int>(IdxVal);
20212     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20213     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20214                        DAG.getIntPtrConstant(0, dl));
20215   }
20216 
20217   if (VT.getSizeInBits() == 64) {
20218     // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
20219     // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
20220     //        to match extract_elt for f64.
20221     if (IdxVal == 0)
20222       return Op;
20223 
20224     // UNPCKHPD the element to the lowest double word, then movsd.
20225     // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
20226     // to a f64mem, the whole operation is folded into a single MOVHPDmr.
20227     int Mask[2] = { 1, -1 };
20228     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20229     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20230                        DAG.getIntPtrConstant(0, dl));
20231   }
20232 
20233   return SDValue();
20234 }
20235 
20236 /// Insert one bit to mask vector, like v16i1 or v8i1.
20237 /// AVX-512 feature.
20238 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
20239                                      const X86Subtarget &Subtarget) {
20240   SDLoc dl(Op);
20241   SDValue Vec = Op.getOperand(0);
20242   SDValue Elt = Op.getOperand(1);
20243   SDValue Idx = Op.getOperand(2);
20244   MVT VecVT = Vec.getSimpleValueType();
20245 
20246   if (!isa<ConstantSDNode>(Idx)) {
20247     // Non-constant index. Extend the source and destination,
20248     // insert the element and then truncate the result.
20249     unsigned NumElts = VecVT.getVectorNumElements();
20250     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20251     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20252     SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
20253       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
20254       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
20255     return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
20256   }
20257 
20258   // Copy into a k-register, extract to v1i1 and insert_subvector.
20259   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
20260   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
20261 }
20262 
20263 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
20264                                                   SelectionDAG &DAG) const {
20265   MVT VT = Op.getSimpleValueType();
20266   MVT EltVT = VT.getVectorElementType();
20267   unsigned NumElts = VT.getVectorNumElements();
20268   unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
20269 
20270   if (EltVT == MVT::i1)
20271     return InsertBitToMaskVector(Op, DAG, Subtarget);
20272 
20273   SDLoc dl(Op);
20274   SDValue N0 = Op.getOperand(0);
20275   SDValue N1 = Op.getOperand(1);
20276   SDValue N2 = Op.getOperand(2);
20277   auto *N2C = dyn_cast<ConstantSDNode>(N2);
20278 
20279   if (!N2C) {
20280     // Variable insertion indices: usually we're better off spilling to stack,
20281     // but AVX512 can use a variable compare+select by comparing against all
20282     // possible vector indices, and FP insertion has less gpr->simd traffic.
20283     if (!(Subtarget.hasBWI() ||
20284           (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
20285           (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
20286       return SDValue();
20287 
20288     MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
20289     MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
20290     if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
20291       return SDValue();
20292 
20293     SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
20294     SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
20295     SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
20296 
20297     SmallVector<SDValue, 16> RawIndices;
20298     for (unsigned I = 0; I != NumElts; ++I)
20299       RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
20300     SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
20301 
20302     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
20303     return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
20304                            ISD::CondCode::SETEQ);
20305   }
20306 
20307   if (N2C->getAPIntValue().uge(NumElts))
20308     return SDValue();
20309   uint64_t IdxVal = N2C->getZExtValue();
20310 
20311   bool IsZeroElt = X86::isZeroNode(N1);
20312   bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
20313 
20314   if (IsZeroElt || IsAllOnesElt) {
20315     // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
20316     // We don't deal with i8 0 since it appears to be handled elsewhere.
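    // Rough example: on a pre-SSE4.1 target, inserting -1 into lane 3 of a
    // v16i8 is emitted as (or %vec, <0,0,0,-1,0,...,0>) built from a constant
    // vector.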
20317     if (IsAllOnesElt &&
20318         ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
20319          ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
20320       SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
20321       SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
20322       SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
20323       CstVectorElts[IdxVal] = OnesCst;
20324       SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
20325       return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
20326     }
20327     // See if we can do this more efficiently with a blend shuffle with a
20328     // rematerializable vector.
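    // For example, inserting 0 into lane 2 of a v8i32 becomes a shuffle of the
    // source with an all-zeros vector using mask <0, 1, 10, 3, 4, 5, 6, 7>.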
20329     if (Subtarget.hasSSE41() &&
20330         (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
20331       SmallVector<int, 8> BlendMask;
20332       for (unsigned i = 0; i != NumElts; ++i)
20333         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20334       SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
20335                                     : getOnesVector(VT, DAG, dl);
20336       return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
20337     }
20338   }
20339 
20340   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
20341   // into that, and then insert the subvector back into the result.
20342   if (VT.is256BitVector() || VT.is512BitVector()) {
20343     // With a 256-bit vector, we can insert into the zero element efficiently
20344     // using a blend if we have AVX or AVX2 and the right data type.
20345     if (VT.is256BitVector() && IdxVal == 0) {
20346       // TODO: It is worthwhile to cast integer to floating point and back
20347       // and incur a domain crossing penalty if that's what we'll end up
20348       // doing anyway after extracting to a 128-bit vector.
20349       if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
20350           (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
20351         SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20352         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
20353                            DAG.getTargetConstant(1, dl, MVT::i8));
20354       }
20355     }
20356 
20357     unsigned NumEltsIn128 = 128 / EltSizeInBits;
20358     assert(isPowerOf2_32(NumEltsIn128) &&
20359            "Vectors will always have power-of-two number of elements.");
20360 
20361     // If we are not inserting into the low 128-bit vector chunk,
20362     // then prefer the broadcast+blend sequence.
20363     // FIXME: relax the profitability check iff all N1 uses are insertions.
20364     if (IdxVal >= NumEltsIn128 &&
20365         ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
20366          (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
20367           X86::mayFoldLoad(N1, Subtarget)))) {
20368       SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
20369       SmallVector<int, 8> BlendMask;
20370       for (unsigned i = 0; i != NumElts; ++i)
20371         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20372       return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
20373     }
20374 
20375     // Get the desired 128-bit vector chunk.
20376     SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
20377 
20378     // Insert the element into the desired chunk.
20379     // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
20380     unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
20381 
20382     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
20383                     DAG.getIntPtrConstant(IdxIn128, dl));
20384 
20385     // Insert the changed part back into the bigger vector
20386     return insert128BitVector(N0, V, IdxVal, DAG, dl);
20387   }
20388   assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
20389 
20390   // This will be just movw/movd/movq/movsh/movss/movsd.
20391   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
20392     if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
20393         EltVT == MVT::f16 || EltVT == MVT::i64) {
20394       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20395       return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20396     }
20397 
20398     // We can't directly insert an i8 or i16 into a vector, so zero extend
20399     // it to i32 first.
20400     if (EltVT == MVT::i16 || EltVT == MVT::i8) {
20401       N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
20402       MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
20403       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
20404       N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20405       return DAG.getBitcast(VT, N1);
20406     }
20407   }
20408 
20409   // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
20410   // argument. SSE41 required for pinsrb.
20411   if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
20412     unsigned Opc;
20413     if (VT == MVT::v8i16) {
20414       assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
20415       Opc = X86ISD::PINSRW;
20416     } else {
20417       assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
20418       assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
20419       Opc = X86ISD::PINSRB;
20420     }
20421 
20422     assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
20423     N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
20424     N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
20425     return DAG.getNode(Opc, dl, VT, N0, N1, N2);
20426   }
20427 
20428   if (Subtarget.hasSSE41()) {
20429     if (EltVT == MVT::f32) {
20430       // Bits [7:6] of the constant are the source select. This will always be
20431       //   zero here. The DAG Combiner may combine an extract_elt index into
20432       //   these bits. For example (insert (extract, 3), 2) could be matched by
20433       //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
20434       // Bits [5:4] of the constant are the destination select. This is the
20435       //   value of the incoming immediate.
20436       // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
20437       //   combine either bitwise AND or insert of float 0.0 to set these bits.
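      // Worked example of the encoding: for (insert (extract %v, 3), 2) the
      // immediate would be (3 << 6) | (2 << 4) == 0xE0, with a zero mask of 0.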
20438 
20439       bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
20440       if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
20441         // If this is an insertion of 32-bits into the low 32-bits of
20442         // a vector, we prefer to generate a blend with immediate rather
20443         // than an insertps. Blends are simpler operations in hardware and so
20444         // will always have equal or better performance than insertps.
20445         // But if optimizing for size and there's a load folding opportunity,
20446         // generate insertps because blendps does not have a 32-bit memory
20447         // operand form.
20448         N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20449         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
20450                            DAG.getTargetConstant(1, dl, MVT::i8));
20451       }
20452       // Create this as a scalar-to-vector.
20453       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20454       return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
20455                          DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
20456     }
20457 
20458     // PINSR* works with constant index.
20459     if (EltVT == MVT::i32 || EltVT == MVT::i64)
20460       return Op;
20461   }
20462 
20463   return SDValue();
20464 }
20465 
20466 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
20467                                      SelectionDAG &DAG) {
20468   SDLoc dl(Op);
20469   MVT OpVT = Op.getSimpleValueType();
20470 
20471   // It's always cheaper to replace a xor+movd with xorps, and it simplifies
20472   // further combines.
20473   if (X86::isZeroNode(Op.getOperand(0)))
20474     return getZeroVector(OpVT, Subtarget, DAG, dl);
20475 
20476   // If this is a 256-bit vector result, first insert into a 128-bit
20477   // vector and then insert into the 256-bit vector.
20478   if (!OpVT.is128BitVector()) {
20479     // Insert into a 128-bit vector.
20480     unsigned SizeFactor = OpVT.getSizeInBits() / 128;
20481     MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
20482                                  OpVT.getVectorNumElements() / SizeFactor);
20483 
20484     Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
20485 
20486     // Insert the 128-bit vector.
20487     return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
20488   }
20489   assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
20490          "Expected an SSE type!");
20491 
20492   // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
20493   // tblgen.
20494   if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
20495     return Op;
20496 
20497   SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
20498   return DAG.getBitcast(
20499       OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
20500 }
20501 
20502 // Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
20503 // simple superregister reference or explicit instructions to insert
20504 // the upper bits of a vector.
20505 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20506                                      SelectionDAG &DAG) {
20507   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
20508 
20509   return insert1BitVector(Op, DAG, Subtarget);
20510 }
20511 
20512 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20513                                       SelectionDAG &DAG) {
20514   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
20515          "Only vXi1 extract_subvectors need custom lowering");
20516 
20517   SDLoc dl(Op);
20518   SDValue Vec = Op.getOperand(0);
20519   uint64_t IdxVal = Op.getConstantOperandVal(1);
20520 
20521   if (IdxVal == 0) // the operation is legal
20522     return Op;
20523 
20524   MVT VecVT = Vec.getSimpleValueType();
20525   unsigned NumElems = VecVT.getVectorNumElements();
20526 
20527   // Extend to natively supported kshift.
20528   MVT WideVecVT = VecVT;
20529   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20530     WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20531     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20532                       DAG.getUNDEF(WideVecVT), Vec,
20533                       DAG.getIntPtrConstant(0, dl));
20534   }
20535 
20536   // Shift to the LSB.
20537   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20538                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20539 
20540   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
20541                      DAG.getIntPtrConstant(0, dl));
20542 }
20543 
20544 // Returns the appropriate wrapper opcode for a global reference.
20545 unsigned X86TargetLowering::getGlobalWrapperKind(
20546     const GlobalValue *GV, const unsigned char OpFlags) const {
20547   // References to absolute symbols are never PC-relative.
20548   if (GV && GV->isAbsoluteSymbolRef())
20549     return X86ISD::Wrapper;
20550 
20551   CodeModel::Model M = getTargetMachine().getCodeModel();
20552   if (Subtarget.isPICStyleRIPRel() &&
20553       (M == CodeModel::Small || M == CodeModel::Kernel))
20554     return X86ISD::WrapperRIP;
20555 
20556   // In the medium model, functions can always be referenced RIP-relatively,
20557   // since they must be within 2GiB. This is also possible in non-PIC mode, and
20558   // shorter than the 64-bit absolute immediate that would otherwise be emitted.
20559   if (M == CodeModel::Medium && isa_and_nonnull<Function>(GV))
20560     return X86ISD::WrapperRIP;
20561 
20562   // GOTPCREL references must always use RIP.
20563   if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
20564     return X86ISD::WrapperRIP;
20565 
20566   return X86ISD::Wrapper;
20567 }
20568 
20569 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
20570 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
20571 // one of the above-mentioned nodes. It has to be wrapped because otherwise
20572 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
20573 // be used to form an addressing mode. These wrapped nodes will be selected
20574 // into MOV32ri.
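// For instance, in 32-bit static code a constant-pool reference is emitted as
// (X86ISD::Wrapper (TargetConstantPool ...)), and selection then turns the
// wrapped node into the absolute address (e.g. a MOV32ri of the pool label).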
20575 SDValue
20576 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
20577   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
20578 
20579   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20580   // global base reg.
20581   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20582 
20583   auto PtrVT = getPointerTy(DAG.getDataLayout());
20584   SDValue Result = DAG.getTargetConstantPool(
20585       CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
20586   SDLoc DL(CP);
20587   Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20588   // With PIC, the address is actually $g + Offset.
20589   if (OpFlag) {
20590     Result =
20591         DAG.getNode(ISD::ADD, DL, PtrVT,
20592                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20593   }
20594 
20595   return Result;
20596 }
20597 
20598 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
20599   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
20600 
20601   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20602   // global base reg.
20603   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20604 
20605   auto PtrVT = getPointerTy(DAG.getDataLayout());
20606   SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
20607   SDLoc DL(JT);
20608   Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20609 
20610   // With PIC, the address is actually $g + Offset.
20611   if (OpFlag)
20612     Result =
20613         DAG.getNode(ISD::ADD, DL, PtrVT,
20614                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20615 
20616   return Result;
20617 }
20618 
20619 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
20620                                                SelectionDAG &DAG) const {
20621   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20622 }
20623 
20624 SDValue
20625 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
20626   // Create the TargetBlockAddressAddress node.
20627   unsigned char OpFlags =
20628     Subtarget.classifyBlockAddressReference();
20629   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
20630   int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
20631   SDLoc dl(Op);
20632   auto PtrVT = getPointerTy(DAG.getDataLayout());
20633   SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
20634   Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
20635 
20636   // With PIC, the address is actually $g + Offset.
20637   if (isGlobalRelativeToPICBase(OpFlags)) {
20638     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20639                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20640   }
20641 
20642   return Result;
20643 }
20644 
20645 /// Creates target global address or external symbol nodes for calls or
20646 /// other uses.
20647 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
20648                                                  bool ForCall) const {
20649   // Unpack the global address or external symbol.
20650   const SDLoc &dl = SDLoc(Op);
20651   const GlobalValue *GV = nullptr;
20652   int64_t Offset = 0;
20653   const char *ExternalSym = nullptr;
20654   if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
20655     GV = G->getGlobal();
20656     Offset = G->getOffset();
20657   } else {
20658     const auto *ES = cast<ExternalSymbolSDNode>(Op);
20659     ExternalSym = ES->getSymbol();
20660   }
20661 
20662   // Calculate some flags for address lowering.
20663   const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
20664   unsigned char OpFlags;
20665   if (ForCall)
20666     OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
20667   else
20668     OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
20669   bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
20670   bool NeedsLoad = isGlobalStubReference(OpFlags);
20671 
20672   CodeModel::Model M = DAG.getTarget().getCodeModel();
20673   auto PtrVT = getPointerTy(DAG.getDataLayout());
20674   SDValue Result;
20675 
20676   if (GV) {
20677     // Create a target global address if this is a global. If possible, fold the
20678     // offset into the global address reference. Otherwise, ADD it on later.
20679     // Suppress the folding if Offset is negative: movl foo-1, %eax is not
20680     // allowed because if the address of foo is 0, the ELF R_X86_64_32
20681     // relocation will compute to a negative value, which is invalid.
20682     int64_t GlobalOffset = 0;
20683     if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
20684         X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
20685       std::swap(GlobalOffset, Offset);
20686     }
20687     Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
20688   } else {
20689     // If this is not a global address, this must be an external symbol.
20690     Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
20691   }
20692 
20693   // If this is a direct call, avoid the wrapper if we don't need to do any
20694   // loads or adds. This allows SDAG ISel to match direct calls.
20695   if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
20696     return Result;
20697 
20698   Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
20699 
20700   // With PIC, the address is actually $g + Offset.
20701   if (HasPICReg) {
20702     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20703                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20704   }
20705 
20706   // For globals that require a load from a stub to get the address, emit the
20707   // load.
20708   if (NeedsLoad)
20709     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
20710                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20711 
20712   // If there was a non-zero offset that we didn't fold, create an explicit
20713   // addition for it.
20714   if (Offset != 0)
20715     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
20716                          DAG.getConstant(Offset, dl, PtrVT));
20717 
20718   return Result;
20719 }
20720 
20721 SDValue
20722 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
20723   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20724 }
20725 
20726 static SDValue
20727 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
20728            SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
20729            unsigned char OperandFlags, bool LocalDynamic = false) {
20730   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20731   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20732   SDLoc dl(GA);
20733   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20734                                            GA->getValueType(0),
20735                                            GA->getOffset(),
20736                                            OperandFlags);
20737 
20738   X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
20739                                            : X86ISD::TLSADDR;
20740 
20741   if (InFlag) {
20742     SDValue Ops[] = { Chain,  TGA, *InFlag };
20743     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20744   } else {
20745     SDValue Ops[]  = { Chain, TGA };
20746     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20747   }
20748 
20749   // TLSADDR will be codegen'ed as a call. Inform MFI that function has calls.
20750   MFI.setAdjustsStack(true);
20751   MFI.setHasCalls(true);
20752 
20753   SDValue Flag = Chain.getValue(1);
20754   return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
20755 }
20756 
20757 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
20758 static SDValue
20759 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20760                                 const EVT PtrVT) {
20761   SDValue InFlag;
20762   SDLoc dl(GA);  // ? function entry point might be better
20763   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20764                                    DAG.getNode(X86ISD::GlobalBaseReg,
20765                                                SDLoc(), PtrVT), InFlag);
20766   InFlag = Chain.getValue(1);
20767 
20768   return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
20769 }
20770 
20771 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
20772 static SDValue
20773 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20774                                 const EVT PtrVT) {
20775   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20776                     X86::RAX, X86II::MO_TLSGD);
20777 }
20778 
20779 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
20780 static SDValue
20781 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20782                                  const EVT PtrVT) {
20783   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20784                     X86::EAX, X86II::MO_TLSGD);
20785 }
20786 
20787 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
20788                                            SelectionDAG &DAG, const EVT PtrVT,
20789                                            bool Is64Bit, bool Is64BitLP64) {
20790   SDLoc dl(GA);
20791 
20792   // Get the start address of the TLS block for this module.
20793   X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
20794       .getInfo<X86MachineFunctionInfo>();
20795   MFI->incNumLocalDynamicTLSAccesses();
20796 
20797   SDValue Base;
20798   if (Is64Bit) {
20799     unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
20800     Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
20801                       X86II::MO_TLSLD, /*LocalDynamic=*/true);
20802   } else {
20803     SDValue InFlag;
20804     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20805         DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
20806     InFlag = Chain.getValue(1);
20807     Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
20808                       X86II::MO_TLSLDM, /*LocalDynamic=*/true);
20809   }
20810 
20811   // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
20812   // of Base.
20813 
20814   // Build x@dtpoff.
20815   unsigned char OperandFlags = X86II::MO_DTPOFF;
20816   unsigned WrapperKind = X86ISD::Wrapper;
20817   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20818                                            GA->getValueType(0),
20819                                            GA->getOffset(), OperandFlags);
20820   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20821 
20822   // Add x@dtpoff with the base.
20823   return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
20824 }
20825 
20826 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
20827 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20828                                    const EVT PtrVT, TLSModel::Model model,
20829                                    bool is64Bit, bool isPIC) {
20830   SDLoc dl(GA);
20831 
20832   // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
20833   Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
20834                                                          is64Bit ? 257 : 256));
20835 
20836   SDValue ThreadPointer =
20837       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
20838                   MachinePointerInfo(Ptr));
20839 
20840   unsigned char OperandFlags = 0;
20841   // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
20842   // initialexec.
20843   unsigned WrapperKind = X86ISD::Wrapper;
20844   if (model == TLSModel::LocalExec) {
20845     OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
20846   } else if (model == TLSModel::InitialExec) {
20847     if (is64Bit) {
20848       OperandFlags = X86II::MO_GOTTPOFF;
20849       WrapperKind = X86ISD::WrapperRIP;
20850     } else {
20851       OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
20852     }
20853   } else {
20854     llvm_unreachable("Unexpected model");
20855   }
20856 
20857   // emit "addl x@ntpoff,%eax" (local exec)
20858   // or "addl x@indntpoff,%eax" (initial exec)
20859   // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
20860   SDValue TGA =
20861       DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
20862                                  GA->getOffset(), OperandFlags);
20863   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20864 
20865   if (model == TLSModel::InitialExec) {
20866     if (isPIC && !is64Bit) {
20867       Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
20868                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20869                            Offset);
20870     }
20871 
20872     Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
20873                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20874   }
20875 
20876   // The address of the thread local variable is the add of the thread
20877   // pointer with the offset of the variable.
20878   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
20879 }
20880 
20881 SDValue
20882 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
20883 
20884   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
20885 
20886   if (DAG.getTarget().useEmulatedTLS())
20887     return LowerToTLSEmulatedModel(GA, DAG);
20888 
20889   const GlobalValue *GV = GA->getGlobal();
20890   auto PtrVT = getPointerTy(DAG.getDataLayout());
20891   bool PositionIndependent = isPositionIndependent();
20892 
20893   if (Subtarget.isTargetELF()) {
20894     TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
20895     switch (model) {
20896       case TLSModel::GeneralDynamic:
20897         if (Subtarget.is64Bit()) {
20898           if (Subtarget.isTarget64BitLP64())
20899             return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
20900           return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
20901         }
20902         return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
20903       case TLSModel::LocalDynamic:
20904         return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
20905                                            Subtarget.isTarget64BitLP64());
20906       case TLSModel::InitialExec:
20907       case TLSModel::LocalExec:
20908         return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
20909                                    PositionIndependent);
20910     }
20911     llvm_unreachable("Unknown TLS model.");
20912   }
20913 
20914   if (Subtarget.isTargetDarwin()) {
20915     // Darwin only has one model of TLS.  Lower to that.
20916     unsigned char OpFlag = 0;
20917     unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
20918                            X86ISD::WrapperRIP : X86ISD::Wrapper;
20919 
20920     // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20921     // global base reg.
20922     bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
20923     if (PIC32)
20924       OpFlag = X86II::MO_TLVP_PIC_BASE;
20925     else
20926       OpFlag = X86II::MO_TLVP;
20927     SDLoc DL(Op);
20928     SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
20929                                                 GA->getValueType(0),
20930                                                 GA->getOffset(), OpFlag);
20931     SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
20932 
20933     // With PIC32, the address is actually $g + Offset.
20934     if (PIC32)
20935       Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
20936                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20937                            Offset);
20938 
20939     // Lowering the machine ISD will make sure everything is in the right
20940     // location.
20941     SDValue Chain = DAG.getEntryNode();
20942     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20943     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
20944     SDValue Args[] = { Chain, Offset };
20945     Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
20946     Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);
20947 
20948     // TLSCALL will be codegen'ed as a call. Inform MFI that function has calls.
20949     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20950     MFI.setAdjustsStack(true);
20951 
20952     // And our return value (tls address) is in the standard call return value
20953     // location.
20954     unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
20955     return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
20956   }
20957 
20958   if (Subtarget.isOSWindows()) {
20959     // Just use the implicit TLS architecture
20960     // Need to generate something similar to:
20961     //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
20962     //                                  ; from TEB
20963     //   mov     ecx, dword [rel _tls_index]: Load index (from C runtime)
20964     //   mov     rcx, qword [rdx+rcx*8]
20965     //   mov     eax, .tls$:tlsvar
20966     //   [rax+rcx] contains the address
20967     // Windows 64bit: gs:0x58
20968     // Windows 32bit: fs:__tls_array
20969 
20970     SDLoc dl(GA);
20971     SDValue Chain = DAG.getEntryNode();
20972 
20973     // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
20974     // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
20975     // use its literal value of 0x2C.
20976     Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
20977                                         ? Type::getInt8PtrTy(*DAG.getContext(),
20978                                                              256)
20979                                         : Type::getInt32PtrTy(*DAG.getContext(),
20980                                                               257));
20981 
20982     SDValue TlsArray = Subtarget.is64Bit()
20983                            ? DAG.getIntPtrConstant(0x58, dl)
20984                            : (Subtarget.isTargetWindowsGNU()
20985                                   ? DAG.getIntPtrConstant(0x2C, dl)
20986                                   : DAG.getExternalSymbol("_tls_array", PtrVT));
20987 
20988     SDValue ThreadPointer =
20989         DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
20990 
20991     SDValue res;
20992     if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
20993       res = ThreadPointer;
20994     } else {
20995       // Load the _tls_index variable
20996       SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
20997       if (Subtarget.is64Bit())
20998         IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
20999                              MachinePointerInfo(), MVT::i32);
21000       else
21001         IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
21002 
21003       const DataLayout &DL = DAG.getDataLayout();
21004       SDValue Scale =
21005           DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
21006       IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
21007 
21008       res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
21009     }
21010 
21011     res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
21012 
21013     // Get the offset of start of .tls section
21014     SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
21015                                              GA->getValueType(0),
21016                                              GA->getOffset(), X86II::MO_SECREL);
21017     SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
21018 
21019     // The address of the thread local variable is the add of the thread
21020     // pointer with the offset of the variable.
21021     return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
21022   }
21023 
21024   llvm_unreachable("TLS not implemented for this target.");
21025 }
21026 
21027 /// Lower SRA_PARTS and friends, which return two i32 values
21028 /// and take a 2 x i32 value to shift plus a shift amount.
21029 /// TODO: Can this be moved to general expansion code?
21030 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
21031   SDValue Lo, Hi;
21032   DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
21033   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
21034 }
21035 
21036 // Try to use a packed vector operation to handle i64 on 32-bit targets when
21037 // AVX512DQ is enabled.
21038 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
21039                                         const X86Subtarget &Subtarget) {
21040   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
21041           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
21042           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
21043           Op.getOpcode() == ISD::UINT_TO_FP) &&
21044          "Unexpected opcode!");
21045   bool IsStrict = Op->isStrictFPOpcode();
21046   unsigned OpNo = IsStrict ? 1 : 0;
21047   SDValue Src = Op.getOperand(OpNo);
21048   MVT SrcVT = Src.getSimpleValueType();
21049   MVT VT = Op.getSimpleValueType();
21050 
21051   if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
21052       (VT != MVT::f32 && VT != MVT::f64))
21053     return SDValue();
21054 
21055   // Pack the i64 into a vector, do the operation and extract.
21056 
21057   // Use a 256-bit vector to ensure the result is 128 bits for the f32 case.
21058   unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
21059   MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
21060   MVT VecVT = MVT::getVectorVT(VT, NumElts);
21061 
21062   SDLoc dl(Op);
21063   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
21064   if (IsStrict) {
21065     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
21066                                  {Op.getOperand(0), InVec});
21067     SDValue Chain = CvtVec.getValue(1);
21068     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21069                                 DAG.getIntPtrConstant(0, dl));
21070     return DAG.getMergeValues({Value, Chain}, dl);
21071   }
21072 
21073   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
21074 
21075   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21076                      DAG.getIntPtrConstant(0, dl));
21077 }
21078 
21079 // Try to use a packed vector operation to handle i64 on 32-bit targets.
21080 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
21081                                  const X86Subtarget &Subtarget) {
21082   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
21083           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
21084           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
21085           Op.getOpcode() == ISD::UINT_TO_FP) &&
21086          "Unexpected opcode!");
21087   bool IsStrict = Op->isStrictFPOpcode();
21088   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21089   MVT SrcVT = Src.getSimpleValueType();
21090   MVT VT = Op.getSimpleValueType();
21091 
21092   if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
21093     return SDValue();
21094 
21095   // Pack the i64 into a vector, do the operation and extract.
21096 
21097   assert(Subtarget.hasFP16() && "Expected FP16");
21098 
21099   SDLoc dl(Op);
21100   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
21101   if (IsStrict) {
21102     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
21103                                  {Op.getOperand(0), InVec});
21104     SDValue Chain = CvtVec.getValue(1);
21105     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21106                                 DAG.getIntPtrConstant(0, dl));
21107     return DAG.getMergeValues({Value, Chain}, dl);
21108   }
21109 
21110   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
21111 
21112   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21113                      DAG.getIntPtrConstant(0, dl));
21114 }
21115 
21116 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
21117                           const X86Subtarget &Subtarget) {
21118   switch (Opcode) {
21119     case ISD::SINT_TO_FP:
21120       // TODO: Handle wider types with AVX/AVX512.
21121       if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
21122         return false;
21123       // CVTDQ2PS or (V)CVTDQ2PD
21124       return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
21125 
21126     case ISD::UINT_TO_FP:
21127       // TODO: Handle wider types and i64 elements.
21128       if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
21129         return false;
21130       // VCVTUDQ2PS or VCVTUDQ2PD
21131       return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
21132 
21133     default:
21134       return false;
21135   }
21136 }
21137 
21138 /// Given a scalar cast operation that is extracted from a vector, try to
21139 /// vectorize the cast op followed by extraction. This will avoid an expensive
21140 /// round-trip between XMM and GPR.
21141 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
21142                                       const X86Subtarget &Subtarget) {
21143   // TODO: This could be enhanced to handle smaller integer types by peeking
21144   // through an extend.
21145   SDValue Extract = Cast.getOperand(0);
21146   MVT DestVT = Cast.getSimpleValueType();
21147   if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21148       !isa<ConstantSDNode>(Extract.getOperand(1)))
21149     return SDValue();
21150 
21151   // See if we have a 128-bit vector cast op for this type of cast.
21152   SDValue VecOp = Extract.getOperand(0);
21153   MVT FromVT = VecOp.getSimpleValueType();
21154   unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
21155   MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
21156   MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
21157   if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
21158     return SDValue();
21159 
21160   // If we are extracting from a non-zero element, first shuffle the source
21161   // vector to allow extracting from element zero.
21162   SDLoc DL(Cast);
21163   if (!isNullConstant(Extract.getOperand(1))) {
21164     SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
21165     Mask[0] = Extract.getConstantOperandVal(1);
21166     VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
21167   }
21168   // If the source vector is wider than 128 bits, extract the low part. Do not
21169   // create an unnecessarily wide vector cast op.
21170   if (FromVT != Vec128VT)
21171     VecOp = extract128BitVector(VecOp, 0, DAG, DL);
21172 
21173   // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
21174   // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
21175   SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
21176   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
21177                      DAG.getIntPtrConstant(0, DL));
21178 }
21179 
21180 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
21181 /// try to vectorize the cast ops. This will avoid an expensive round-trip
21182 /// between XMM and GPR.
21183 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
21184                                 const X86Subtarget &Subtarget) {
21185   // TODO: Allow FP_TO_UINT.
21186   SDValue CastToInt = CastToFP.getOperand(0);
21187   MVT VT = CastToFP.getSimpleValueType();
21188   if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
21189     return SDValue();
21190 
21191   MVT IntVT = CastToInt.getSimpleValueType();
21192   SDValue X = CastToInt.getOperand(0);
21193   MVT SrcVT = X.getSimpleValueType();
21194   if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
21195     return SDValue();
21196 
21197   // See if we have 128-bit vector cast instructions for this type of cast.
21198   // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
21199   if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
21200       IntVT != MVT::i32)
21201     return SDValue();
21202 
21203   unsigned SrcSize = SrcVT.getSizeInBits();
21204   unsigned IntSize = IntVT.getSizeInBits();
21205   unsigned VTSize = VT.getSizeInBits();
21206   MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
21207   MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
21208   MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
21209 
21210   // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
21211   unsigned ToIntOpcode =
21212       SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
21213   unsigned ToFPOpcode =
21214       IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
21215 
21216   // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
21217   //
21218   // We are not defining the high elements (for example, by zeroing them) because
21219   // that could nullify any performance advantage that we hoped to gain from
21220   // this vector op hack. We do not expect any adverse effects (like denorm
21221   // penalties) with cast ops.
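  // For the f32/i32 case this typically lowers to a cvttps2dq + cvtdq2ps pair,
  // keeping the value in an XMM register instead of bouncing through a GPR.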
21222   SDLoc DL(CastToFP);
21223   SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
21224   SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
21225   SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
21226   SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
21227   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
21228 }
21229 
21230 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
21231                                     const X86Subtarget &Subtarget) {
21232   SDLoc DL(Op);
21233   bool IsStrict = Op->isStrictFPOpcode();
21234   MVT VT = Op->getSimpleValueType(0);
21235   SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
21236 
21237   if (Subtarget.hasDQI()) {
21238     assert(!Subtarget.hasVLX() && "Unexpected features");
21239 
21240     assert((Src.getSimpleValueType() == MVT::v2i64 ||
21241             Src.getSimpleValueType() == MVT::v4i64) &&
21242            "Unsupported custom type");
21243 
21244     // With AVX512DQ but not VLX, we need to widen to get a 512-bit result type.
21245     assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
21246            "Unexpected VT!");
21247     MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
21248 
21249     // Need to concat with zero vector for strict fp to avoid spurious
21250     // exceptions.
21251     SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
21252                            : DAG.getUNDEF(MVT::v8i64);
21253     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
21254                       DAG.getIntPtrConstant(0, DL));
21255     SDValue Res, Chain;
21256     if (IsStrict) {
21257       Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
21258                         {Op->getOperand(0), Src});
21259       Chain = Res.getValue(1);
21260     } else {
21261       Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
21262     }
21263 
21264     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
21265                       DAG.getIntPtrConstant(0, DL));
21266 
21267     if (IsStrict)
21268       return DAG.getMergeValues({Res, Chain}, DL);
21269     return Res;
21270   }
21271 
21272   bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
21273                   Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
21274   if (VT != MVT::v4f32 || IsSigned)
21275     return SDValue();
21276 
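  // Unsigned v4i64 -> v4f32: elements with the sign bit set cannot use a plain
  // signed conversion, so convert (Src >> 1) | (Src & 1) instead -- the value
  // halved, with the shifted-out bit kept as a sticky bit so rounding is
  // preserved -- and then double the result with an fadd. Elements below 2^63
  // use the signed per-element conversion result directly.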
21277   SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
21278   SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
21279   SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
21280                              DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
21281                              DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
21282   SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
21283   SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
21284   SmallVector<SDValue, 4> SignCvts(4);
21285   SmallVector<SDValue, 4> Chains(4);
21286   for (int i = 0; i != 4; ++i) {
21287     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
21288                               DAG.getIntPtrConstant(i, DL));
21289     if (IsStrict) {
21290       SignCvts[i] =
21291           DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
21292                       {Op.getOperand(0), Elt});
21293       Chains[i] = SignCvts[i].getValue(1);
21294     } else {
21295       SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
21296     }
21297   }
21298   SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
21299 
21300   SDValue Slow, Chain;
21301   if (IsStrict) {
21302     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
21303     Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
21304                        {Chain, SignCvt, SignCvt});
21305     Chain = Slow.getValue(1);
21306   } else {
21307     Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
21308   }
21309 
21310   IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
21311   SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
21312 
21313   if (IsStrict)
21314     return DAG.getMergeValues({Cvt, Chain}, DL);
21315 
21316   return Cvt;
21317 }
21318 
21319 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
21320   bool IsStrict = Op->isStrictFPOpcode();
21321   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21322   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
21323   MVT VT = Op.getSimpleValueType();
21324   MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
21325   SDLoc dl(Op);
21326 
21327   SDValue Rnd = DAG.getIntPtrConstant(0, dl);
21328   if (IsStrict)
21329     return DAG.getNode(
21330         ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
21331         {Chain,
21332          DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
21333          Rnd});
21334   return DAG.getNode(ISD::FP_ROUND, dl, VT,
21335                      DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
21336 }
21337 
21338 static bool isLegalConversion(MVT VT, bool IsSigned,
21339                               const X86Subtarget &Subtarget) {
21340   if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
21341     return true;
21342   if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
21343     return true;
21344   if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
21345     return true;
21346   if (Subtarget.useAVX512Regs()) {
21347     if (VT == MVT::v16i32)
21348       return true;
21349     if (VT == MVT::v8i64 && Subtarget.hasDQI())
21350       return true;
21351   }
21352   if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
21353       (VT == MVT::v2i64 || VT == MVT::v4i64))
21354     return true;
21355   return false;
21356 }
21357 
21358 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
21359                                            SelectionDAG &DAG) const {
21360   bool IsStrict = Op->isStrictFPOpcode();
21361   unsigned OpNo = IsStrict ? 1 : 0;
21362   SDValue Src = Op.getOperand(OpNo);
21363   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
21364   MVT SrcVT = Src.getSimpleValueType();
21365   MVT VT = Op.getSimpleValueType();
21366   SDLoc dl(Op);
21367 
21368   if (isSoftFP16(VT))
21369     return promoteXINT_TO_FP(Op, DAG);
21370   else if (isLegalConversion(SrcVT, true, Subtarget))
21371     return Op;
21372 
21373   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
21374     return LowerWin64_INT128_TO_FP(Op, DAG);
21375 
21376   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
21377     return Extract;
21378 
21379   if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
21380     return R;
21381 
21382   if (SrcVT.isVector()) {
21383     if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
21384       // Note: Since v2f64 is a legal type, we don't need to zero extend the
21385       // source for strict FP.
21386       if (IsStrict)
21387         return DAG.getNode(
21388             X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
21389             {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
21390                                 DAG.getUNDEF(SrcVT))});
21391       return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
21392                          DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
21393                                      DAG.getUNDEF(SrcVT)));
21394     }
21395     if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
21396       return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
21397 
21398     return SDValue();
21399   }
21400 
21401   assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
21402          "Unknown SINT_TO_FP to lower!");
21403 
21404   bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
21405 
21406   // These are really Legal; return the operand so the caller accepts it as
21407   // Legal.
21408   if (SrcVT == MVT::i32 && UseSSEReg)
21409     return Op;
21410   if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
21411     return Op;
21412 
21413   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
21414     return V;
21415   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
21416     return V;
21417 
21418   // SSE doesn't have an i16 conversion so we need to promote.
21419   if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
21420     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
21421     if (IsStrict)
21422       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
21423                          {Chain, Ext});
21424 
21425     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
21426   }
21427 
21428   if (VT == MVT::f128 || !Subtarget.hasX87())
21429     return SDValue();
21430 
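  // Otherwise fall back to x87: spill the integer to a stack slot and let
  // BuildFILD load-and-convert it with FILD.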
21431   SDValue ValueToStore = Src;
21432   if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
21433     // Bitcasting to f64 here allows us to do a single 64-bit store from
21434     // an SSE register, avoiding the store forwarding penalty that would come
21435     // with two 32-bit stores.
21436     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
21437 
21438   unsigned Size = SrcVT.getStoreSize();
21439   Align Alignment(Size);
21440   MachineFunction &MF = DAG.getMachineFunction();
21441   auto PtrVT = getPointerTy(MF.getDataLayout());
21442   int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
21443   MachinePointerInfo MPI =
21444       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
21445   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21446   Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
21447   std::pair<SDValue, SDValue> Tmp =
21448       BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
21449 
21450   if (IsStrict)
21451     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
21452 
21453   return Tmp.first;
21454 }
21455 
21456 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
21457     EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
21458     MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
21459   // Build the FILD
21460   SDVTList Tys;
21461   bool useSSE = isScalarFPTypeInSSEReg(DstVT);
21462   if (useSSE)
21463     Tys = DAG.getVTList(MVT::f80, MVT::Other);
21464   else
21465     Tys = DAG.getVTList(DstVT, MVT::Other);
21466 
21467   SDValue FILDOps[] = {Chain, Pointer};
21468   SDValue Result =
21469       DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
21470                               Alignment, MachineMemOperand::MOLoad);
21471   Chain = Result.getValue(1);
21472 
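  // If the destination is an SSE type, FILD produced the value in an x87
  // register (as f80), so round-trip it through a stack slot with FST and a
  // load to place it in an SSE register with the destination type.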
21473   if (useSSE) {
21474     MachineFunction &MF = DAG.getMachineFunction();
21475     unsigned SSFISize = DstVT.getStoreSize();
21476     int SSFI =
21477         MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
21478     auto PtrVT = getPointerTy(MF.getDataLayout());
21479     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21480     Tys = DAG.getVTList(MVT::Other);
21481     SDValue FSTOps[] = {Chain, Result, StackSlot};
21482     MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
21483         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
21484         MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
21485 
21486     Chain =
21487         DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
21488     Result = DAG.getLoad(
21489         DstVT, DL, Chain, StackSlot,
21490         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
21491     Chain = Result.getValue(1);
21492   }
21493 
21494   return { Result, Chain };
21495 }
21496 
21497 /// Horizontal vector math instructions may be slower than normal math with
21498 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
21499 /// implementation, and likely shuffle complexity of the alternate sequence.
21500 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
21501                                   const X86Subtarget &Subtarget) {
21502   bool IsOptimizingSize = DAG.shouldOptForSize();
21503   bool HasFastHOps = Subtarget.hasFastHorizontalOps();
21504   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
21505 }
21506 
21507 /// 64-bit unsigned integer to double expansion.
21508 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
21509                                    const X86Subtarget &Subtarget) {
21510   // We can't use this algorithm for strict fp: it produces -0.0 instead of
21511   // +0.0 when converting 0 while rounding toward negative infinity. The caller
21512   // will fall back to Expand when i64 is legal, or use FILD in 32-bit mode.
21513   assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
21514   // This algorithm is not obvious. Here is what we're trying to output:
21515   /*
21516      movq       %rax,  %xmm0
21517      punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
21518      subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
21519      #ifdef __SSE3__
21520        haddpd   %xmm0, %xmm0
21521      #else
21522        pshufd   $0x4e, %xmm0, %xmm1
21523        addpd    %xmm1, %xmm0
21524      #endif
21525   */
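  // In effect, punpckldq pairs the low half of the input with 0x43300000 and
  // the high half with 0x45300000, producing the doubles (2^52 + lo) and
  // (2^84 + hi * 2^32) exactly. Subtracting c1 = { 2^52, 2^84 } leaves
  // { lo, hi * 2^32 }, and the horizontal add reassembles the full value with
  // a single rounding step.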
21526 
21527   SDLoc dl(Op);
21528   LLVMContext *Context = DAG.getContext();
21529 
21530   // Build some magic constants.
21531   static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
21532   Constant *C0 = ConstantDataVector::get(*Context, CV0);
21533   auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
21534   SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
21535 
21536   SmallVector<Constant*,2> CV1;
21537   CV1.push_back(
21538     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21539                                       APInt(64, 0x4330000000000000ULL))));
21540   CV1.push_back(
21541     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21542                                       APInt(64, 0x4530000000000000ULL))));
21543   Constant *C1 = ConstantVector::get(CV1);
21544   SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
21545 
21546   // Load the 64-bit value into an XMM register.
21547   SDValue XR1 =
21548       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
21549   SDValue CLod0 = DAG.getLoad(
21550       MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
21551       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21552   SDValue Unpck1 =
21553       getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
21554 
21555   SDValue CLod1 = DAG.getLoad(
21556       MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
21557       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21558   SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
21559   // TODO: Are there any fast-math-flags to propagate here?
21560   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
21561   SDValue Result;
21562 
21563   if (Subtarget.hasSSE3() &&
21564       shouldUseHorizontalOp(true, DAG, Subtarget)) {
21565     Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
21566   } else {
21567     SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
21568     Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
21569   }
21570   Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
21571                        DAG.getIntPtrConstant(0, dl));
21572   return Result;
21573 }
21574 
21575 /// 32-bit unsigned integer to float expansion.
21576 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
21577                                    const X86Subtarget &Subtarget) {
21578   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
21579   SDLoc dl(Op);
21580   // FP constant to bias correct the final result.
21581   SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
21582                                    MVT::f64);
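  // 0x4330000000000000 is the bit pattern of 2^52, so OR'ing a 32-bit integer
  // into its low bits yields exactly 2^52 + x; subtracting the bias below then
  // recovers x as a double.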
21583 
21584   // Load the 32-bit value into an XMM register.
21585   SDValue Load =
21586       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
21587 
21588   // Zero out the upper parts of the register.
21589   Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
21590 
21591   // Or the load with the bias.
21592   SDValue Or = DAG.getNode(
21593       ISD::OR, dl, MVT::v2i64,
21594       DAG.getBitcast(MVT::v2i64, Load),
21595       DAG.getBitcast(MVT::v2i64,
21596                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
21597   Or =
21598       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
21599                   DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
21600 
21601   if (Op.getNode()->isStrictFPOpcode()) {
21602     // Subtract the bias.
21603     // TODO: Are there any fast-math-flags to propagate here?
21604     SDValue Chain = Op.getOperand(0);
21605     SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
21606                               {Chain, Or, Bias});
21607 
21608     if (Op.getValueType() == Sub.getValueType())
21609       return Sub;
21610 
21611     // Handle final rounding.
21612     std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
21613         Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
21614 
21615     return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
21616   }
21617 
21618   // Subtract the bias.
21619   // TODO: Are there any fast-math-flags to propagate here?
21620   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
21621 
21622   // Handle final rounding.
21623   return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
21624 }
21625 
21626 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
21627                                      const X86Subtarget &Subtarget,
21628                                      const SDLoc &DL) {
21629   if (Op.getSimpleValueType() != MVT::v2f64)
21630     return SDValue();
21631 
21632   bool IsStrict = Op->isStrictFPOpcode();
21633 
21634   SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
21635   assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
21636 
21637   if (Subtarget.hasAVX512()) {
21638     if (!Subtarget.hasVLX()) {
21639       // Let generic type legalization widen this.
21640       if (!IsStrict)
21641         return SDValue();
21642       // Otherwise pad the integer input with 0s and widen the operation.
21643       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
21644                        DAG.getConstant(0, DL, MVT::v2i32));
21645       SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
21646                                 {Op.getOperand(0), N0});
21647       SDValue Chain = Res.getValue(1);
21648       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
21649                         DAG.getIntPtrConstant(0, DL));
21650       return DAG.getMergeValues({Res, Chain}, DL);
21651     }
21652 
21653     // Legalize to v4i32 type.
21654     N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
21655                      DAG.getUNDEF(MVT::v2i32));
21656     if (IsStrict)
21657       return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
21658                          {Op.getOperand(0), N0});
21659     return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
21660   }
21661 
21662   // Zero extend to 2i64, OR with the floating point representation of 2^52.
21663   // This gives us the floating point equivalent of 2^52 + the i32 integer
21664   // since double has 52-bits of mantissa. Then subtract 2^52 in floating
21665   // point leaving just our i32 integers in double format.
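  // For example, an input element of 7 becomes the bit pattern
  // 0x4330000000000007, i.e. the double 2^52 + 7; subtracting 2^52 leaves 7.0.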
21666   SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
21667   SDValue VBias =
21668       DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
21669   SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
21670                            DAG.getBitcast(MVT::v2i64, VBias));
21671   Or = DAG.getBitcast(MVT::v2f64, Or);
21672 
21673   if (IsStrict)
21674     return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
21675                        {Op.getOperand(0), Or, VBias});
21676   return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
21677 }
21678 
21679 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
21680                                      const X86Subtarget &Subtarget) {
21681   SDLoc DL(Op);
21682   bool IsStrict = Op->isStrictFPOpcode();
21683   SDValue V = Op->getOperand(IsStrict ? 1 : 0);
21684   MVT VecIntVT = V.getSimpleValueType();
21685   assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
21686          "Unsupported custom type");
21687 
21688   if (Subtarget.hasAVX512()) {
21689     // With AVX512 but not VLX, we need to widen to get a 512-bit result type.
21690     assert(!Subtarget.hasVLX() && "Unexpected features");
21691     MVT VT = Op->getSimpleValueType(0);
21692 
21693     // v8i32->v8f64 is legal with AVX512 so just return it.
21694     if (VT == MVT::v8f64)
21695       return Op;
21696 
21697     assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
21698            "Unexpected VT!");
21699     MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
21700     MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
21701     // Need to concat with zero vector for strict fp to avoid spurious
21702     // exceptions.
21703     SDValue Tmp =
21704         IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
21705     V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
21706                     DAG.getIntPtrConstant(0, DL));
21707     SDValue Res, Chain;
21708     if (IsStrict) {
21709       Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
21710                         {Op->getOperand(0), V});
21711       Chain = Res.getValue(1);
21712     } else {
21713       Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
21714     }
21715 
21716     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
21717                       DAG.getIntPtrConstant(0, DL));
21718 
21719     if (IsStrict)
21720       return DAG.getMergeValues({Res, Chain}, DL);
21721     return Res;
21722   }
21723 
21724   if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
21725       Op->getSimpleValueType(0) == MVT::v4f64) {
21726     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
21727     Constant *Bias = ConstantFP::get(
21728         *DAG.getContext(),
21729         APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
21730     auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
21731     SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
21732     SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
21733     SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
21734     SDValue VBias = DAG.getMemIntrinsicNode(
21735         X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
21736         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
21737         MachineMemOperand::MOLoad);
21738 
21739     SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
21740                              DAG.getBitcast(MVT::v4i64, VBias));
21741     Or = DAG.getBitcast(MVT::v4f64, Or);
21742 
21743     if (IsStrict)
21744       return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
21745                          {Op.getOperand(0), Or, VBias});
21746     return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
21747   }
21748 
21749   // The algorithm is the following:
21750   // #ifdef __SSE4_1__
21751   //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
21752   //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
21753   //                                 (uint4) 0x53000000, 0xaa);
21754   // #else
21755   //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
21756   //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
21757   // #endif
21758   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
21759   //     return (float4) lo + fhi;
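  // Why this works: 0x4b000000 is 2^23 and 0x53000000 is 2^39 as IEEE floats,
  // so lo is exactly 2^23 + (v & 0xffff) and hi is exactly
  // 2^39 + (v >> 16) * 2^16. Subtracting (0x1.0p39f + 0x1.0p23f) from hi and
  // adding lo cancels the exponent bias terms and reconstructs v with a single
  // rounding step.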
21760 
21761   bool Is128 = VecIntVT == MVT::v4i32;
21762   MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
21763   // If we convert to something other than the supported type, e.g., to v4f64,
21764   // abort early.
21765   if (VecFloatVT != Op->getSimpleValueType(0))
21766     return SDValue();
21767 
21768   // In the #ifdef/#else code, we have in common:
21769   // - The vector of constants:
21770   // -- 0x4b000000
21771   // -- 0x53000000
21772   // - A shift:
21773   // -- v >> 16
21774 
21775   // Create the splat vector for 0x4b000000.
21776   SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
21777   // Create the splat vector for 0x53000000.
21778   SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
21779 
21780   // Create the right shift.
21781   SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
21782   SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
21783 
21784   SDValue Low, High;
21785   if (Subtarget.hasSSE41()) {
21786     MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
21787     //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
21788     SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
21789     SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
21790     // Low will be bitcasted right away, so do not bother bitcasting back to its
21791     // original type.
21792     Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
21793                       VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
21794     //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
21795     //                                 (uint4) 0x53000000, 0xaa);
21796     SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
21797     SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
21798     // High will be bitcasted right away, so do not bother bitcasting back to
21799     // its original type.
21800     High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
21801                        VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
21802   } else {
21803     SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
21804     //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
21805     SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
21806     Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
21807 
21808     //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
21809     High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
21810   }
21811 
21812   // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
21813   SDValue VecCstFSub = DAG.getConstantFP(
21814       APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
21815 
21816   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
21817   // NOTE: By using fsub of a positive constant instead of fadd of a negative
21818   // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
21819   // enabled. See PR24512.
21820   SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
21821   // TODO: Are there any fast-math-flags to propagate here?
21822   //     (float4) lo;
21823   SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
21824   //     return (float4) lo + fhi;
21825   if (IsStrict) {
21826     SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
21827                                 {Op.getOperand(0), HighBitcast, VecCstFSub});
21828     return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
21829                        {FHigh.getValue(1), LowBitcast, FHigh});
21830   }
21831 
21832   SDValue FHigh =
21833       DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
21834   return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
21835 }
21836 
21837 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
21838                                    const X86Subtarget &Subtarget) {
21839   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
21840   SDValue N0 = Op.getOperand(OpNo);
21841   MVT SrcVT = N0.getSimpleValueType();
21842   SDLoc dl(Op);
21843 
21844   switch (SrcVT.SimpleTy) {
21845   default:
21846     llvm_unreachable("Custom UINT_TO_FP is not supported!");
21847   case MVT::v2i32:
21848     return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
21849   case MVT::v4i32:
21850   case MVT::v8i32:
21851     return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
21852   case MVT::v2i64:
21853   case MVT::v4i64:
21854     return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
21855   }
21856 }
21857 
21858 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
21859                                            SelectionDAG &DAG) const {
21860   bool IsStrict = Op->isStrictFPOpcode();
21861   unsigned OpNo = IsStrict ? 1 : 0;
21862   SDValue Src = Op.getOperand(OpNo);
21863   SDLoc dl(Op);
21864   auto PtrVT = getPointerTy(DAG.getDataLayout());
21865   MVT SrcVT = Src.getSimpleValueType();
21866   MVT DstVT = Op->getSimpleValueType(0);
21867   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21868 
21869   // Bail out when we don't have native conversion instructions.
21870   if (DstVT == MVT::f128)
21871     return SDValue();
21872 
21873   if (isSoftFP16(DstVT))
21874     return promoteXINT_TO_FP(Op, DAG);
21875   else if (isLegalConversion(SrcVT, false, Subtarget))
21876     return Op;
21877 
21878   if (DstVT.isVector())
21879     return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
21880 
21881   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
21882     return LowerWin64_INT128_TO_FP(Op, DAG);
21883 
21884   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
21885     return Extract;
21886 
21887   if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
21888       (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
21889     // Conversions from unsigned i32 to f32/f64 are legal,
21890     // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
21891     return Op;
21892   }
21893 
21894   // Promote i32 to i64 and use a signed conversion on 64-bit targets.
21895   if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
21896     Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
21897     if (IsStrict)
21898       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
21899                          {Chain, Src});
21900     return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
21901   }
21902 
21903   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
21904     return V;
21905   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
21906     return V;
21907 
21908   // The transform for i64->f64 isn't correct for 0 when rounding to negative
21909   // infinity. It produces -0.0, so disable under strictfp.
21910   if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
21911       !IsStrict)
21912     return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
21913   // The transform for i32->f64/f32 isn't correct for 0 when rounding to
21914   // negative infinity, so disable it under strictfp and use FILD instead.
21915   if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
21916       !IsStrict)
21917     return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
21918   if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
21919       (DstVT == MVT::f32 || DstVT == MVT::f64))
21920     return SDValue();
21921 
21922   // Make a 64-bit buffer, and use it to build an FILD.
21923   SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
21924   int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
21925   Align SlotAlign(8);
21926   MachinePointerInfo MPI =
21927     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
21928   if (SrcVT == MVT::i32) {
21929     SDValue OffsetSlot =
21930         DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
21931     SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
21932     SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
21933                                   OffsetSlot, MPI.getWithOffset(4), SlotAlign);
21934     std::pair<SDValue, SDValue> Tmp =
21935         BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
21936     if (IsStrict)
21937       return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
21938 
21939     return Tmp.first;
21940   }
21941 
21942   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
21943   SDValue ValueToStore = Src;
21944   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
21945     // Bitcasting to f64 here allows us to do a single 64-bit store from
21946     // an SSE register, avoiding the store forwarding penalty that would come
21947     // with two 32-bit stores.
21948     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
21949   }
21950   SDValue Store =
21951       DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
21952   // For i64 source, we need to add the appropriate power of 2 if the input
21953   // was negative. We must be careful to do the computation in x87 extended
21954   // precision, not in SSE.
21955   SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
21956   SDValue Ops[] = { Store, StackSlot };
21957   SDValue Fild =
21958       DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
21959                               SlotAlign, MachineMemOperand::MOLoad);
21960   Chain = Fild.getValue(1);
21961 
21962 
21963   // Check whether the sign bit is set.
21964   SDValue SignSet = DAG.getSetCC(
21965       dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
21966       Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
21967 
21968   // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
21969   APInt FF(64, 0x5F80000000000000ULL);
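  // 0x5F800000 is 2^64 as an IEEE single. FILD treats the stored i64 as
  // signed, so when the sign bit of the original value was set the result is
  // low by exactly 2^64; selecting the 4-byte offset below picks the 2^64
  // fudge value instead of +0.0, and the add corrects this in x87 precision.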
21970   SDValue FudgePtr = DAG.getConstantPool(
21971       ConstantInt::get(*DAG.getContext(), FF), PtrVT);
21972   Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
21973 
21974   // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
21975   SDValue Zero = DAG.getIntPtrConstant(0, dl);
21976   SDValue Four = DAG.getIntPtrConstant(4, dl);
21977   SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
21978   FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
21979 
21980   // Load the value out, extending it from f32 to f80.
21981   SDValue Fudge = DAG.getExtLoad(
21982       ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
21983       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
21984       CPAlignment);
21985   Chain = Fudge.getValue(1);
21986   // Extend everything to 80 bits to force it to be done on x87.
21987   // TODO: Are there any fast-math-flags to propagate here?
21988   if (IsStrict) {
21989     unsigned Opc = ISD::STRICT_FADD;
21990     // Windows needs the precision control changed to 80bits around this add.
21991     if (Subtarget.isOSWindows() && DstVT == MVT::f32)
21992       Opc = X86ISD::STRICT_FP80_ADD;
21993 
21994     SDValue Add =
21995         DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
21996     // STRICT_FP_ROUND can't handle equal types.
21997     if (DstVT == MVT::f80)
21998       return Add;
21999     return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
22000                        {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
22001   }
22002   unsigned Opc = ISD::FADD;
22003   // Windows needs the precision control changed to 80bits around this add.
22004   if (Subtarget.isOSWindows() && DstVT == MVT::f32)
22005     Opc = X86ISD::FP80_ADD;
22006 
22007   SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
22008   return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
22009                      DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
22010 }
22011 
22012 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
22013 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
22014 // just return an SDValue().
22015 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
22016 // to i16, i32 or i64, and we lower it to a legal sequence and return the
22017 // result.
22018 SDValue
22019 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
22020                                    bool IsSigned, SDValue &Chain) const {
22021   bool IsStrict = Op->isStrictFPOpcode();
22022   SDLoc DL(Op);
22023 
22024   EVT DstTy = Op.getValueType();
22025   SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
22026   EVT TheVT = Value.getValueType();
22027   auto PtrVT = getPointerTy(DAG.getDataLayout());
22028 
22029   if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
22030     // f16 must be promoted before using the lowering in this routine.
22031     // fp128 does not use this lowering.
22032     return SDValue();
22033   }
22034 
22035   // If using FIST to compute an unsigned i64, we'll need some fixup
22036   // to handle values above the maximum signed i64.  A FIST is always
22037   // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
22038   bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
22039 
22040   // FIXME: This does not generate an invalid exception if the input does not
22041   // fit in i32. PR44019
22042   if (!IsSigned && DstTy != MVT::i64) {
22043     // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
22044     // The low 32 bits of the fist result will have the correct uint32 result.
22045     assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
22046     DstTy = MVT::i64;
22047   }
22048 
22049   assert(DstTy.getSimpleVT() <= MVT::i64 &&
22050          DstTy.getSimpleVT() >= MVT::i16 &&
22051          "Unknown FP_TO_INT to lower!");
22052 
22053   // We lower FP->int64 into FISTP64 followed by a load from a temporary
22054   // stack slot.
22055   MachineFunction &MF = DAG.getMachineFunction();
22056   unsigned MemSize = DstTy.getStoreSize();
22057   int SSFI =
22058       MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
22059   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
22060 
22061   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
22062 
22063   SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
22064 
22065   if (UnsignedFixup) {
22066     //
22067     // Conversion to unsigned i64 is implemented with a select,
22068     // depending on whether the source value fits in the range
22069     // of a signed i64.  Let Thresh be the FP equivalent of
22070     // 0x8000000000000000ULL.
22071     //
22072     //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;
22073     //  FltOfs = (Value >= Thresh) ? Thresh : 0;
22074     //  FistSrc = (Value - FltOfs);
22075     //  Fist-to-mem64 FistSrc
22076     //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
22077     //  to XOR'ing the result with Adjust.
22078     //
22079     // Being a power of 2, Thresh is exactly representable in all FP formats.
22080     // For X87 we'd like to use the smallest FP type for this constant, but
22081     // for DAG type consistency we have to match the FP operand type.
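    // For example, for Value = 2^63 + 42: Value >= Thresh, so FltOfs = Thresh,
    // FistSrc = 42, and the FIST stores 42; XOR'ing that result with
    // Adjust = 0x8000000000000000 restores 2^63 + 42.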
22082 
22083     APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
22084     LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
22085     bool LosesInfo = false;
22086     if (TheVT == MVT::f64)
22087       // The rounding mode is irrelevant as the conversion should be exact.
22088       Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
22089                               &LosesInfo);
22090     else if (TheVT == MVT::f80)
22091       Status = Thresh.convert(APFloat::x87DoubleExtended(),
22092                               APFloat::rmNearestTiesToEven, &LosesInfo);
22093 
22094     assert(Status == APFloat::opOK && !LosesInfo &&
22095            "FP conversion should have been exact");
22096 
22097     SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
22098 
22099     EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
22100                                    *DAG.getContext(), TheVT);
22101     SDValue Cmp;
22102     if (IsStrict) {
22103       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
22104                          /*IsSignaling*/ true);
22105       Chain = Cmp.getValue(1);
22106     } else {
22107       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
22108     }
22109 
22110     // Our preferred lowering of
22111     //
22112     // (Value >= Thresh) ? 0x8000000000000000ULL : 0
22113     //
22114     // is
22115     //
22116     // (Value >= Thresh) << 63
22117     //
22118     // but since we can get here after LegalOperations, DAGCombine might do the
22119     // wrong thing if we create a select. So, directly create the preferred
22120     // version.
22121     SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
22122     SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
22123     Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
22124 
22125     SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
22126                                    DAG.getConstantFP(0.0, DL, TheVT));
22127 
22128     if (IsStrict) {
22129       Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
22130                           { Chain, Value, FltOfs });
22131       Chain = Value.getValue(1);
22132     } else
22133       Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
22134   }
22135 
22136   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
22137 
22138   // FIXME: This causes a redundant load/store if the SSE-class value is already
22139   // in memory, such as if it is on the call stack.
22140   if (isScalarFPTypeInSSEReg(TheVT)) {
22141     assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
22142     Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
22143     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
22144     SDValue Ops[] = { Chain, StackSlot };
22145 
22146     unsigned FLDSize = TheVT.getStoreSize();
22147     assert(FLDSize <= MemSize && "Stack slot not big enough");
22148     MachineMemOperand *MMO = MF.getMachineMemOperand(
22149         MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
22150     Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
22151     Chain = Value.getValue(1);
22152   }
22153 
22154   // Build the FP_TO_INT*_IN_MEM
22155   MachineMemOperand *MMO = MF.getMachineMemOperand(
22156       MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
22157   SDValue Ops[] = { Chain, Value, StackSlot };
22158   SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
22159                                          DAG.getVTList(MVT::Other),
22160                                          Ops, DstTy, MMO);
22161 
22162   SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
22163   Chain = Res.getValue(1);
22164 
22165   // If we need an unsigned fixup, XOR the result with adjust.
22166   if (UnsignedFixup)
22167     Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
22168 
22169   return Res;
22170 }
22171 
22172 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
22173                               const X86Subtarget &Subtarget) {
22174   MVT VT = Op.getSimpleValueType();
22175   SDValue In = Op.getOperand(0);
22176   MVT InVT = In.getSimpleValueType();
22177   SDLoc dl(Op);
22178   unsigned Opc = Op.getOpcode();
22179 
22180   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
22181   assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
22182          "Unexpected extension opcode");
22183   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22184          "Expected same number of elements");
22185   assert((VT.getVectorElementType() == MVT::i16 ||
22186           VT.getVectorElementType() == MVT::i32 ||
22187           VT.getVectorElementType() == MVT::i64) &&
22188          "Unexpected element type");
22189   assert((InVT.getVectorElementType() == MVT::i8 ||
22190           InVT.getVectorElementType() == MVT::i16 ||
22191           InVT.getVectorElementType() == MVT::i32) &&
22192          "Unexpected element type");
22193 
22194   unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);
22195 
22196   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
22197     assert(InVT == MVT::v32i8 && "Unexpected VT!");
22198     return splitVectorIntUnary(Op, DAG);
22199   }
22200 
22201   if (Subtarget.hasInt256())
22202     return Op;
22203 
22204   // Optimize vectors in AVX mode:
22205   //
22206   //   v8i16 -> v8i32
22207   //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
22208   //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
22209   //   Concat upper and lower parts.
22210   //
22211   //   v4i32 -> v4i64
22212   //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
22213   //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
22214   //   Concat upper and lower parts.
22215   //
22216   MVT HalfVT = VT.getHalfNumVectorElementsVT();
22217   SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
22218 
22219   // Short-circuit if we can determine that each 128-bit half is the same value.
22220   // Otherwise, this is difficult to match and optimize.
22221   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
22222     if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
22223       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
22224 
22225   SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
22226   SDValue Undef = DAG.getUNDEF(InVT);
22227   bool NeedZero = Opc == ISD::ZERO_EXTEND;
22228   SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
22229   OpHi = DAG.getBitcast(HalfVT, OpHi);
22230 
22231   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
22232 }
22233 
22234 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
22235 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
22236                                    const SDLoc &dl, SelectionDAG &DAG) {
22237   assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
22238   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
22239                            DAG.getIntPtrConstant(0, dl));
22240   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
22241                            DAG.getIntPtrConstant(8, dl));
22242   Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
22243   Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
22244   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
22245   return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
22246 }
22247 
22248 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
22249                                       const X86Subtarget &Subtarget,
22250                                       SelectionDAG &DAG) {
22251   MVT VT = Op->getSimpleValueType(0);
22252   SDValue In = Op->getOperand(0);
22253   MVT InVT = In.getSimpleValueType();
22254   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
22255   SDLoc DL(Op);
22256   unsigned NumElts = VT.getVectorNumElements();
22257 
22258   // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
22259   // avoids a constant pool load.
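  // E.g. (sketch):  zext v8i1 %k to v8i32
  //   -> srl (sext v8i1 %k to v8i32), 31
  // The sign_extend yields 0 / -1 lanes and the logical shift right by
  // (element size - 1) maps -1 to 1.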
22260   if (VT.getVectorElementType() != MVT::i8) {
22261     SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
22262     return DAG.getNode(ISD::SRL, DL, VT, Extend,
22263                        DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
22264   }
22265 
22266   // Extend VT if BWI is not supported.
22267   MVT ExtVT = VT;
22268   if (!Subtarget.hasBWI()) {
22269     // If v16i32 is to be avoided, we'll need to split and concatenate.
22270     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
22271       return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
22272 
22273     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
22274   }
22275 
22276   // Widen to 512-bits if VLX is not supported.
22277   MVT WideVT = ExtVT;
22278   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
22279     NumElts *= 512 / ExtVT.getSizeInBits();
22280     InVT = MVT::getVectorVT(MVT::i1, NumElts);
22281     In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
22282                      In, DAG.getIntPtrConstant(0, DL));
22283     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
22284                               NumElts);
22285   }
22286 
22287   SDValue One = DAG.getConstant(1, DL, WideVT);
22288   SDValue Zero = DAG.getConstant(0, DL, WideVT);
22289 
22290   SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
22291 
22292   // Truncate if we had to extend above.
22293   if (VT != ExtVT) {
22294     WideVT = MVT::getVectorVT(MVT::i8, NumElts);
22295     SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
22296   }
22297 
22298   // Extract back to 128/256-bit if we widened.
22299   if (WideVT != VT)
22300     SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
22301                               DAG.getIntPtrConstant(0, DL));
22302 
22303   return SelectedVal;
22304 }
22305 
22306 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22307                                 SelectionDAG &DAG) {
22308   SDValue In = Op.getOperand(0);
22309   MVT SVT = In.getSimpleValueType();
22310 
22311   if (SVT.getVectorElementType() == MVT::i1)
22312     return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
22313 
22314   assert(Subtarget.hasAVX() && "Expected AVX support");
22315   return LowerAVXExtend(Op, DAG, Subtarget);
22316 }
22317 
22318 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
22319 /// It makes use of the fact that vectors with enough leading sign/zero bits
22320 /// prevent the PACKSS/PACKUS from saturating the results.
22321 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
22322 /// within each 128-bit lane.
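/// For example (roughly), truncating a v8i32 whose elements are already
/// sign-extended from i16 down to v8i16 becomes:
///   split the 256-bit source into two v4i32 halves Lo/Hi
///   PACKSSDW Lo, Hi  ->  v8i16
/// Wider sources recurse, halving the element width one PACK stage at a time.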
22323 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
22324                                       const SDLoc &DL, SelectionDAG &DAG,
22325                                       const X86Subtarget &Subtarget) {
22326   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
22327          "Unexpected PACK opcode");
22328   assert(DstVT.isVector() && "VT not a vector?");
22329 
22330   // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
22331   if (!Subtarget.hasSSE2())
22332     return SDValue();
22333 
22334   EVT SrcVT = In.getValueType();
22335 
22336   // No truncation required, we might get here due to recursive calls.
22337   if (SrcVT == DstVT)
22338     return In;
22339 
22340   // We only support vector truncation to 64bits or greater from a
22341   // 128bits or greater source.
22342   unsigned DstSizeInBits = DstVT.getSizeInBits();
22343   unsigned SrcSizeInBits = SrcVT.getSizeInBits();
22344   if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
22345     return SDValue();
22346 
22347   unsigned NumElems = SrcVT.getVectorNumElements();
22348   if (!isPowerOf2_32(NumElems))
22349     return SDValue();
22350 
22351   LLVMContext &Ctx = *DAG.getContext();
22352   assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
22353   assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
22354 
22355   EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
22356 
22357   // Pack to the largest type possible:
22358   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
22359   EVT InVT = MVT::i16, OutVT = MVT::i8;
22360   if (SrcVT.getScalarSizeInBits() > 16 &&
22361       (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
22362     InVT = MVT::i32;
22363     OutVT = MVT::i16;
22364   }
22365 
22366   // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
22367   if (SrcVT.is128BitVector()) {
22368     InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
22369     OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
22370     In = DAG.getBitcast(InVT, In);
22371     SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
22372     Res = extractSubVector(Res, 0, DAG, DL, 64);
22373     return DAG.getBitcast(DstVT, Res);
22374   }
22375 
22376   // Split lower/upper subvectors.
22377   SDValue Lo, Hi;
22378   std::tie(Lo, Hi) = splitVector(In, DAG, DL);
22379 
22380   unsigned SubSizeInBits = SrcSizeInBits / 2;
22381   InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
22382   OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
22383 
22384   // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
22385   if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
22386     Lo = DAG.getBitcast(InVT, Lo);
22387     Hi = DAG.getBitcast(InVT, Hi);
22388     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
22389     return DAG.getBitcast(DstVT, Res);
22390   }
22391 
22392   // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
22393   // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
22394   if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
22395     Lo = DAG.getBitcast(InVT, Lo);
22396     Hi = DAG.getBitcast(InVT, Hi);
22397     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
22398 
22399     // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
22400     // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
22401     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
22402     SmallVector<int, 64> Mask;
22403     int Scale = 64 / OutVT.getScalarSizeInBits();
22404     narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
22405     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
22406 
22407     if (DstVT.is256BitVector())
22408       return DAG.getBitcast(DstVT, Res);
22409 
22410     // If 512bit -> 128bit, truncate another stage.
22411     EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
22412     Res = DAG.getBitcast(PackedVT, Res);
22413     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
22414   }
22415 
22416   // Recursively pack lower/upper subvectors, concat result and pack again.
22417   assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
22418   EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
22419   Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
22420   Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
22421 
22422   PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
22423   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
22424   return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
22425 }
22426 
22427 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
22428                                   const X86Subtarget &Subtarget) {
22429 
22430   SDLoc DL(Op);
22431   MVT VT = Op.getSimpleValueType();
22432   SDValue In = Op.getOperand(0);
22433   MVT InVT = In.getSimpleValueType();
22434 
22435   assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
22436 
22437   // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
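  // E.g. (sketch, BWI case):  trunc v16i8 %x to v16i1
  //   -> shl %x, 7        ; move bit 0 into the sign bit
  //      setgt 0, %x      ; signed compare, selected as VPMOVB2M
  // (The byte shift is actually done on a word-bitcast vector below, since
  // packed byte shifts aren't available natively.)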
22438   unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
22439   if (InVT.getScalarSizeInBits() <= 16) {
22440     if (Subtarget.hasBWI()) {
22441       // legal, will go to VPMOVB2M, VPMOVW2M
22442       if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
22443         // We need to shift to get the lsb into sign position.
22444         // Shifting packed bytes isn't supported natively, so bitcast to words.
22445         MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
22446         In = DAG.getNode(ISD::SHL, DL, ExtVT,
22447                          DAG.getBitcast(ExtVT, In),
22448                          DAG.getConstant(ShiftInx, DL, ExtVT));
22449         In = DAG.getBitcast(InVT, In);
22450       }
22451       return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
22452                           In, ISD::SETGT);
22453     }
22454     // Use TESTD/Q by extending the vector to packed dwords/qwords.
22455     assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
22456            "Unexpected vector type.");
22457     unsigned NumElts = InVT.getVectorNumElements();
22458     assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
22459     // We need to change to a wider element type that we have support for.
22460     // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
22461     // For 16 element vectors we extend to v16i32 unless we are explicitly
22462     // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
22463     // we need to split into two 8 element vectors which we can extend to v8i32,
22464     // truncate and concat the results. There's an additional complication if
22465     // the original type is v16i8. In that case we can't split the v16i8
22466     // directly, so we need to shuffle high elements to low and use
22467     // sign_extend_vector_inreg.
22468     if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
22469       SDValue Lo, Hi;
22470       if (InVT == MVT::v16i8) {
22471         Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
22472         Hi = DAG.getVectorShuffle(
22473             InVT, DL, In, In,
22474             {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
22475         Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
22476       } else {
22477         assert(InVT == MVT::v16i16 && "Unexpected VT!");
22478         Lo = extract128BitVector(In, 0, DAG, DL);
22479         Hi = extract128BitVector(In, 8, DAG, DL);
22480       }
22481       // We're split now, just emit two truncates and a concat. The two
22482       // truncates will trigger legalization to come back to this function.
22483       Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
22484       Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
22485       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22486     }
22487     // We either have 8 elements or we're allowed to use 512-bit vectors.
22488     // If we have VLX, we want to use the narrowest vector that can get the
22489     // job done so we use vXi32.
22490     MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
22491     MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
22492     In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
22493     InVT = ExtVT;
22494     ShiftInx = InVT.getScalarSizeInBits() - 1;
22495   }
22496 
22497   if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
22498     // We need to shift to get the lsb into sign position.
22499     In = DAG.getNode(ISD::SHL, DL, InVT, In,
22500                      DAG.getConstant(ShiftInx, DL, InVT));
22501   }
22502   // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
22503   if (Subtarget.hasDQI())
22504     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
22505   return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
22506 }
22507 
22508 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
22509   SDLoc DL(Op);
22510   MVT VT = Op.getSimpleValueType();
22511   SDValue In = Op.getOperand(0);
22512   MVT InVT = In.getSimpleValueType();
22513   unsigned InNumEltBits = InVT.getScalarSizeInBits();
22514 
22515   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22516          "Invalid TRUNCATE operation");
22517 
22518   // If we're called by the type legalizer, handle a few cases.
22519   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22520   if (!TLI.isTypeLegal(InVT)) {
22521     if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
22522         VT.is128BitVector()) {
22523       assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
22524              "Unexpected subtarget!");
22525       // The default behavior is to truncate one step, concatenate, and then
22526       // truncate the remainder. We'd rather produce two 64-bit results and
22527       // concatenate those.
22528       SDValue Lo, Hi;
22529       std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
22530 
22531       EVT LoVT, HiVT;
22532       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22533 
22534       Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
22535       Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
22536       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22537     }
22538 
22539     // Otherwise let default legalization handle it.
22540     return SDValue();
22541   }
22542 
22543   if (VT.getVectorElementType() == MVT::i1)
22544     return LowerTruncateVecI1(Op, DAG, Subtarget);
22545 
22546   // vpmovqb/w/d, vpmovdb/w, vpmovwb
22547   if (Subtarget.hasAVX512()) {
22548     if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
22549       assert(VT == MVT::v32i8 && "Unexpected VT!");
22550       return splitVectorIntUnary(Op, DAG);
22551     }
22552 
22553     // Word to byte truncation is only legal with BWI. Otherwise we have to
22554     // promote to v16i32 and then truncate that. But we should only do that if
22555     // we haven't been asked to avoid 512-bit vectors. The actual promotion to
22556     // v16i32 will be handled by isel patterns.
22557     if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
22558         Subtarget.canExtendTo512DQ())
22559       return Op;
22560   }
22561 
22562   unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
22563   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
22564 
22565   // Truncate with PACKUS if we are truncating a vector with leading zero bits
22566   // that extend all the way to the packed/truncated value.
22567   // Pre-SSE41 we can only use PACKUSWB.
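  // E.g. (sketch): for a v8i32 -> v8i16 truncate where computeKnownBits proves
  // the top 16 bits of every element are zero, we can emit PACKUSDW on the two
  // v4i32 halves directly instead of a longer shuffle sequence.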
22568   KnownBits Known = DAG.computeKnownBits(In);
22569   if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
22570     if (SDValue V =
22571             truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
22572       return V;
22573 
22574   // Truncate with PACKSS if we are truncating a vector with sign-bits that
22575   // extend all the way to the packed/truncated value.
22576   if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
22577     if (SDValue V =
22578             truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
22579       return V;
22580 
22581   // Handle truncation of V256 to V128 using shuffles.
22582   assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
22583 
22584   if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
22585     // On AVX2, v4i64 -> v4i32 becomes VPERMD.
22586     if (Subtarget.hasInt256()) {
22587       static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
22588       In = DAG.getBitcast(MVT::v8i32, In);
22589       In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
22590       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
22591                          DAG.getIntPtrConstant(0, DL));
22592     }
22593 
22594     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22595                                DAG.getIntPtrConstant(0, DL));
22596     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22597                                DAG.getIntPtrConstant(2, DL));
22598     static const int ShufMask[] = {0, 2, 4, 6};
22599     return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
22600                                 DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
22601   }
22602 
22603   if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
22604     // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
22605     if (Subtarget.hasInt256()) {
22606       // The PSHUFB mask:
22607       static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
22608                                       -1, -1, -1, -1, -1, -1, -1, -1,
22609                                       16, 17, 20, 21, 24, 25, 28, 29,
22610                                       -1, -1, -1, -1, -1, -1, -1, -1 };
22611       In = DAG.getBitcast(MVT::v32i8, In);
22612       In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
22613       In = DAG.getBitcast(MVT::v4i64, In);
22614 
22615       static const int ShufMask2[] = {0, 2, -1, -1};
22616       In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
22617       In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22618                        DAG.getIntPtrConstant(0, DL));
22619       return DAG.getBitcast(MVT::v8i16, In);
22620     }
22621 
22622     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
22623                                DAG.getIntPtrConstant(0, DL));
22624     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
22625                                DAG.getIntPtrConstant(4, DL));
22626 
22627     // The PSHUFB mask:
22628     static const int ShufMask1[] = {0, 2, 4, 6, -1, -1, -1, -1};
22629 
22630     OpLo = DAG.getBitcast(MVT::v8i16, OpLo);
22631     OpHi = DAG.getBitcast(MVT::v8i16, OpHi);
22632 
22633     OpLo = DAG.getVectorShuffle(MVT::v8i16, DL, OpLo, OpLo, ShufMask1);
22634     OpHi = DAG.getVectorShuffle(MVT::v8i16, DL, OpHi, OpHi, ShufMask1);
22635 
22636     OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
22637     OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
22638 
22639     // The MOVLHPS Mask:
22640     static const int ShufMask2[] = {0, 1, 4, 5};
22641     SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
22642     return DAG.getBitcast(MVT::v8i16, res);
22643   }
22644 
22645   if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
22646     // Use an AND to zero upper bits for PACKUS.
22647     In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
22648 
22649     SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
22650                                DAG.getIntPtrConstant(0, DL));
22651     SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
22652                                DAG.getIntPtrConstant(8, DL));
22653     return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
22654   }
22655 
22656   llvm_unreachable("All 256->128 cases should have been handled above!");
22657 }
22658 
22659 // We can leverage the specific way the "cvttps2dq/cvttpd2dq" instruction
22660 // behaves on out of range inputs to generate optimized conversions.
22661 static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
22662                                     SelectionDAG &DAG,
22663                                     const X86Subtarget &Subtarget) {
22664   MVT SrcVT = Src.getSimpleValueType();
22665   unsigned DstBits = VT.getScalarSizeInBits();
22666   assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");
22667 
22668   // Calculate the converted result for values in the range 0 to
22669   // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
22670   SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
22671   SDValue Big =
22672       DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
22673                   DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
22674                               DAG.getConstantFP(2147483648.0f, dl, SrcVT)));
22675 
22676   // The "CVTTP2SI" instruction conveniently sets the sign bit if
22677   // and only if the value was out of range. So we can use that
22678   // as our indicator that we should use "Big" instead of "Small".
22679   //
22680   // Use "Small" if "IsOverflown" has all bits cleared
22681   // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
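  // Worked example (sketch, one f32 lane holding 3e9, which is exactly
  // representable in f32 but above the signed i32 range):
  //   Small       = cvttps2dq(3e9)        = 0x80000000 (integer indefinite)
  //   Big         = cvttps2dq(3e9 - 2^31) = 852516352
  //   IsOverflown = Small >>arith 31      = 0xFFFFFFFF
  //   Small | (Big & IsOverflown)         = 0x80000000 | 852516352 = 3000000000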
22682 
22683   // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
22684   // use the slightly slower blendv select instead.
22685   if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
22686     SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
22687     return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
22688   }
22689 
22690   SDValue IsOverflown =
22691       DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
22692                   DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
22693   return DAG.getNode(ISD::OR, dl, VT, Small,
22694                      DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
22695 }
22696 
22697 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
22698   bool IsStrict = Op->isStrictFPOpcode();
22699   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
22700                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
22701   MVT VT = Op->getSimpleValueType(0);
22702   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
22703   SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
22704   MVT SrcVT = Src.getSimpleValueType();
22705   SDLoc dl(Op);
22706 
22707   SDValue Res;
22708   if (isSoftFP16(SrcVT)) {
22709     MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
22710     if (IsStrict)
22711       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
22712                          {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
22713                                              {NVT, MVT::Other}, {Chain, Src})});
22714     return DAG.getNode(Op.getOpcode(), dl, VT,
22715                        DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
22716   } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
22717     return Op;
22718   }
22719 
22720   if (VT.isVector()) {
22721     if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
22722       MVT ResVT = MVT::v4i32;
22723       MVT TruncVT = MVT::v4i1;
22724       unsigned Opc;
22725       if (IsStrict)
22726         Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
22727       else
22728         Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
22729 
22730       if (!IsSigned && !Subtarget.hasVLX()) {
22731         assert(Subtarget.useAVX512Regs() && "Unexpected features!");
22732         // Widen to 512-bits.
22733         ResVT = MVT::v8i32;
22734         TruncVT = MVT::v8i1;
22735         Opc = Op.getOpcode();
22736         // Need to concat with zero vector for strict fp to avoid spurious
22737         // exceptions.
22738         // TODO: Should we just do this for non-strict as well?
22739         SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
22740                                : DAG.getUNDEF(MVT::v8f64);
22741         Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
22742                           DAG.getIntPtrConstant(0, dl));
22743       }
22744       if (IsStrict) {
22745         Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
22746         Chain = Res.getValue(1);
22747       } else {
22748         Res = DAG.getNode(Opc, dl, ResVT, Src);
22749       }
22750 
22751       Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
22752       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
22753                         DAG.getIntPtrConstant(0, dl));
22754       if (IsStrict)
22755         return DAG.getMergeValues({Res, Chain}, dl);
22756       return Res;
22757     }
22758 
22759     if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
22760       if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
22761         return Op;
22762 
22763       MVT ResVT = VT;
22764       MVT EleVT = VT.getVectorElementType();
22765       if (EleVT != MVT::i64)
22766         ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
22767 
22768       if (SrcVT != MVT::v8f16) {
22769         SDValue Tmp =
22770             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
22771         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
22772         Ops[0] = Src;
22773         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
22774       }
22775 
22776       if (IsStrict) {
22777         Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
22778                                    : X86ISD::STRICT_CVTTP2UI,
22779                           dl, {ResVT, MVT::Other}, {Chain, Src});
22780         Chain = Res.getValue(1);
22781       } else {
22782         Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
22783                           ResVT, Src);
22784       }
22785 
22786       // TODO: Need to add exception check code for strict FP.
22787       if (EleVT.getSizeInBits() < 16) {
22788         ResVT = MVT::getVectorVT(EleVT, 8);
22789         Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
22790       }
22791 
22792       if (ResVT != VT)
22793         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22794                           DAG.getIntPtrConstant(0, dl));
22795 
22796       if (IsStrict)
22797         return DAG.getMergeValues({Res, Chain}, dl);
22798       return Res;
22799     }
22800 
22801     if (VT == MVT::v8i16 && (SrcVT == MVT::v8f32 || SrcVT == MVT::v8f64)) {
22802       if (IsStrict) {
22803         Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
22804                                    : ISD::STRICT_FP_TO_UINT,
22805                           dl, {MVT::v8i32, MVT::Other}, {Chain, Src});
22806         Chain = Res.getValue(1);
22807       } else {
22808         Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
22809                           MVT::v8i32, Src);
22810       }
22811 
22812       // TODO: Need to add exception check code for strict FP.
22813       Res = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i16, Res);
22814 
22815       if (IsStrict)
22816         return DAG.getMergeValues({Res, Chain}, dl);
22817       return Res;
22818     }
22819 
22820     // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
22821     if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
22822       assert(!IsSigned && "Expected unsigned conversion!");
22823       assert(Subtarget.useAVX512Regs() && "Requires avx512f");
22824       return Op;
22825     }
22826 
22827     // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
22828     if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
22829         (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
22830         Subtarget.useAVX512Regs()) {
22831       assert(!IsSigned && "Expected unsigned conversion!");
22832       assert(!Subtarget.hasVLX() && "Unexpected features!");
22833       MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
22834       MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
22835       // Need to concat with zero vector for strict fp to avoid spurious
22836       // exceptions.
22837       // TODO: Should we just do this for non-strict as well?
22838       SDValue Tmp =
22839           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
22840       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
22841                         DAG.getIntPtrConstant(0, dl));
22842 
22843       if (IsStrict) {
22844         Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
22845                           {Chain, Src});
22846         Chain = Res.getValue(1);
22847       } else {
22848         Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
22849       }
22850 
22851       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22852                         DAG.getIntPtrConstant(0, dl));
22853 
22854       if (IsStrict)
22855         return DAG.getMergeValues({Res, Chain}, dl);
22856       return Res;
22857     }
22858 
22859     // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
22860     if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
22861         (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
22862         Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
22863       assert(!Subtarget.hasVLX() && "Unexpected features!");
22864       MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
22865       // Need to concat with zero vector for strict fp to avoid spurious
22866       // exceptions.
22867       // TODO: Should we just do this for non-strict as well?
22868       SDValue Tmp =
22869           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
22870       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
22871                         DAG.getIntPtrConstant(0, dl));
22872 
22873       if (IsStrict) {
22874         Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
22875                           {Chain, Src});
22876         Chain = Res.getValue(1);
22877       } else {
22878         Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
22879       }
22880 
22881       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22882                         DAG.getIntPtrConstant(0, dl));
22883 
22884       if (IsStrict)
22885         return DAG.getMergeValues({Res, Chain}, dl);
22886       return Res;
22887     }
22888 
22889     if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
22890       if (!Subtarget.hasVLX()) {
22891         // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the
22892         // type legalizer and then widened again by vector op legalization.
22893         if (!IsStrict)
22894           return SDValue();
22895 
22896         SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
22897         SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
22898                                   {Src, Zero, Zero, Zero});
22899         Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
22900                           {Chain, Tmp});
22901         SDValue Chain = Tmp.getValue(1);
22902         Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
22903                           DAG.getIntPtrConstant(0, dl));
22904         return DAG.getMergeValues({Tmp, Chain}, dl);
22905       }
22906 
22907       assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
22908       SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
22909                                 DAG.getUNDEF(MVT::v2f32));
22910       if (IsStrict) {
22911         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
22912                                 : X86ISD::STRICT_CVTTP2UI;
22913         return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
22914       }
22915       unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
22916       return DAG.getNode(Opc, dl, VT, Tmp);
22917     }
22918 
22919     // Generate optimized instructions for pre-AVX512 unsigned conversions from
22920     // vXf32/vXf64 to vXi32.
22921     if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
22922         (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
22923         (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
22924       assert(!IsSigned && "Expected unsigned conversion!");
22925       return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
22926     }
22927 
22928     return SDValue();
22929   }
22930 
22931   assert(!VT.isVector());
22932 
22933   bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
22934 
22935   if (!IsSigned && UseSSEReg) {
22936     // Conversions from f32/f64 with AVX512 should be legal.
22937     if (Subtarget.hasAVX512())
22938       return Op;
22939 
22940     // We can leverage the specific way the "cvttss2si/cvttsd2si" instruction
22941     // behaves on out of range inputs to generate optimized conversions.
22942     if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
22943                       (VT == MVT::i64 && Subtarget.is64Bit()))) {
22944       unsigned DstBits = VT.getScalarSizeInBits();
22945       APInt UIntLimit = APInt::getSignMask(DstBits);
22946       SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
22947                                         DAG.getConstant(UIntLimit, dl, VT));
22948       MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());
22949 
22950       // Calculate the converted result for values in the range:
22951       // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
22952       // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
22953       SDValue Small =
22954           DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
22955                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
22956       SDValue Big = DAG.getNode(
22957           X86ISD::CVTTS2SI, dl, VT,
22958           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
22959                       DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));
22960 
22961       // The "CVTTS2SI" instruction conveniently sets the sign bit if
22962       // and only if the value was out of range. So we can use that
22963       // as our indicator that we should use "Big" instead of "Small".
22964       //
22965       // Use "Small" if "IsOverflown" has all bits cleared
22966       // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
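      // E.g. (sketch, i32 on a 32-bit target) for %src = 3e9:
      //   Small = cvttss2si(%src) = 0x80000000, Big = cvttss2si(%src - 2^31),
      //   and Small | (Big & (Small >>arith 31)) reconstructs 3e9 as unsigned.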
22967       SDValue IsOverflown = DAG.getNode(
22968           ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
22969       return DAG.getNode(ISD::OR, dl, VT, Small,
22970                          DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
22971     }
22972 
22973     // Use default expansion for i64.
22974     if (VT == MVT::i64)
22975       return SDValue();
22976 
22977     assert(VT == MVT::i32 && "Unexpected VT!");
22978 
22979     // Promote i32 to i64 and use a signed operation on 64-bit targets.
22980     // FIXME: This does not generate an invalid exception if the input does not
22981     // fit in i32. PR44019
22982     if (Subtarget.is64Bit()) {
22983       if (IsStrict) {
22984         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
22985                           {Chain, Src});
22986         Chain = Res.getValue(1);
22987       } else
22988         Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
22989 
22990       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
22991       if (IsStrict)
22992         return DAG.getMergeValues({Res, Chain}, dl);
22993       return Res;
22994     }
22995 
22996     // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
22997     // use fisttp which will be handled later.
22998     if (!Subtarget.hasSSE3())
22999       return SDValue();
23000   }
23001 
23002   // Promote i16 to i32 if we can use a SSE operation or the type is f128.
23003   // FIXME: This does not generate an invalid exception if the input does not
23004   // fit in i16. PR44019
23005   if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
23006     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
23007     if (IsStrict) {
23008       Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
23009                         {Chain, Src});
23010       Chain = Res.getValue(1);
23011     } else
23012       Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
23013 
23014     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
23015     if (IsStrict)
23016       return DAG.getMergeValues({Res, Chain}, dl);
23017     return Res;
23018   }
23019 
23020   // If this is a FP_TO_SINT using SSEReg we're done.
23021   if (UseSSEReg && IsSigned)
23022     return Op;
23023 
23024   // fp128 needs to use a libcall.
23025   if (SrcVT == MVT::f128) {
23026     RTLIB::Libcall LC;
23027     if (IsSigned)
23028       LC = RTLIB::getFPTOSINT(SrcVT, VT);
23029     else
23030       LC = RTLIB::getFPTOUINT(SrcVT, VT);
23031 
23032     MakeLibCallOptions CallOptions;
23033     std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
23034                                                   SDLoc(Op), Chain);
23035 
23036     if (IsStrict)
23037       return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
23038 
23039     return Tmp.first;
23040   }
23041 
23042   // Fall back to X87.
23043   if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
23044     if (IsStrict)
23045       return DAG.getMergeValues({V, Chain}, dl);
23046     return V;
23047   }
23048 
23049   llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
23050 }
23051 
23052 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
23053                                              SelectionDAG &DAG) const {
23054   SDValue Src = Op.getOperand(0);
23055   MVT SrcVT = Src.getSimpleValueType();
23056 
23057   if (SrcVT == MVT::f16)
23058     return SDValue();
23059 
23060   // If the source is in an SSE register, the node is Legal.
23061   if (isScalarFPTypeInSSEReg(SrcVT))
23062     return Op;
23063 
23064   return LRINT_LLRINTHelper(Op.getNode(), DAG);
23065 }
23066 
23067 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
23068                                               SelectionDAG &DAG) const {
23069   EVT DstVT = N->getValueType(0);
23070   SDValue Src = N->getOperand(0);
23071   EVT SrcVT = Src.getValueType();
23072 
23073   if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
23074     // f16 must be promoted before using the lowering in this routine.
23075     // fp128 does not use this lowering.
23076     return SDValue();
23077   }
23078 
23079   SDLoc DL(N);
23080   SDValue Chain = DAG.getEntryNode();
23081 
23082   bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
23083 
23084   // If we're converting from SSE, the stack slot needs to hold both types.
23085   // Otherwise it only needs to hold the DstVT.
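  // Rough shape of the SSE path (schematic): store the f32/f64 source to the
  // slot, X86ISD::FLD it back as f80, then X86ISD::FIST stores the i64 rounded
  // with the current rounding mode, and we load that back as the result.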
23086   EVT OtherVT = UseSSE ? SrcVT : DstVT;
23087   SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
23088   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
23089   MachinePointerInfo MPI =
23090       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
23091 
23092   if (UseSSE) {
23093     assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
23094     Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
23095     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
23096     SDValue Ops[] = { Chain, StackPtr };
23097 
23098     Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
23099                                   /*Align*/ std::nullopt,
23100                                   MachineMemOperand::MOLoad);
23101     Chain = Src.getValue(1);
23102   }
23103 
23104   SDValue StoreOps[] = { Chain, Src, StackPtr };
23105   Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
23106                                   StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
23107                                   MachineMemOperand::MOStore);
23108 
23109   return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
23110 }
23111 
23112 SDValue
23113 X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
23114   // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
23115   // but making use of X86 specifics to produce better instruction sequences.
23116   SDNode *Node = Op.getNode();
23117   bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
23118   unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
23119   SDLoc dl(SDValue(Node, 0));
23120   SDValue Src = Node->getOperand(0);
23121 
23122   // There are three types involved here: SrcVT is the source floating point
23123   // type, DstVT is the type of the result, and TmpVT is the result of the
23124   // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
23125   // DstVT).
23126   EVT SrcVT = Src.getValueType();
23127   EVT DstVT = Node->getValueType(0);
23128   EVT TmpVT = DstVT;
23129 
23130   // This code is only for floats and doubles. Fall back to generic code for
23131   // anything else.
23132   if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftFP16(SrcVT))
23133     return SDValue();
23134 
23135   EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
23136   unsigned SatWidth = SatVT.getScalarSizeInBits();
23137   unsigned DstWidth = DstVT.getScalarSizeInBits();
23138   unsigned TmpWidth = TmpVT.getScalarSizeInBits();
23139   assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
23140          "Expected saturation width smaller than result width");
23141 
23142   // Promote result of FP_TO_*INT to at least 32 bits.
23143   if (TmpWidth < 32) {
23144     TmpVT = MVT::i32;
23145     TmpWidth = 32;
23146   }
23147 
23148   // Promote unsigned 32-bit conversions to 64-bit, because that allows us to
23149   // use a native signed conversion instead.
23150   if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
23151     TmpVT = MVT::i64;
23152     TmpWidth = 64;
23153   }
23154 
23155   // If the saturation width is smaller than the size of the temporary result,
23156   // we can always use signed conversion, which is native.
23157   if (SatWidth < TmpWidth)
23158     FpToIntOpcode = ISD::FP_TO_SINT;
23159 
23160   // Determine minimum and maximum integer values and their corresponding
23161   // floating-point values.
23162   APInt MinInt, MaxInt;
23163   if (IsSigned) {
23164     MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
23165     MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
23166   } else {
23167     MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
23168     MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
23169   }
23170 
23171   APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
23172   APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
23173 
23174   APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
23175     MinInt, IsSigned, APFloat::rmTowardZero);
23176   APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
23177     MaxInt, IsSigned, APFloat::rmTowardZero);
23178   bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
23179                           && !(MaxStatus & APFloat::opStatus::opInexact);
23180 
23181   SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
23182   SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
23183 
23184   // If the integer bounds are exactly representable as floats, emit a
23185   // min+max+fptoi sequence. Otherwise use comparisons and selects.
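  // E.g. (sketch): llvm.fptosi.sat.i32.f64(%x) has exactly representable
  // bounds, so it becomes roughly:
  //   %lo  = FMAX(%x, -2147483648.0)   ; if %x is NaN this yields the bound
  //   %hi  = FMINC(%lo, 2147483647.0)  ; clamp from above, NaN can't occur
  //   %res = fptosi %hi to i32
  //   select (%x uno %x), 0, %res      ; signed case: NaN must yield 0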
23186   if (AreExactFloatBounds) {
23187     if (DstVT != TmpVT) {
23188       // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
23189       SDValue MinClamped = DAG.getNode(
23190         X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
23191       // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
23192       SDValue BothClamped = DAG.getNode(
23193         X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
23194       // Convert clamped value to integer.
23195       SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
23196 
23197       // NaN will become INDVAL, with the top bit set and the rest zero.
23198       // Truncation will discard the top bit, resulting in zero.
23199       return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
23200     }
23201 
23202     // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
23203     SDValue MinClamped = DAG.getNode(
23204       X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
23205     // Clamp by MaxFloat from above. NaN cannot occur.
23206     SDValue BothClamped = DAG.getNode(
23207       X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
23208     // Convert clamped value to integer.
23209     SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
23210 
23211     if (!IsSigned) {
23212       // In the unsigned case we're done, because we mapped NaN to MinFloat,
23213       // which is zero.
23214       return FpToInt;
23215     }
23216 
23217     // Otherwise, select zero if Src is NaN.
23218     SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
23219     return DAG.getSelectCC(
23220       dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
23221   }
23222 
23223   SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
23224   SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
23225 
23226   // Result of direct conversion, which may be selected away.
23227   SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
23228 
23229   if (DstVT != TmpVT) {
23230     // NaN will become INDVAL, with the top bit set and the rest zero.
23231     // Truncation will discard the top bit, resulting in zero.
23232     FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
23233   }
23234 
23235   SDValue Select = FpToInt;
23236   // For signed conversions where we saturate to the same size as the
23237   // result type of the fptoi instructions, INDVAL coincides with integer
23238   // minimum, so we don't need to explicitly check it.
23239   if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
23240     // If Src ULT MinFloat, select MinInt. In particular, this also selects
23241     // MinInt if Src is NaN.
23242     Select = DAG.getSelectCC(
23243       dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
23244   }
23245 
23246   // If Src OGT MaxFloat, select MaxInt.
23247   Select = DAG.getSelectCC(
23248     dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
23249 
23250   // In the unsigned case we are done, because we mapped NaN to MinInt, which
23251   // is already zero. The promoted case was already handled above.
23252   if (!IsSigned || DstVT != TmpVT) {
23253     return Select;
23254   }
23255 
23256   // Otherwise, select 0 if Src is NaN.
23257   SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
23258   return DAG.getSelectCC(
23259     dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
23260 }
23261 
23262 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
23263   bool IsStrict = Op->isStrictFPOpcode();
23264 
23265   SDLoc DL(Op);
23266   MVT VT = Op.getSimpleValueType();
23267   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23268   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
23269   MVT SVT = In.getSimpleValueType();
23270 
23271   // Let f16->f80 get lowered to a libcall, except for Darwin, where we should
23272   // lower it to an fp_extend via f32 (as only f16<->f32 libcalls are available).
23273   if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
23274                           !Subtarget.getTargetTriple().isOSDarwin()))
23275     return SDValue();
23276 
23277   if (SVT == MVT::f16) {
23278     if (Subtarget.hasFP16())
23279       return Op;
23280 
23281     if (VT != MVT::f32) {
23282       if (IsStrict)
23283         return DAG.getNode(
23284             ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
23285             {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
23286                                 {MVT::f32, MVT::Other}, {Chain, In})});
23287 
23288       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
23289                          DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
23290     }
23291 
23292     if (!Subtarget.hasF16C()) {
23293       if (!Subtarget.getTargetTriple().isOSDarwin())
23294         return SDValue();
23295 
23296       assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
23297 
23298       // Need a libcall, but ABI for f16 is soft-float on MacOS.
23299       TargetLowering::CallLoweringInfo CLI(DAG);
23300       Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
23301 
23302       In = DAG.getBitcast(MVT::i16, In);
23303       TargetLowering::ArgListTy Args;
23304       TargetLowering::ArgListEntry Entry;
23305       Entry.Node = In;
23306       Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
23307       Entry.IsSExt = false;
23308       Entry.IsZExt = true;
23309       Args.push_back(Entry);
23310 
23311       SDValue Callee = DAG.getExternalSymbol(
23312           getLibcallName(RTLIB::FPEXT_F16_F32),
23313           getPointerTy(DAG.getDataLayout()));
23314       CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
23315           CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
23316           std::move(Args));
23317 
23318       SDValue Res;
23319       std::tie(Res,Chain) = LowerCallTo(CLI);
23320       if (IsStrict)
23321         Res = DAG.getMergeValues({Res, Chain}, DL);
23322 
23323       return Res;
23324     }
23325 
23326     In = DAG.getBitcast(MVT::i16, In);
23327     In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
23328                      getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
23329                      DAG.getIntPtrConstant(0, DL));
23330     SDValue Res;
23331     if (IsStrict) {
23332       Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
23333                         {Chain, In});
23334       Chain = Res.getValue(1);
23335     } else {
23336       Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
23337                         DAG.getTargetConstant(4, DL, MVT::i32));
23338     }
23339     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
23340                       DAG.getIntPtrConstant(0, DL));
23341     if (IsStrict)
23342       return DAG.getMergeValues({Res, Chain}, DL);
23343     return Res;
23344   }
23345 
23346   if (!SVT.isVector())
23347     return Op;
23348 
23349   if (SVT.getVectorElementType() == MVT::f16) {
23350     assert(Subtarget.hasF16C() && "Unexpected features!");
23351     if (SVT == MVT::v2f16)
23352       In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
23353                        DAG.getUNDEF(MVT::v2f16));
23354     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
23355                               DAG.getUNDEF(MVT::v4f16));
23356     if (IsStrict)
23357       return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
23358                          {Op->getOperand(0), Res});
23359     return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
23360   } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
23361     return Op;
23362   }
23363 
23364   assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
23365 
23366   SDValue Res =
23367       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
23368   if (IsStrict)
23369     return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
23370                        {Op->getOperand(0), Res});
23371   return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
23372 }
23373 
23374 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
23375   bool IsStrict = Op->isStrictFPOpcode();
23376 
23377   SDLoc DL(Op);
23378   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23379   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
23380   MVT VT = Op.getSimpleValueType();
23381   MVT SVT = In.getSimpleValueType();
23382 
23383   if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
23384     return SDValue();
23385 
23386   if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
23387       !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
23388     if (!Subtarget.getTargetTriple().isOSDarwin())
23389       return SDValue();
23390 
23391     // We need a libcall but the ABI for f16 libcalls on MacOS is soft.
23392     TargetLowering::CallLoweringInfo CLI(DAG);
23393     Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
23394 
23395     TargetLowering::ArgListTy Args;
23396     TargetLowering::ArgListEntry Entry;
23397     Entry.Node = In;
23398     Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
23399     Entry.IsSExt = false;
23400     Entry.IsZExt = true;
23401     Args.push_back(Entry);
23402 
23403     SDValue Callee = DAG.getExternalSymbol(
23404         getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
23405                                        : RTLIB::FPROUND_F32_F16),
23406         getPointerTy(DAG.getDataLayout()));
23407     CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
23408         CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
23409         std::move(Args));
23410 
23411     SDValue Res;
23412     std::tie(Res, Chain) = LowerCallTo(CLI);
23413 
23414     Res = DAG.getBitcast(MVT::f16, Res);
23415 
23416     if (IsStrict)
23417       Res = DAG.getMergeValues({Res, Chain}, DL);
23418 
23419     return Res;
23420   }
23421 
23422   if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
23423     if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
23424       return SDValue();
23425 
23426     if (VT.isVector())
23427       return Op;
23428 
23429     SDValue Res;
23430     SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
23431                                         MVT::i32);
23432     if (IsStrict) {
23433       Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
23434                         DAG.getConstantFP(0, DL, MVT::v4f32), In,
23435                         DAG.getIntPtrConstant(0, DL));
23436       Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
23437                         {Chain, Res, Rnd});
23438       Chain = Res.getValue(1);
23439     } else {
23440       // FIXME: Should we use zeros for upper elements for non-strict?
23441       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
23442       Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
23443     }
23444 
23445     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
23446                       DAG.getIntPtrConstant(0, DL));
23447     Res = DAG.getBitcast(MVT::f16, Res);
23448 
23449     if (IsStrict)
23450       return DAG.getMergeValues({Res, Chain}, DL);
23451 
23452     return Res;
23453   }
23454 
23455   return Op;
23456 }
23457 
23458 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
23459   bool IsStrict = Op->isStrictFPOpcode();
23460   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
23461   assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
23462          "Unexpected VT!");
23463 
23464   SDLoc dl(Op);
23465   SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
23466                             DAG.getConstant(0, dl, MVT::v8i16), Src,
23467                             DAG.getIntPtrConstant(0, dl));
23468 
23469   SDValue Chain;
23470   if (IsStrict) {
23471     Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
23472                       {Op.getOperand(0), Res});
23473     Chain = Res.getValue(1);
23474   } else {
23475     Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
23476   }
23477 
23478   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
23479                     DAG.getIntPtrConstant(0, dl));
23480 
23481   if (IsStrict)
23482     return DAG.getMergeValues({Res, Chain}, dl);
23483 
23484   return Res;
23485 }
23486 
23487 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
23488   bool IsStrict = Op->isStrictFPOpcode();
23489   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
23490   assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
23491          "Unexpected VT!");
23492 
23493   SDLoc dl(Op);
23494   SDValue Res, Chain;
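  // Note (illustrative): the immediate 4 passed to CVTPS2PH below sets bit 2 of
  // its rounding-control byte, i.e. "round using the current MXCSR rounding
  // mode" rather than a statically encoded mode.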
23495   if (IsStrict) {
23496     Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
23497                       DAG.getConstantFP(0, dl, MVT::v4f32), Src,
23498                       DAG.getIntPtrConstant(0, dl));
23499     Res = DAG.getNode(
23500         X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
23501         {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
23502     Chain = Res.getValue(1);
23503   } else {
23504     // FIXME: Should we use zeros for upper elements for non-strict?
23505     Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
23506     Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
23507                       DAG.getTargetConstant(4, dl, MVT::i32));
23508   }
23509 
23510   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
23511                     DAG.getIntPtrConstant(0, dl));
23512 
23513   if (IsStrict)
23514     return DAG.getMergeValues({Res, Chain}, dl);
23515 
23516   return Res;
23517 }
23518 
23519 SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
23520                                            SelectionDAG &DAG) const {
23521   SDLoc DL(Op);
23522   MakeLibCallOptions CallOptions;
23523   RTLIB::Libcall LC =
23524       RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
23525   SDValue Res =
23526       makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
23527   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16,
23528                      DAG.getBitcast(MVT::i32, Res));
23529 }
23530 
23531 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23532 /// vector operation in place of the typical scalar operation.
23533 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
23534                                          const X86Subtarget &Subtarget) {
23535   // If both operands have other uses, this is probably not profitable.
23536   SDValue LHS = Op.getOperand(0);
23537   SDValue RHS = Op.getOperand(1);
23538   if (!LHS.hasOneUse() && !RHS.hasOneUse())
23539     return Op;
23540 
23541   // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
23542   bool IsFP = Op.getSimpleValueType().isFloatingPoint();
23543   if (IsFP && !Subtarget.hasSSE3())
23544     return Op;
23545   if (!IsFP && !Subtarget.hasSSSE3())
23546     return Op;
23547 
23548   // Extract from a common vector.
23549   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23550       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23551       LHS.getOperand(0) != RHS.getOperand(0) ||
23552       !isa<ConstantSDNode>(LHS.getOperand(1)) ||
23553       !isa<ConstantSDNode>(RHS.getOperand(1)) ||
23554       !shouldUseHorizontalOp(true, DAG, Subtarget))
23555     return Op;
23556 
23557   // Allow commuted 'hadd' ops.
23558   // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
23559   unsigned HOpcode;
23560   switch (Op.getOpcode()) {
23561     case ISD::ADD: HOpcode = X86ISD::HADD; break;
23562     case ISD::SUB: HOpcode = X86ISD::HSUB; break;
23563     case ISD::FADD: HOpcode = X86ISD::FHADD; break;
23564     case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
23565     default:
23566       llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
23567   }
23568   unsigned LExtIndex = LHS.getConstantOperandVal(1);
23569   unsigned RExtIndex = RHS.getConstantOperandVal(1);
23570   if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
23571       (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
23572     std::swap(LExtIndex, RExtIndex);
23573 
23574   if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
23575     return Op;
23576 
23577   SDValue X = LHS.getOperand(0);
23578   EVT VecVT = X.getValueType();
23579   unsigned BitWidth = VecVT.getSizeInBits();
23580   unsigned NumLanes = BitWidth / 128;
23581   unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
23582   assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
23583          "Not expecting illegal vector widths here");
23584 
23585   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
23586   // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
23587   SDLoc DL(Op);
23588   if (BitWidth == 256 || BitWidth == 512) {
23589     unsigned LaneIdx = LExtIndex / NumEltsPerLane;
23590     X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
23591     LExtIndex %= NumEltsPerLane;
23592   }
23593 
23594   // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
23595   // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
23596   // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
23597   // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
23598   SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
23599   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
23600                      DAG.getIntPtrConstant(LExtIndex / 2, DL));
23601 }
23602 
23603 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23604 /// vector operation in place of the typical scalar operation.
23605 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
23606   assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
23607          "Only expecting float/double");
23608   return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
23609 }
23610 
23611 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
23612 /// This mode isn't supported in hardware on X86. But as long as we aren't
23613 /// compiling with trapping math, we can emulate this with
23614 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
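/// A rough worked example of the trick above for f32: nextafter(0.5, 0.0) is
/// about 0.49999997, so round(2.5) becomes trunc(2.5 + 0.49999997) = trunc(3.0)
/// = 3.0 (ties away from zero), while round(2.4) becomes trunc(~2.9) = 2.0, and
/// the copysign mirrors this behaviour for negative inputs.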
23615 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
23616   SDValue N0 = Op.getOperand(0);
23617   SDLoc dl(Op);
23618   MVT VT = Op.getSimpleValueType();
23619 
23620   // N0 += copysign(nextafter(0.5, 0.0), N0)
23621   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23622   bool Ignored;
23623   APFloat Point5Pred = APFloat(0.5f);
23624   Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
23625   Point5Pred.next(/*nextDown*/true);
23626 
23627   SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
23628                               DAG.getConstantFP(Point5Pred, dl, VT), N0);
23629   N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
23630 
23631   // Truncate the result to remove fraction.
23632   return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
23633 }
23634 
23635 /// The only differences between FABS and FNEG are the mask and the logic op.
23636 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
23637 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
23638   assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
23639          "Wrong opcode for lowering FABS or FNEG.");
23640 
23641   bool IsFABS = (Op.getOpcode() == ISD::FABS);
23642 
23643   // If this is a FABS and it has an FNEG user, bail out to fold the combination
23644   // into an FNABS. We'll lower the FABS after that if it is still in use.
23645   if (IsFABS)
23646     for (SDNode *User : Op->uses())
23647       if (User->getOpcode() == ISD::FNEG)
23648         return Op;
23649 
23650   SDLoc dl(Op);
23651   MVT VT = Op.getSimpleValueType();
23652 
23653   bool IsF128 = (VT == MVT::f128);
23654   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23655          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23656          "Unexpected type in LowerFABSorFNEG");
23657 
23658   // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
23659   // decide if we should generate a 16-byte constant mask when we only need 4 or
23660   // 8 bytes for the scalar case.
23661 
23662   // There are no scalar bitwise logical SSE/AVX instructions, so we
23663   // generate a 16-byte vector constant and logic op even for the scalar case.
23664   // Using a 16-byte mask allows folding the load of the mask with
23665   // the logic op, so it can save (~4 bytes) on code size.
23666   bool IsFakeVector = !VT.isVector() && !IsF128;
23667   MVT LogicVT = VT;
23668   if (IsFakeVector)
23669     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
23670               : (VT == MVT::f32) ? MVT::v4f32
23671                                  : MVT::v8f16;
23672 
23673   unsigned EltBits = VT.getScalarSizeInBits();
23674   // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
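  // For example (f32, illustrative): FABS ANDs with 0x7FFFFFFF to clear the
  // sign bit, FNEG XORs with 0x80000000 to flip it, and FNABS (handled below)
  // ORs with 0x80000000 to force it on.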
23675   APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
23676                            APInt::getSignMask(EltBits);
23677   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23678   SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
23679 
23680   SDValue Op0 = Op.getOperand(0);
23681   bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
23682   unsigned LogicOp = IsFABS  ? X86ISD::FAND :
23683                      IsFNABS ? X86ISD::FOR  :
23684                                X86ISD::FXOR;
23685   SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
23686 
23687   if (VT.isVector() || IsF128)
23688     return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23689 
23690   // For the scalar case extend to a 128-bit vector, perform the logic op,
23691   // and extract the scalar result back out.
23692   Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
23693   SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23694   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
23695                      DAG.getIntPtrConstant(0, dl));
23696 }
23697 
23698 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
23699   SDValue Mag = Op.getOperand(0);
23700   SDValue Sign = Op.getOperand(1);
23701   SDLoc dl(Op);
23702 
23703   // If the sign operand is smaller, extend it first.
23704   MVT VT = Op.getSimpleValueType();
23705   if (Sign.getSimpleValueType().bitsLT(VT))
23706     Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
23707 
23708   // And if it is bigger, shrink it first.
23709   if (Sign.getSimpleValueType().bitsGT(VT))
23710     Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
23711                        DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
23712 
23713   // At this point the operands and the result should have the same
23714   // type, and that won't be f80 since that is not custom lowered.
23715   bool IsF128 = (VT == MVT::f128);
23716   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23717          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23718          "Unexpected type in LowerFCOPYSIGN");
23719 
23720   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23721 
23722   // Perform all scalar logic operations as 16-byte vectors because there are no
23723   // scalar FP logic instructions in SSE.
23724   // TODO: This isn't necessary. If we used scalar types, we might avoid some
23725   // unnecessary splats, but we might miss load folding opportunities. Should
23726   // this decision be based on OptimizeForSize?
23727   bool IsFakeVector = !VT.isVector() && !IsF128;
23728   MVT LogicVT = VT;
23729   if (IsFakeVector)
23730     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
23731               : (VT == MVT::f32) ? MVT::v4f32
23732                                  : MVT::v8f16;
23733 
23734   // The mask constants are automatically splatted for vector types.
23735   unsigned EltSizeInBits = VT.getScalarSizeInBits();
23736   SDValue SignMask = DAG.getConstantFP(
23737       APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
23738   SDValue MagMask = DAG.getConstantFP(
23739       APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
23740 
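  // In raw bit terms this computes, roughly (shown for f32):
  //   copysign(mag, sign) = (bits(mag) & 0x7FFFFFFF) | (bits(sign) & 0x80000000)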
23741   // First, clear all bits but the sign bit from the second operand (sign).
23742   if (IsFakeVector)
23743     Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
23744   SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
23745 
23746   // Next, clear the sign bit from the first operand (magnitude).
23747   // TODO: If we had general constant folding for FP logic ops, this check
23748   // wouldn't be necessary.
23749   SDValue MagBits;
23750   if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
23751     APFloat APF = Op0CN->getValueAPF();
23752     APF.clearSign();
23753     MagBits = DAG.getConstantFP(APF, dl, LogicVT);
23754   } else {
23755     // If the magnitude operand wasn't a constant, we need to AND out the sign.
23756     if (IsFakeVector)
23757       Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
23758     MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
23759   }
23760 
23761   // OR the magnitude value with the sign bit.
23762   SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
23763   return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
23764                                           DAG.getIntPtrConstant(0, dl));
23765 }
23766 
23767 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
23768   SDValue N0 = Op.getOperand(0);
23769   SDLoc dl(Op);
23770   MVT VT = Op.getSimpleValueType();
23771 
23772   MVT OpVT = N0.getSimpleValueType();
23773   assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
23774          "Unexpected type for FGETSIGN");
23775 
23776   // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
23777   MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
23778   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
23779   Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
23780   Res = DAG.getZExtOrTrunc(Res, dl, VT);
23781   Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
23782   return Res;
23783 }
23784 
23785 /// Helper for attempting to create a X86ISD::BT node.
23786 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
23787   // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
23788   // instruction.  Since the shift amount is in-range-or-undefined, we know
23789   // that doing a bittest on the i32 value is ok.  We extend to i32 because
23790   // the encoding for the i16 version is larger than the i32 version.
23791   // Also promote i16 to i32 for performance / code size reasons.
23792   if (Src.getValueType().getScalarSizeInBits() < 32)
23793     Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
23794 
23795   // No legal type found, give up.
23796   if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
23797     return SDValue();
23798 
23799   // See if we can use the 32-bit instruction instead of the 64-bit one for a
23800   // shorter encoding. Since the former takes the modulo 32 of BitNo and the
23801   // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
23802   // known to be zero.
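  // Illustrative: "bt %ecx, %eax" tests bit (ecx mod 32) of EAX, while
  // "bt %rcx, %rax" tests bit (rcx mod 64) of RAX, so truncating to the 32-bit
  // form is only safe when bit 5 of BitNo is known zero (the two moduli then
  // agree).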
23803   if (Src.getValueType() == MVT::i64 &&
23804       DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
23805     Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
23806 
23807   // If the operand types disagree, extend the shift amount to match.  Since
23808   // BT ignores high bits (like shifts) we can use anyextend.
23809   if (Src.getValueType() != BitNo.getValueType()) {
23810     // Peek through a mask/modulo operation.
23811     // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
23812     // we probably need a better IsDesirableToPromoteOp to handle this as well.
23813     if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
23814       BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
23815                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23816                                       BitNo.getOperand(0)),
23817                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23818                                       BitNo.getOperand(1)));
23819     else
23820       BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
23821   }
23822 
23823   return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
23824 }
23825 
23826 /// Helper for creating a X86ISD::SETCC node.
23827 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
23828                         SelectionDAG &DAG) {
23829   return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
23830                      DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
23831 }
23832 
23833 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
23834 /// style scalarized (associative) reduction patterns. Partial reductions
23835 /// are supported when the pointer SrcMask is non-null.
23836 /// TODO - move this to SelectionDAG?
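/// For example (illustrative), with BinOp == ISD::OR and a v4i32 source X,
///   or (extractelt X, 0), (or (extractelt X, 1),
///                              (or (extractelt X, 2), (extractelt X, 3)))
/// matches with SrcOps == { X } and, if requested, an all-ones element mask.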
23837 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
23838                                  SmallVectorImpl<SDValue> &SrcOps,
23839                                  SmallVectorImpl<APInt> *SrcMask = nullptr) {
23840   SmallVector<SDValue, 8> Opnds;
23841   DenseMap<SDValue, APInt> SrcOpMap;
23842   EVT VT = MVT::Other;
23843 
23844   // Recognize a special case where a vector is cast into a wide integer to
23845   // test all 0s.
23846   assert(Op.getOpcode() == unsigned(BinOp) &&
23847          "Unexpected bit reduction opcode");
23848   Opnds.push_back(Op.getOperand(0));
23849   Opnds.push_back(Op.getOperand(1));
23850 
23851   for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
23852     SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
23853     // BFS traverse all BinOp operands.
23854     if (I->getOpcode() == unsigned(BinOp)) {
23855       Opnds.push_back(I->getOperand(0));
23856       Opnds.push_back(I->getOperand(1));
23857       // Re-evaluate the number of nodes to be traversed.
23858       e += 2; // 2 more nodes (LHS and RHS) are pushed.
23859       continue;
23860     }
23861 
23862     // Quit if this is not an EXTRACT_VECTOR_ELT.
23863     if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23864       return false;
23865 
23866     // Quit if the index is not a constant.
23867     auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
23868     if (!Idx)
23869       return false;
23870 
23871     SDValue Src = I->getOperand(0);
23872     DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
23873     if (M == SrcOpMap.end()) {
23874       VT = Src.getValueType();
23875       // Quit if not the same type.
23876       if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
23877         return false;
23878       unsigned NumElts = VT.getVectorNumElements();
23879       APInt EltCount = APInt::getZero(NumElts);
23880       M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
23881       SrcOps.push_back(Src);
23882     }
23883 
23884     // Quit if element already used.
23885     unsigned CIdx = Idx->getZExtValue();
23886     if (M->second[CIdx])
23887       return false;
23888     M->second.setBit(CIdx);
23889   }
23890 
23891   if (SrcMask) {
23892     // Collect the source partial masks.
23893     for (SDValue &SrcOp : SrcOps)
23894       SrcMask->push_back(SrcOpMap[SrcOp]);
23895   } else {
23896     // Quit if not all elements are used.
23897     for (const auto &I : SrcOpMap)
23898       if (!I.second.isAllOnes())
23899         return false;
23900   }
23901 
23902   return true;
23903 }
23904 
23905 // Helper function for comparing all bits of a vector against zero.
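// Roughly: with SSE4.1 this emits PTEST V, V (ZF is set iff all bits are zero);
// otherwise it falls back to CMP(MOVMSK(PCMPEQB(V, 0)), 0xFFFF). Sub-128-bit
// inputs are instead bitcast to an integer and compared with 0.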
23906 static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
23907                                   const APInt &Mask,
23908                                   const X86Subtarget &Subtarget,
23909                                   SelectionDAG &DAG, X86::CondCode &X86CC) {
23910   EVT VT = V.getValueType();
23911   unsigned ScalarSize = VT.getScalarSizeInBits();
23912   if (Mask.getBitWidth() != ScalarSize) {
23913     assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
23914     return SDValue();
23915   }
23916 
23917   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23918   X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
23919 
23920   auto MaskBits = [&](SDValue Src) {
23921     if (Mask.isAllOnes())
23922       return Src;
23923     EVT SrcVT = Src.getValueType();
23924     SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
23925     return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
23926   };
23927 
23928   // For sub-128-bit vectors, cast to a (legal) integer and compare with zero.
23929   if (VT.getSizeInBits() < 128) {
23930     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
23931     if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT))
23932       return SDValue();
23933     return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
23934                        DAG.getBitcast(IntVT, MaskBits(V)),
23935                        DAG.getConstant(0, DL, IntVT));
23936   }
23937 
23938   // Quit if not splittable to 128/256-bit vector.
23939   if (!isPowerOf2_32(VT.getSizeInBits()))
23940     return SDValue();
23941 
23942   // Split down to 128/256-bit vector.
23943   unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
23944   while (VT.getSizeInBits() > TestSize) {
23945     auto Split = DAG.SplitVector(V, DL);
23946     VT = Split.first.getValueType();
23947     V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
23948   }
23949 
23950   bool UsePTEST = Subtarget.hasSSE41();
23951   if (UsePTEST) {
23952     MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
23953     V = DAG.getBitcast(TestVT, MaskBits(V));
23954     return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
23955   }
23956 
23957   // Without PTEST, a masked v2i64 or-reduction is not faster than
23958   // scalarization.
23959   if (!Mask.isAllOnes() && VT.getScalarSizeInBits() > 32)
23960     return SDValue();
23961 
23962   V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
23963   V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
23964                   getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
23965   V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
23966   return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
23967                      DAG.getConstant(0xFFFF, DL, MVT::i32));
23968 }
23969 
23970 // Check whether an OR'd reduction tree is PTEST-able, or if we can fallback to
23971 // CMP(MOVMSK(PCMPEQB(X,0))).
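// For example (sketch): for a v2i64 X,
//   (or (extractelt X, 0), (extractelt X, 1)) eq/ne 0
// becomes PTEST X, X (or the MOVMSK fallback) with an E/NE condition code.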
23972 static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
23973                                       const SDLoc &DL,
23974                                       const X86Subtarget &Subtarget,
23975                                       SelectionDAG &DAG, SDValue &X86CC) {
23976   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23977 
23978   if (!Subtarget.hasSSE2() || !Op->hasOneUse())
23979     return SDValue();
23980 
23981   // Check whether we're masking/truncating an OR-reduction result, in which
23982   // case track the masked bits.
23983   APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
23984   switch (Op.getOpcode()) {
23985   case ISD::TRUNCATE: {
23986     SDValue Src = Op.getOperand(0);
23987     Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
23988                                 Op.getScalarValueSizeInBits());
23989     Op = Src;
23990     break;
23991   }
23992   case ISD::AND: {
23993     if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
23994       Mask = Cst->getAPIntValue();
23995       Op = Op.getOperand(0);
23996     }
23997     break;
23998   }
23999   }
24000 
24001   SmallVector<SDValue, 8> VecIns;
24002   if (Op.getOpcode() == ISD::OR && matchScalarReduction(Op, ISD::OR, VecIns)) {
24003     EVT VT = VecIns[0].getValueType();
24004     assert(llvm::all_of(VecIns,
24005                         [VT](SDValue V) { return VT == V.getValueType(); }) &&
24006            "Reduction source vector mismatch");
24007 
24008     // Quit if less than 128-bits or not splittable to 128/256-bit vector.
24009     if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
24010       return SDValue();
24011 
24012     // If more than one full vector is evaluated, OR them first before PTEST.
24013     for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
24014          Slot += 2, e += 1) {
24015       // Each iteration will OR 2 nodes and append the result until there is
24016       // only 1 node left, i.e. the final OR'd value of all vectors.
24017       SDValue LHS = VecIns[Slot];
24018       SDValue RHS = VecIns[Slot + 1];
24019       VecIns.push_back(DAG.getNode(ISD::OR, DL, VT, LHS, RHS));
24020     }
24021 
24022     X86::CondCode CCode;
24023     if (SDValue V = LowerVectorAllZero(DL, VecIns.back(), CC, Mask, Subtarget,
24024                                        DAG, CCode)) {
24025       X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
24026       return V;
24027     }
24028   }
24029 
24030   if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
24031     ISD::NodeType BinOp;
24032     if (SDValue Match =
24033             DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
24034       X86::CondCode CCode;
24035       if (SDValue V =
24036               LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, CCode)) {
24037         X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
24038         return V;
24039       }
24040     }
24041   }
24042 
24043   return SDValue();
24044 }
24045 
24046 /// Return true if \c Op has a use that doesn't just read flags.
24047 static bool hasNonFlagsUse(SDValue Op) {
24048   for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
24049        ++UI) {
24050     SDNode *User = *UI;
24051     unsigned UOpNo = UI.getOperandNo();
24052     if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
24053       // Look past the truncate.
24054       UOpNo = User->use_begin().getOperandNo();
24055       User = *User->use_begin();
24056     }
24057 
24058     if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
24059         !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
24060       return true;
24061   }
24062   return false;
24063 }
24064 
24065 // Transform to an x86-specific ALU node with flags if there is a chance of
24066 // using an RMW op or only the flags are used. Otherwise, leave
24067 // the node alone and emit a 'cmp' or 'test' instruction.
24068 static bool isProfitableToUseFlagOp(SDValue Op) {
24069   for (SDNode *U : Op->uses())
24070     if (U->getOpcode() != ISD::CopyToReg &&
24071         U->getOpcode() != ISD::SETCC &&
24072         U->getOpcode() != ISD::STORE)
24073       return false;
24074 
24075   return true;
24076 }
24077 
24078 /// Emit nodes that will be selected as "test Op0,Op0", or something
24079 /// equivalent.
24080 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
24081                         SelectionDAG &DAG, const X86Subtarget &Subtarget) {
24082   // CF and OF aren't always set the way we want. Determine which
24083   // of these we need.
24084   bool NeedCF = false;
24085   bool NeedOF = false;
24086   switch (X86CC) {
24087   default: break;
24088   case X86::COND_A: case X86::COND_AE:
24089   case X86::COND_B: case X86::COND_BE:
24090     NeedCF = true;
24091     break;
24092   case X86::COND_G: case X86::COND_GE:
24093   case X86::COND_L: case X86::COND_LE:
24094   case X86::COND_O: case X86::COND_NO: {
24095     // Check if we really need to set the
24096     // Overflow flag. If NoSignedWrap is present
24097     // that is not actually needed.
24098     switch (Op->getOpcode()) {
24099     case ISD::ADD:
24100     case ISD::SUB:
24101     case ISD::MUL:
24102     case ISD::SHL:
24103       if (Op.getNode()->getFlags().hasNoSignedWrap())
24104         break;
24105       [[fallthrough]];
24106     default:
24107       NeedOF = true;
24108       break;
24109     }
24110     break;
24111   }
24112   }
24113   // See if we can use the EFLAGS value from the operand instead of
24114   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
24115   // we prove that the arithmetic won't overflow, we can't use OF or CF.
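  // For example (sketch): for "t = x - y; br (t == 0)" the subtraction can be
  // re-emitted below as a flag-producing X86ISD::SUB whose ZF feeds the branch,
  // so no separate TEST/CMP is needed.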
24116   if (Op.getResNo() != 0 || NeedOF || NeedCF) {
24117     // Emit a CMP with 0, which is the TEST pattern.
24118     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
24119                        DAG.getConstant(0, dl, Op.getValueType()));
24120   }
24121   unsigned Opcode = 0;
24122   unsigned NumOperands = 0;
24123 
24124   SDValue ArithOp = Op;
24125 
24126   // NOTE: In the code below we use ArithOp to hold the arithmetic operation,
24127   // which may be the result of a cast.  We use the variable 'Op', which is the
24128   // non-cast value, when we check for possible users.
24129   switch (ArithOp.getOpcode()) {
24130   case ISD::AND:
24131     // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
24132     // because a TEST instruction will be better.
24133     if (!hasNonFlagsUse(Op))
24134       break;
24135 
24136     [[fallthrough]];
24137   case ISD::ADD:
24138   case ISD::SUB:
24139   case ISD::OR:
24140   case ISD::XOR:
24141     if (!isProfitableToUseFlagOp(Op))
24142       break;
24143 
24144     // Otherwise use a regular EFLAGS-setting instruction.
24145     switch (ArithOp.getOpcode()) {
24146     default: llvm_unreachable("unexpected operator!");
24147     case ISD::ADD: Opcode = X86ISD::ADD; break;
24148     case ISD::SUB: Opcode = X86ISD::SUB; break;
24149     case ISD::XOR: Opcode = X86ISD::XOR; break;
24150     case ISD::AND: Opcode = X86ISD::AND; break;
24151     case ISD::OR:  Opcode = X86ISD::OR;  break;
24152     }
24153 
24154     NumOperands = 2;
24155     break;
24156   case X86ISD::ADD:
24157   case X86ISD::SUB:
24158   case X86ISD::OR:
24159   case X86ISD::XOR:
24160   case X86ISD::AND:
24161     return SDValue(Op.getNode(), 1);
24162   case ISD::SSUBO:
24163   case ISD::USUBO: {
24164     // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
24165     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
24166     return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
24167                        Op->getOperand(1)).getValue(1);
24168   }
24169   default:
24170     break;
24171   }
24172 
24173   if (Opcode == 0) {
24174     // Emit a CMP with 0, which is the TEST pattern.
24175     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
24176                        DAG.getConstant(0, dl, Op.getValueType()));
24177   }
24178   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
24179   SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
24180 
24181   SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
24182   DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
24183   return SDValue(New.getNode(), 1);
24184 }
24185 
24186 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
24187 /// equivalent.
24188 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
24189                        const SDLoc &dl, SelectionDAG &DAG,
24190                        const X86Subtarget &Subtarget) {
24191   if (isNullConstant(Op1))
24192     return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
24193 
24194   EVT CmpVT = Op0.getValueType();
24195 
24196   assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
24197           CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
24198 
24199   // Only promote the compare up to i32 if it is a 16-bit operation
24200   // with an immediate.  16-bit immediates are to be avoided.
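  // (A 16-bit compare with an immediate needs an operand-size prefix plus a
  // 16-bit immediate; that combination is a length-changing prefix that can
  // stall the decoders on some CPUs, so widening to 32-bit sidesteps it.)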
24201   if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
24202       !DAG.getMachineFunction().getFunction().hasMinSize()) {
24203     ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
24204     ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
24205     // Don't do this if the immediate can fit in 8-bits.
24206     if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
24207         (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
24208       unsigned ExtendOp =
24209           isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
24210       if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
24211         // For equality comparisons try to use SIGN_EXTEND if the input was
24212         // truncate from something with enough sign bits.
24213         if (Op0.getOpcode() == ISD::TRUNCATE) {
24214           if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
24215             ExtendOp = ISD::SIGN_EXTEND;
24216         } else if (Op1.getOpcode() == ISD::TRUNCATE) {
24217           if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
24218             ExtendOp = ISD::SIGN_EXTEND;
24219         }
24220       }
24221 
24222       CmpVT = MVT::i32;
24223       Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
24224       Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
24225     }
24226   }
24227 
24228   // Try to shrink i64 compares if the input has enough zero bits.
24229   // FIXME: Do this for non-constant compares for constant on LHS?
24230   if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
24231       Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
24232       cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
24233       DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
24234     CmpVT = MVT::i32;
24235     Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
24236     Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
24237   }
24238 
24239   // 0-x == y --> x+y == 0
24240   // 0-x != y --> x+y != 0
24241   if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
24242       Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
24243     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24244     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
24245     return Add.getValue(1);
24246   }
24247 
24248   // x == 0-y --> x+y == 0
24249   // x != 0-y --> x+y != 0
24250   if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
24251       Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
24252     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24253     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
24254     return Add.getValue(1);
24255   }
24256 
24257   // Use SUB instead of CMP to enable CSE between SUB and CMP.
24258   SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24259   SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
24260   return Sub.getValue(1);
24261 }
24262 
24263 /// Check if replacement of SQRT with RSQRT should be disabled.
24264 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
24265   EVT VT = Op.getValueType();
24266 
24267   // We don't need to replace SQRT with RSQRT for half type.
24268   if (VT.getScalarType() == MVT::f16)
24269     return true;
24270 
24271   // We never want to use both SQRT and RSQRT instructions for the same input.
24272   if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
24273     return false;
24274 
24275   if (VT.isVector())
24276     return Subtarget.hasFastVectorFSQRT();
24277   return Subtarget.hasFastScalarFSQRT();
24278 }
24279 
24280 /// The minimum architected relative accuracy is 2^-12. We need one
24281 /// Newton-Raphson step to have a good float result (24 bits of precision).
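/// The refinement applied by the generic combiner is, roughly, one
/// Newton-Raphson step for 1/sqrt(x):  Est' = Est * (1.5 - 0.5 * x * Est * Est).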
24282 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
24283                                            SelectionDAG &DAG, int Enabled,
24284                                            int &RefinementSteps,
24285                                            bool &UseOneConstNR,
24286                                            bool Reciprocal) const {
24287   SDLoc DL(Op);
24288   EVT VT = Op.getValueType();
24289 
24290   // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
24291   // It is likely not profitable to do this for f64 because a double-precision
24292   // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
24293   // instructions: convert to single, rsqrtss, convert back to double, refine
24294   // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
24295   // along with FMA, this could be a throughput win.
24296   // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
24297   // after legalize types.
24298   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
24299       (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
24300       (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
24301       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
24302       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
24303     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24304       RefinementSteps = 1;
24305 
24306     UseOneConstNR = false;
24307     // There is no FSQRT for 512-bits, but there is RSQRT14.
24308     // There is no FRSQRT for 512-bit vectors, but there is RSQRT14.
24309     SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
24310     if (RefinementSteps == 0 && !Reciprocal)
24311       Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
24312     return Estimate;
24313   }
24314 
24315   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
24316       Subtarget.hasFP16()) {
24317     assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
24318     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24319       RefinementSteps = 0;
24320 
24321     if (VT == MVT::f16) {
24322       SDValue Zero = DAG.getIntPtrConstant(0, DL);
24323       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
24324       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
24325       Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
24326       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
24327     }
24328 
24329     return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
24330   }
24331   return SDValue();
24332 }
24333 
24334 /// The minimum architected relative accuracy is 2^-12. We need one
24335 /// Newton-Raphson step to have a good float result (24 bits of precision).
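/// The refinement applied for the reciprocal is, roughly, one Newton-Raphson
/// step for 1/x:  Est' = Est * (2.0 - x * Est).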
24336 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
24337                                             int Enabled,
24338                                             int &RefinementSteps) const {
24339   SDLoc DL(Op);
24340   EVT VT = Op.getValueType();
24341 
24342   // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
24343   // It is likely not profitable to do this for f64 because a double-precision
24344   // reciprocal estimate with refinement on x86 prior to FMA requires
24345   // 15 instructions: convert to single, rcpss, convert back to double, refine
24346   // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
24347   // along with FMA, this could be a throughput win.
24348 
24349   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
24350       (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
24351       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
24352       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
24353     // Enable estimate codegen with 1 refinement step for vector division.
24354     // Scalar division estimates are disabled because they break too much
24355     // real-world code. These defaults are intended to match GCC behavior.
24356     if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
24357       return SDValue();
24358 
24359     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24360       RefinementSteps = 1;
24361 
24362     // There is no FSQRT for 512-bits, but there is RCP14.
24363     // There is no FRCP for 512-bit vectors, but there is RCP14.
24364     return DAG.getNode(Opcode, DL, VT, Op);
24365   }
24366 
24367   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
24368       Subtarget.hasFP16()) {
24369     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24370       RefinementSteps = 0;
24371 
24372     if (VT == MVT::f16) {
24373       SDValue Zero = DAG.getIntPtrConstant(0, DL);
24374       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
24375       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
24376       Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
24377       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
24378     }
24379 
24380     return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
24381   }
24382   return SDValue();
24383 }
24384 
24385 /// If we have at least two divisions that use the same divisor, convert to
24386 /// multiplication by a reciprocal. This may need to be adjusted for a given
24387 /// CPU if a division's cost is not at least twice the cost of a multiplication.
24388 /// This is because we still need one division to calculate the reciprocal and
24389 /// then we need two multiplies by that reciprocal as replacements for the
24390 /// original divisions.
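/// For example, "a / x + b / x" can become "r = 1.0 / x; a * r + b * r", which
/// replaces two divisions with one division and two multiplies.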
24391 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
24392   return 2;
24393 }
24394 
24395 SDValue
24396 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
24397                                  SelectionDAG &DAG,
24398                                  SmallVectorImpl<SDNode *> &Created) const {
24399   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
24400   if (isIntDivCheap(N->getValueType(0), Attr))
24401     return SDValue(N,0); // Lower SDIV as SDIV
24402 
24403   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
24404          "Unexpected divisor!");
24405 
24406   // Only perform this transform if CMOV is supported otherwise the select
24407   // below will become a branch.
24408   if (!Subtarget.canUseCMOV())
24409     return SDValue();
24410 
24411   // fold (sdiv X, pow2)
24412   EVT VT = N->getValueType(0);
24413   // FIXME: Support i8.
24414   if (VT != MVT::i16 && VT != MVT::i32 &&
24415       !(Subtarget.is64Bit() && VT == MVT::i64))
24416     return SDValue();
24417 
24418   unsigned Lg2 = Divisor.countTrailingZeros();
24419 
24420   // If the divisor is 2 or -2, the default expansion is better.
24421   if (Lg2 == 1)
24422     return SDValue();
24423 
24424   SDLoc DL(N);
24425   SDValue N0 = N->getOperand(0);
24426   SDValue Zero = DAG.getConstant(0, DL, VT);
24427   APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
24428   SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
24429 
24430   // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
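  // Illustrative, for X / 8 (Lg2 == 3):
  //   t   = (X < 0) ? X + 7 : X;   // the select below becomes a CMOV
  //   res = t >> 3;                // arithmetic shift
  // and the result is negated afterwards if the divisor was -8.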
24431   SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
24432   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
24433   SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
24434 
24435   Created.push_back(Cmp.getNode());
24436   Created.push_back(Add.getNode());
24437   Created.push_back(CMov.getNode());
24438 
24439   // Divide by pow2.
24440   SDValue SRA =
24441       DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
24442 
24443   // If we're dividing by a positive value, we're done.  Otherwise, we must
24444   // negate the result.
24445   if (Divisor.isNonNegative())
24446     return SRA;
24447 
24448   Created.push_back(SRA.getNode());
24449   return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
24450 }
24451 
24452 /// Result of 'and' is compared against zero. Change to a BT node if possible.
24453 /// Returns the BT node and the condition code needed to use it.
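/// For example (sketch): "(and X, (shl 1, N)) eq/ne 0" and
/// "(and (srl X, N), 1) eq/ne 0" both become (X86ISD::BT X, N), with COND_AE
/// for "bit clear" and COND_B for "bit set".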
24454 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
24455                             SelectionDAG &DAG, X86::CondCode &X86CC) {
24456   assert(And.getOpcode() == ISD::AND && "Expected AND node!");
24457   SDValue Op0 = And.getOperand(0);
24458   SDValue Op1 = And.getOperand(1);
24459   if (Op0.getOpcode() == ISD::TRUNCATE)
24460     Op0 = Op0.getOperand(0);
24461   if (Op1.getOpcode() == ISD::TRUNCATE)
24462     Op1 = Op1.getOperand(0);
24463 
24464   SDValue Src, BitNo;
24465   if (Op1.getOpcode() == ISD::SHL)
24466     std::swap(Op0, Op1);
24467   if (Op0.getOpcode() == ISD::SHL) {
24468     if (isOneConstant(Op0.getOperand(0))) {
24469       // If we looked past a truncate, check that it's only truncating away
24470       // known zeros.
24471       unsigned BitWidth = Op0.getValueSizeInBits();
24472       unsigned AndBitWidth = And.getValueSizeInBits();
24473       if (BitWidth > AndBitWidth) {
24474         KnownBits Known = DAG.computeKnownBits(Op0);
24475         if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
24476           return SDValue();
24477       }
24478       Src = Op1;
24479       BitNo = Op0.getOperand(1);
24480     }
24481   } else if (Op1.getOpcode() == ISD::Constant) {
24482     ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
24483     uint64_t AndRHSVal = AndRHS->getZExtValue();
24484     SDValue AndLHS = Op0;
24485 
24486     if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
24487       Src = AndLHS.getOperand(0);
24488       BitNo = AndLHS.getOperand(1);
24489     } else {
24490       // Use BT if the immediate can't be encoded in a TEST instruction or we
24491       // are optimizing for size and the immediate won't fit in a byte.
24492       bool OptForSize = DAG.shouldOptForSize();
24493       if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
24494           isPowerOf2_64(AndRHSVal)) {
24495         Src = AndLHS;
24496         BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
24497                                 Src.getValueType());
24498       }
24499     }
24500   }
24501 
24502   // No patterns found, give up.
24503   if (!Src.getNode())
24504     return SDValue();
24505 
24506   // Remove any bit flip.
24507   if (isBitwiseNot(Src)) {
24508     Src = Src.getOperand(0);
24509     CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
24510   }
24511 
24512   // Attempt to create the X86ISD::BT node.
24513   if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
24514     X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
24515     return BT;
24516   }
24517 
24518   return SDValue();
24519 }
24520 
24521 // Check if pre-AVX condcode can be performed by a single FCMP op.
24522 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
24523   return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
24524 }
24525 
24526 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
24527 /// CMPs.
24528 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
24529                                    SDValue &Op1, bool &IsAlwaysSignaling) {
24530   unsigned SSECC;
24531   bool Swap = false;
24532 
24533   // SSE Condition code mapping:
24534   //  0 - EQ
24535   //  1 - LT
24536   //  2 - LE
24537   //  3 - UNORD
24538   //  4 - NEQ
24539   //  5 - NLT
24540   //  6 - NLE
24541   //  7 - ORD
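  // Values 8 (EQ_UQ, used for SETUEQ) and 12 (NEQ_OQ, used for SETONE) below
  // only exist as AVX VCMP immediates; the pre-AVX expansion for those cases
  // is handled elsewhere (see cheapX86FSETCC_SSE above).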
24542   switch (SetCCOpcode) {
24543   default: llvm_unreachable("Unexpected SETCC condition");
24544   case ISD::SETOEQ:
24545   case ISD::SETEQ:  SSECC = 0; break;
24546   case ISD::SETOGT:
24547   case ISD::SETGT:  Swap = true; [[fallthrough]];
24548   case ISD::SETLT:
24549   case ISD::SETOLT: SSECC = 1; break;
24550   case ISD::SETOGE:
24551   case ISD::SETGE:  Swap = true; [[fallthrough]];
24552   case ISD::SETLE:
24553   case ISD::SETOLE: SSECC = 2; break;
24554   case ISD::SETUO:  SSECC = 3; break;
24555   case ISD::SETUNE:
24556   case ISD::SETNE:  SSECC = 4; break;
24557   case ISD::SETULE: Swap = true; [[fallthrough]];
24558   case ISD::SETUGE: SSECC = 5; break;
24559   case ISD::SETULT: Swap = true; [[fallthrough]];
24560   case ISD::SETUGT: SSECC = 6; break;
24561   case ISD::SETO:   SSECC = 7; break;
24562   case ISD::SETUEQ: SSECC = 8; break;
24563   case ISD::SETONE: SSECC = 12; break;
24564   }
24565   if (Swap)
24566     std::swap(Op0, Op1);
24567 
24568   switch (SetCCOpcode) {
24569   default:
24570     IsAlwaysSignaling = true;
24571     break;
24572   case ISD::SETEQ:
24573   case ISD::SETOEQ:
24574   case ISD::SETUEQ:
24575   case ISD::SETNE:
24576   case ISD::SETONE:
24577   case ISD::SETUNE:
24578   case ISD::SETO:
24579   case ISD::SETUO:
24580     IsAlwaysSignaling = false;
24581     break;
24582   }
24583 
24584   return SSECC;
24585 }
24586 
24587 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
24588 /// concatenate the result back.
24589 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
24590                               ISD::CondCode Cond, SelectionDAG &DAG,
24591                               const SDLoc &dl) {
24592   assert(VT.isInteger() && VT == LHS.getValueType() &&
24593          VT == RHS.getValueType() && "Unsupported VTs!");
24594 
24595   SDValue CC = DAG.getCondCode(Cond);
24596 
24597   // Extract the LHS Lo/Hi vectors
24598   SDValue LHS1, LHS2;
24599   std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
24600 
24601   // Extract the RHS Lo/Hi vectors
24602   SDValue RHS1, RHS2;
24603   std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
24604 
24605   // Issue the operation on the smaller types and concatenate the result back
24606   EVT LoVT, HiVT;
24607   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
24608   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24609                      DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
24610                      DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
24611 }
24612 
24613 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
24614 
24615   SDValue Op0 = Op.getOperand(0);
24616   SDValue Op1 = Op.getOperand(1);
24617   SDValue CC = Op.getOperand(2);
24618   MVT VT = Op.getSimpleValueType();
24619   SDLoc dl(Op);
24620 
24621   assert(VT.getVectorElementType() == MVT::i1 &&
24622          "Cannot set masked compare for this operation");
24623 
24624   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
24625 
24626   // Prefer SETGT over SETLT.
24627   if (SetCCOpcode == ISD::SETLT) {
24628     SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
24629     std::swap(Op0, Op1);
24630   }
24631 
24632   return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
24633 }
24634 
24635 /// Given a buildvector constant, return a new vector constant with each element
24636 /// incremented or decremented. If incrementing or decrementing would result in
24637 /// unsigned overflow or underflow or this is not a simple vector constant,
24638 /// return an empty value.
24639 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
24640   auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
24641   if (!BV)
24642     return SDValue();
24643 
24644   MVT VT = V.getSimpleValueType();
24645   MVT EltVT = VT.getVectorElementType();
24646   unsigned NumElts = VT.getVectorNumElements();
24647   SmallVector<SDValue, 8> NewVecC;
24648   SDLoc DL(V);
24649   for (unsigned i = 0; i < NumElts; ++i) {
24650     auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
24651     if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
24652       return SDValue();
24653 
24654     // Avoid overflow/underflow.
24655     const APInt &EltC = Elt->getAPIntValue();
24656     if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
24657       return SDValue();
24658 
24659     NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
24660   }
24661 
24662   return DAG.getBuildVector(VT, DL, NewVecC);
24663 }
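// A minimal scalar sketch of why incDecVectorConstant above guards against
// overflow/underflow: for unsigned x and a constant c != UINT32_MAX,
// (x > c) is the same predicate as (x >= c + 1), so a strict compare can be
// rewritten against an adjusted constant. The helper name is illustrative only.
#include <cstdint>

static bool ugtViaUgeSketch(uint32_t X, uint32_t C) {
  // Mirrors the EltC.isMaxValue() guard above: only valid when C + 1 does not wrap.
  return X >= C + 1;                 // same result as (X > C) when C != UINT32_MAX
}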
24664 
24665 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
24666 /// Op0 u<= Op1:
24667 ///   t = psubus Op0, Op1
24668 ///   pcmpeq t, <0..0>
24669 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
24670                                     ISD::CondCode Cond, const SDLoc &dl,
24671                                     const X86Subtarget &Subtarget,
24672                                     SelectionDAG &DAG) {
24673   if (!Subtarget.hasSSE2())
24674     return SDValue();
24675 
24676   MVT VET = VT.getVectorElementType();
24677   if (VET != MVT::i8 && VET != MVT::i16)
24678     return SDValue();
24679 
24680   switch (Cond) {
24681   default:
24682     return SDValue();
24683   case ISD::SETULT: {
24684     // If the comparison is against a constant, we can turn this into a
24685     // setule.  With psubus, setule does not require a swap.  This is
24686     // beneficial because the constant in the register is no longer
24687     // clobbered as the destination, so it can be hoisted out of a loop.
24688     // Only do this pre-AVX, since vpcmp* is no longer destructive.
24689     if (Subtarget.hasAVX())
24690       return SDValue();
24691     SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
24692     if (!ULEOp1)
24693       return SDValue();
24694     Op1 = ULEOp1;
24695     break;
24696   }
24697   case ISD::SETUGT: {
24698     // If the comparison is against a constant, we can turn this into a setuge.
24699     // This is beneficial because materializing a constant 0 for the PCMPEQ is
24700     // probably cheaper than XOR+PCMPGT using 2 different vector constants:
24701     // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
24702     SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
24703     if (!UGEOp1)
24704       return SDValue();
24705     Op1 = Op0;
24706     Op0 = UGEOp1;
24707     break;
24708   }
24709   // Psubus is better than flip-sign because it requires no inversion.
24710   case ISD::SETUGE:
24711     std::swap(Op0, Op1);
24712     break;
24713   case ISD::SETULE:
24714     break;
24715   }
24716 
24717   SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
24718   return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
24719                      DAG.getConstant(0, dl, VT));
24720 }
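// A minimal scalar sketch of the PSUBUS/USUBSAT trick used in
// LowerVSETCCWithSUBUS above: for unsigned values, a <= b holds exactly when
// the saturating subtraction (a -sat b) is zero. Plain C++ over a single
// uint8_t lane stands in for the vector instruction; names are illustrative.
#include <cstdint>

static uint8_t usubsat8Sketch(uint8_t A, uint8_t B) {
  return A > B ? (uint8_t)(A - B) : 0;   // saturating subtract, like PSUBUSB
}

static bool uleViaUsubsatSketch(uint8_t A, uint8_t B) {
  return usubsat8Sketch(A, B) == 0;      // equivalent to (A <= B)
}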
24721 
24722 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
24723                            SelectionDAG &DAG) {
24724   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
24725                   Op.getOpcode() == ISD::STRICT_FSETCCS;
24726   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
24727   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
24728   SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
24729   MVT VT = Op->getSimpleValueType(0);
24730   ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
24731   bool isFP = Op1.getSimpleValueType().isFloatingPoint();
24732   SDLoc dl(Op);
24733 
24734   if (isFP) {
24735     MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
24736     assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
24737     if (isSoftFP16(EltVT, Subtarget))
24738       return SDValue();
24739 
24740     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
24741     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
24742 
24743     // If we have a strict compare with a vXi1 result and the input is 128/256
24744     // bits, we can't use a masked compare unless we have VLX. If we use a wider
24745     // compare like we do for non-strict, we might trigger spurious exceptions
24746     // from the upper elements. Instead emit an AVX compare and convert to mask.
24747     unsigned Opc;
24748     if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
24749         (!IsStrict || Subtarget.hasVLX() ||
24750          Op0.getSimpleValueType().is512BitVector())) {
24751 #ifndef NDEBUG
24752       unsigned Num = VT.getVectorNumElements();
24753       assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
24754 #endif
24755       Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
24756     } else {
24757       Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
24758       // The SSE/AVX packed FP comparison nodes are defined with a
24759       // floating-point vector result that matches the operand type. This allows
24760       // them to work with an SSE1 target (integer vector types are not legal).
24761       VT = Op0.getSimpleValueType();
24762     }
24763 
24764     SDValue Cmp;
24765     bool IsAlwaysSignaling;
24766     unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
24767     if (!Subtarget.hasAVX()) {
24768       // TODO: We could use the following steps to handle a quiet compare with
24769       // signaling encodings.
24770       // 1. Get ordered masks from a quiet ISD::SETO
24771       // 2. Use the masks to mask potential unordered elements in operands A, B
24772       // 3. Get the compare results of the masked A, B
24773       // 4. Calculate the final result using the mask and the result from 3
24774       // But currently, we just fall back to scalar operations.
24775       if (IsStrict && IsAlwaysSignaling && !IsSignaling)
24776         return SDValue();
24777 
24778       // Insert an extra signaling instruction to raise exception.
24779       if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
24780         SDValue SignalCmp = DAG.getNode(
24781             Opc, dl, {VT, MVT::Other},
24782             {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
24783         // FIXME: It seems we need to update the flags of all new strict nodes.
24784         // Otherwise, mayRaiseFPException in MI will return false due to
24785         // NoFPExcept = false by default. However, I didn't find it in other
24786         // patches.
24787         SignalCmp->setFlags(Op->getFlags());
24788         Chain = SignalCmp.getValue(1);
24789       }
24790 
24791       // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
24792       // emit two comparisons and a logic op to tie them together.
24793       if (!cheapX86FSETCC_SSE(Cond)) {
24794         // LLVM predicate is SETUEQ or SETONE.
24795         unsigned CC0, CC1;
24796         unsigned CombineOpc;
24797         if (Cond == ISD::SETUEQ) {
24798           CC0 = 3; // UNORD
24799           CC1 = 0; // EQ
24800           CombineOpc = X86ISD::FOR;
24801         } else {
24802           assert(Cond == ISD::SETONE);
24803           CC0 = 7; // ORD
24804           CC1 = 4; // NEQ
24805           CombineOpc = X86ISD::FAND;
24806         }
24807 
24808         SDValue Cmp0, Cmp1;
24809         if (IsStrict) {
24810           Cmp0 = DAG.getNode(
24811               Opc, dl, {VT, MVT::Other},
24812               {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
24813           Cmp1 = DAG.getNode(
24814               Opc, dl, {VT, MVT::Other},
24815               {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
24816           Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
24817                               Cmp1.getValue(1));
24818         } else {
24819           Cmp0 = DAG.getNode(
24820               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
24821           Cmp1 = DAG.getNode(
24822               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
24823         }
24824         Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
24825       } else {
24826         if (IsStrict) {
24827           Cmp = DAG.getNode(
24828               Opc, dl, {VT, MVT::Other},
24829               {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24830           Chain = Cmp.getValue(1);
24831         } else
24832           Cmp = DAG.getNode(
24833               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24834       }
24835     } else {
24836       // Handle all other FP comparisons here.
24837       if (IsStrict) {
24838         // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
24839         SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
24840         Cmp = DAG.getNode(
24841             Opc, dl, {VT, MVT::Other},
24842             {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24843         Chain = Cmp.getValue(1);
24844       } else
24845         Cmp = DAG.getNode(
24846             Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24847     }
24848 
24849     if (VT.getFixedSizeInBits() >
24850         Op.getSimpleValueType().getFixedSizeInBits()) {
24851       // We emitted a compare with an XMM/YMM result. Finish converting to a
24852       // mask register using a vptestm.
24853       EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
24854       Cmp = DAG.getBitcast(CastVT, Cmp);
24855       Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
24856                          DAG.getConstant(0, dl, CastVT), ISD::SETNE);
24857     } else {
24858       // If this is SSE/AVX CMPP, bitcast the result back to integer to match
24859       // the result type of SETCC. The bitcast is expected to be optimized
24860       // away during combining/isel.
24861       Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
24862     }
24863 
24864     if (IsStrict)
24865       return DAG.getMergeValues({Cmp, Chain}, dl);
24866 
24867     return Cmp;
24868   }
24869 
24870   assert(!IsStrict && "Strict SETCC only handles FP operands.");
24871 
24872   MVT VTOp0 = Op0.getSimpleValueType();
24873   (void)VTOp0;
24874   assert(VTOp0 == Op1.getSimpleValueType() &&
24875          "Expected operands with same type!");
24876   assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
24877          "Invalid number of packed elements for source and destination!");
24878 
24879   // The non-AVX512 code below works under the assumption that source and
24880   // destination types are the same.
24881   assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
24882          "Value types for source and destination must be the same!");
24883 
24884   // The result is boolean, but operands are int/float
24885   if (VT.getVectorElementType() == MVT::i1) {
24886     // In the AVX-512 architecture, setcc returns a mask with i1 elements,
24887     // but there is no compare instruction for i8 and i16 elements in KNL.
24888     assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
24889            "Unexpected operand type");
24890     return LowerIntVSETCC_AVX512(Op, DAG);
24891   }
24892 
24893   // Lower using XOP integer comparisons.
24894   if (VT.is128BitVector() && Subtarget.hasXOP()) {
24895     // Translate compare code to XOP PCOM compare mode.
24896     unsigned CmpMode = 0;
24897     switch (Cond) {
24898     default: llvm_unreachable("Unexpected SETCC condition");
24899     case ISD::SETULT:
24900     case ISD::SETLT: CmpMode = 0x00; break;
24901     case ISD::SETULE:
24902     case ISD::SETLE: CmpMode = 0x01; break;
24903     case ISD::SETUGT:
24904     case ISD::SETGT: CmpMode = 0x02; break;
24905     case ISD::SETUGE:
24906     case ISD::SETGE: CmpMode = 0x03; break;
24907     case ISD::SETEQ: CmpMode = 0x04; break;
24908     case ISD::SETNE: CmpMode = 0x05; break;
24909     }
24910 
24911     // Are we comparing unsigned or signed integers?
24912     unsigned Opc =
24913         ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
24914 
24915     return DAG.getNode(Opc, dl, VT, Op0, Op1,
24916                        DAG.getTargetConstant(CmpMode, dl, MVT::i8));
24917   }
24918 
24919   // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
24920   // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
24921   if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
24922     SDValue BC0 = peekThroughBitcasts(Op0);
24923     if (BC0.getOpcode() == ISD::AND) {
24924       APInt UndefElts;
24925       SmallVector<APInt, 64> EltBits;
24926       if (getTargetConstantBitsFromNode(BC0.getOperand(1),
24927                                         VT.getScalarSizeInBits(), UndefElts,
24928                                         EltBits, false, false)) {
24929         if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
24930           Cond = ISD::SETEQ;
24931           Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
24932         }
24933       }
24934     }
24935   }
24936 
24937   // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
24938   if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
24939       Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
24940     ConstantSDNode *C1 = isConstOrConstSplat(Op1);
24941     if (C1 && C1->getAPIntValue().isPowerOf2()) {
24942       unsigned BitWidth = VT.getScalarSizeInBits();
24943       unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
24944 
24945       SDValue Result = Op0.getOperand(0);
24946       Result = DAG.getNode(ISD::SHL, dl, VT, Result,
24947                            DAG.getConstant(ShiftAmt, dl, VT));
24948       Result = DAG.getNode(ISD::SRA, dl, VT, Result,
24949                            DAG.getConstant(BitWidth - 1, dl, VT));
24950       return Result;
24951     }
24952   }
24953 
24954   // Break 256-bit integer vector compare into smaller ones.
24955   if (VT.is256BitVector() && !Subtarget.hasInt256())
24956     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
24957 
24958   // Break 512-bit integer vector compare into smaller ones.
24959   // TODO: Try harder to use VPCMPx + VPMOV2x?
24960   if (VT.is512BitVector())
24961     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
24962 
24963   // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
24964   // not-of-PCMPEQ:
24965   // X != INT_MIN --> X >s INT_MIN
24966   // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
24967   // +X != 0 --> +X >s 0
24968   APInt ConstValue;
24969   if (Cond == ISD::SETNE &&
24970       ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
24971     if (ConstValue.isMinSignedValue())
24972       Cond = ISD::SETGT;
24973     else if (ConstValue.isMaxSignedValue())
24974       Cond = ISD::SETLT;
24975     else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
24976       Cond = ISD::SETGT;
24977   }
24978 
24979   // If both operands are known non-negative, then an unsigned compare is the
24980   // same as a signed compare and there's no need to flip signbits.
24981   // TODO: We could check for more general simplifications here since we're
24982   // computing known bits.
24983   bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
24984                    !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
24985 
24986   // Special case: Use min/max operations for unsigned compares.
24987   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24988   if (ISD::isUnsignedIntSetCC(Cond) &&
24989       (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
24990       TLI.isOperationLegal(ISD::UMIN, VT)) {
24991     // If we have a constant operand, increment/decrement it and change the
24992     // condition to avoid an invert.
24993     if (Cond == ISD::SETUGT) {
24994       // X > C --> X >= (C+1) --> X == umax(X, C+1)
24995       if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
24996         Op1 = UGTOp1;
24997         Cond = ISD::SETUGE;
24998       }
24999     }
25000     if (Cond == ISD::SETULT) {
25001       // X < C --> X <= (C-1) --> X == umin(X, C-1)
25002       if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
25003         Op1 = ULTOp1;
25004         Cond = ISD::SETULE;
25005       }
25006     }
25007     bool Invert = false;
25008     unsigned Opc;
25009     switch (Cond) {
25010     default: llvm_unreachable("Unexpected condition code");
25011     case ISD::SETUGT: Invert = true; [[fallthrough]];
25012     case ISD::SETULE: Opc = ISD::UMIN; break;
25013     case ISD::SETULT: Invert = true; [[fallthrough]];
25014     case ISD::SETUGE: Opc = ISD::UMAX; break;
25015     }
25016 
25017     SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
25018     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
25019 
25020     // If the logical-not of the result is required, perform that now.
25021     if (Invert)
25022       Result = DAG.getNOT(dl, Result, VT);
25023 
25024     return Result;
25025   }
25026 
25027   // Try to use SUBUS and PCMPEQ.
25028   if (FlipSigns)
25029     if (SDValue V =
25030             LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
25031       return V;
25032 
25033   // We are handling one of the integer comparisons here. Since SSE only has
25034   // GT and EQ comparisons for integer, swapping operands and multiple
25035   // operations may be required for some comparisons.
25036   unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
25037                                                             : X86ISD::PCMPGT;
25038   bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
25039               Cond == ISD::SETGE || Cond == ISD::SETUGE;
25040   bool Invert = Cond == ISD::SETNE ||
25041                 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
25042 
25043   if (Swap)
25044     std::swap(Op0, Op1);
25045 
25046   // Check that the operation in question is available (most are plain SSE2,
25047   // but PCMPGTQ and PCMPEQQ have different requirements).
25048   if (VT == MVT::v2i64) {
25049     if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
25050       assert(Subtarget.hasSSE2() && "Don't know how to lower!");
25051 
25052       // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
25053       // the odd elements over the even elements.
25054       if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
25055         Op0 = DAG.getConstant(0, dl, MVT::v4i32);
25056         Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25057 
25058         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25059         static const int MaskHi[] = { 1, 1, 3, 3 };
25060         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25061 
25062         return DAG.getBitcast(VT, Result);
25063       }
25064 
25065       if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
25066         Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25067         Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
25068 
25069         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25070         static const int MaskHi[] = { 1, 1, 3, 3 };
25071         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25072 
25073         return DAG.getBitcast(VT, Result);
25074       }
25075 
25076       // Since SSE has no unsigned integer comparisons, we need to flip the sign
25077       // bits of the inputs before performing those operations. The lower
25078       // compare is always unsigned.
25079       SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
25080                                              : 0x0000000080000000ULL,
25081                                    dl, MVT::v2i64);
25082 
25083       Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
25084       Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
25085 
25086       // Cast everything to the right type.
25087       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25088       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25089 
25090       // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
25091       SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25092       SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
25093 
25094       // Create masks for only the low parts/high parts of the 64 bit integers.
25095       static const int MaskHi[] = { 1, 1, 3, 3 };
25096       static const int MaskLo[] = { 0, 0, 2, 2 };
25097       SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
25098       SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
25099       SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25100 
25101       SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
25102       Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
25103 
25104       if (Invert)
25105         Result = DAG.getNOT(dl, Result, MVT::v4i32);
25106 
25107       return DAG.getBitcast(VT, Result);
25108     }
25109 
25110     if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
25111       // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
25112       // pcmpeqd + pshufd + pand.
25113       assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
25114 
25115       // First cast everything to the right type.
25116       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25117       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25118 
25119       // Do the compare.
25120       SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
25121 
25122       // Make sure the lower and upper halves are both all-ones.
25123       static const int Mask[] = { 1, 0, 3, 2 };
25124       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
25125       Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
25126 
25127       if (Invert)
25128         Result = DAG.getNOT(dl, Result, MVT::v4i32);
25129 
25130       return DAG.getBitcast(VT, Result);
25131     }
25132   }
25133 
25134   // Since SSE has no unsigned integer comparisons, we need to flip the sign
25135   // bits of the inputs before performing those operations.
25136   if (FlipSigns) {
25137     MVT EltVT = VT.getVectorElementType();
25138     SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
25139                                  VT);
25140     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
25141     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
25142   }
25143 
25144   SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
25145 
25146   // If the logical-not of the result is required, perform that now.
25147   if (Invert)
25148     Result = DAG.getNOT(dl, Result, VT);
25149 
25150   return Result;
25151 }
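// Minimal scalar sketches of two tricks used in LowerVSETCC above, written in
// plain C++ for a single lane: (1) an unsigned compare becomes a signed
// compare once the sign bit of both operands is flipped, and (2) a 64-bit
// signed ">" can be assembled from 32-bit halves. The casts assume the usual
// two's-complement / arithmetic-shift behaviour of the targets this file
// lowers for; helper names are illustrative only.
#include <cstdint>

static bool ugtViaSignFlipSketch(uint32_t A, uint32_t B) {
  int32_t SA = (int32_t)(A ^ 0x80000000u);   // flip the sign bits
  int32_t SB = (int32_t)(B ^ 0x80000000u);
  return SA > SB;                            // signed ">" now matches unsigned ">"
}

static bool sgt64From32Sketch(int64_t A, int64_t B) {
  int32_t  HiA = (int32_t)(A >> 32), HiB = (int32_t)(B >> 32);
  uint32_t LoA = (uint32_t)A,        LoB = (uint32_t)B;
  // (hi1 > hi2) | ((hi1 == hi2) & (lo1 >u lo2)), as in the PCMPGTQ emulation.
  return (HiA > HiB) || ((HiA == HiB) && (LoA > LoB));
}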
25152 
25153 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
25154 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
25155                               const SDLoc &dl, SelectionDAG &DAG,
25156                               const X86Subtarget &Subtarget,
25157                               SDValue &X86CC) {
25158   // Only support equality comparisons.
25159   if (CC != ISD::SETEQ && CC != ISD::SETNE)
25160     return SDValue();
25161 
25162   // Must be a bitcast from vXi1.
25163   if (Op0.getOpcode() != ISD::BITCAST)
25164     return SDValue();
25165 
25166   Op0 = Op0.getOperand(0);
25167   MVT VT = Op0.getSimpleValueType();
25168   if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
25169       !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
25170       !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
25171     return SDValue();
25172 
25173   X86::CondCode X86Cond;
25174   if (isNullConstant(Op1)) {
25175     X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
25176   } else if (isAllOnesConstant(Op1)) {
25177     // C flag is set for all ones.
25178     X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
25179   } else
25180     return SDValue();
25181 
25182   // If the input is an AND, we can combine its operands into the KTEST.
25183   bool KTestable = false;
25184   if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
25185     KTestable = true;
25186   if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
25187     KTestable = true;
25188   if (!isNullConstant(Op1))
25189     KTestable = false;
25190   if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
25191     SDValue LHS = Op0.getOperand(0);
25192     SDValue RHS = Op0.getOperand(1);
25193     X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
25194     return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
25195   }
25196 
25197   // If the input is an OR, we can combine its operands into the KORTEST.
25198   SDValue LHS = Op0;
25199   SDValue RHS = Op0;
25200   if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
25201     LHS = Op0.getOperand(0);
25202     RHS = Op0.getOperand(1);
25203   }
25204 
25205   X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
25206   return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
25207 }
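// A minimal scalar sketch of the KORTEST fold in EmitAVX512Test above: testing
// whether (A | B) is the all-zero mask needs no separate OR, since a
// kortest-style test can take both masks and report "zero" exactly when every
// bit of A | B is clear. 16-bit masks and the helper name are illustrative.
#include <cstdint>

static bool kortestIsZeroSketch(uint16_t MaskA, uint16_t MaskB) {
  return (uint16_t)(MaskA | MaskB) == 0;   // ZF-like result of KORTESTW
}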
25208 
25209 /// Emit flags for the given setcc condition and operands. Also returns the
25210 /// corresponding X86 condition code constant in X86CC.
25211 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
25212                                              ISD::CondCode CC, const SDLoc &dl,
25213                                              SelectionDAG &DAG,
25214                                              SDValue &X86CC) const {
25215   // Optimize to BT if possible.
25216   // Lower (X & (1 << N)) == 0 to BT(X, N).
25217   // Lower ((X >>u N) & 1) != 0 to BT(X, N).
25218   // Lower ((X >>s N) & 1) != 0 to BT(X, N).
25219   if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
25220       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25221     X86::CondCode X86CondCode;
25222     if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
25223       X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
25224       return BT;
25225     }
25226   }
25227 
25228   // Try to use PTEST/PMOVMSKB for a tree ORs equality compared with 0.
25229   // TODO: We could do AND tree with all 1s as well by using the C flag.
25230   if (isNullConstant(Op1) && (CC == ISD::SETEQ || CC == ISD::SETNE))
25231     if (SDValue CmpZ =
25232             MatchVectorAllZeroTest(Op0, CC, dl, Subtarget, DAG, X86CC))
25233       return CmpZ;
25234 
25235   // Try to lower using KORTEST or KTEST.
25236   if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
25237     return Test;
25238 
25239   // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
25240   // these.
25241   if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
25242       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25243     // If the input is a setcc, then reuse the input setcc or use a new one with
25244     // the inverted condition.
25245     if (Op0.getOpcode() == X86ISD::SETCC) {
25246       bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
25247 
25248       X86CC = Op0.getOperand(0);
25249       if (Invert) {
25250         X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
25251         CCode = X86::GetOppositeBranchCondition(CCode);
25252         X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
25253       }
25254 
25255       return Op0.getOperand(1);
25256     }
25257   }
25258 
25259   // Try to use the carry flag from the add in place of a separate CMP for:
25260   // (seteq (add X, -1), -1). Similar for setne.
25261   if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
25262       Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25263     if (isProfitableToUseFlagOp(Op0)) {
25264       SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
25265 
25266       SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
25267                                 Op0.getOperand(1));
25268       DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
25269       X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
25270       X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
25271       return SDValue(New.getNode(), 1);
25272     }
25273   }
25274 
25275   X86::CondCode CondCode =
25276       TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
25277   assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
25278 
25279   SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
25280   X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
25281   return EFLAGS;
25282 }
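// A minimal scalar sketch of the carry-flag reuse above: adding -1 (all ones)
// to X produces an unsigned carry exactly when X != 0, so "(X + -1) == -1"
// can be read off the ADD's carry instead of a separate CMP (COND_AE for
// SETEQ, COND_B for SETNE). The carry is modelled explicitly with a wider
// type; the helper name is illustrative only.
#include <cstdint>

static bool addMinusOneCarriesSketch(uint32_t X) {
  uint64_t Wide = (uint64_t)X + 0xFFFFFFFFull;  // X + (-1) in 32-bit arithmetic
  return (Wide >> 32) != 0;                     // CF of the 32-bit ADD: set iff X != 0
}
// So (X + -1u) == -1u  <=>  !addMinusOneCarriesSketch(X)  <=>  X == 0.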
25283 
25284 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
25285 
25286   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
25287                   Op.getOpcode() == ISD::STRICT_FSETCCS;
25288   MVT VT = Op->getSimpleValueType(0);
25289 
25290   if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
25291 
25292   assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
25293   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
25294   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
25295   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
25296   SDLoc dl(Op);
25297   ISD::CondCode CC =
25298       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
25299 
25300   if (isSoftFP16(Op0.getValueType()))
25301     return SDValue();
25302 
25303   // Handle f128 first, since one possible outcome is a normal integer
25304   // comparison which gets handled by emitFlagsForSetcc.
25305   if (Op0.getValueType() == MVT::f128) {
25306     softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
25307                         Op.getOpcode() == ISD::STRICT_FSETCCS);
25308 
25309     // If softenSetCCOperands returned a scalar, use it.
25310     if (!Op1.getNode()) {
25311       assert(Op0.getValueType() == Op.getValueType() &&
25312              "Unexpected setcc expansion!");
25313       if (IsStrict)
25314         return DAG.getMergeValues({Op0, Chain}, dl);
25315       return Op0;
25316     }
25317   }
25318 
25319   if (Op0.getSimpleValueType().isInteger()) {
25320     // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with a constant, which
25321     // reduces the number of EFLAGS bit reads (the GE conditions don't read ZF);
25322     // this may translate to fewer uops depending on the uarch implementation. The
25323     // equivalent for SLE/ULE -> SLT/ULT isn't likely to happen as we already
25324     // canonicalize to that CondCode.
25325     // NOTE: Only do this if incrementing the constant doesn't increase the bit
25326     // encoding size - so it must either already be an i8 or i32 immediate, or it
25327     // shrinks down to that. We don't do this for any i64's to avoid additional
25328     // constant materializations.
25329     // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
25330     if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
25331       const APInt &Op1Val = Op1C->getAPIntValue();
25332       if (!Op1Val.isZero()) {
25333         // Ensure the constant+1 doesn't overflow.
25334         if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
25335             (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
25336           APInt Op1ValPlusOne = Op1Val + 1;
25337           if (Op1ValPlusOne.isSignedIntN(32) &&
25338               (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
25339             Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
25340             CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
25341                                             : ISD::CondCode::SETUGE;
25342           }
25343         }
25344       }
25345     }
25346 
25347     SDValue X86CC;
25348     SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
25349     SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
25350     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
25351   }
25352 
25353   // Handle floating point.
25354   X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
25355   if (CondCode == X86::COND_INVALID)
25356     return SDValue();
25357 
25358   SDValue EFLAGS;
25359   if (IsStrict) {
25360     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
25361     EFLAGS =
25362         DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
25363                     dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
25364     Chain = EFLAGS.getValue(1);
25365   } else {
25366     EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
25367   }
25368 
25369   SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
25370   SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
25371   return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
25372 }
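// A minimal scalar sketch of the SETGT -> SETGE canonicalization above: for a
// signed constant C != INT32_MAX, (x > C) and (x >= C + 1) are the same
// predicate, and the GE form reads fewer EFLAGS bits on x86. Plain C++; the
// helper name is illustrative only.
#include <cstdint>

static bool sgtViaSgeSketch(int32_t X, int32_t C) {
  // Mirrors the isMaxSignedValue() overflow guard above.
  return X >= C + 1;                 // same truth value as (X > C)
}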
25373 
25374 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
25375   SDValue LHS = Op.getOperand(0);
25376   SDValue RHS = Op.getOperand(1);
25377   SDValue Carry = Op.getOperand(2);
25378   SDValue Cond = Op.getOperand(3);
25379   SDLoc DL(Op);
25380 
25381   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
25382   X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
25383 
25384   // Recreate the carry if needed.
25385   EVT CarryVT = Carry.getValueType();
25386   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
25387                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
25388 
25389   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
25390   SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
25391   return getSETCC(CC, Cmp.getValue(1), DL, DAG);
25392 }
25393 
25394 // This function returns three things: the arithmetic computation itself
25395 // (Value), an EFLAGS result (Overflow), and a condition code (Cond).  The
25396 // flag and the condition code define the case in which the arithmetic
25397 // computation overflows.
25398 static std::pair<SDValue, SDValue>
25399 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
25400   assert(Op.getResNo() == 0 && "Unexpected result number!");
25401   SDValue Value, Overflow;
25402   SDValue LHS = Op.getOperand(0);
25403   SDValue RHS = Op.getOperand(1);
25404   unsigned BaseOp = 0;
25405   SDLoc DL(Op);
25406   switch (Op.getOpcode()) {
25407   default: llvm_unreachable("Unknown ovf instruction!");
25408   case ISD::SADDO:
25409     BaseOp = X86ISD::ADD;
25410     Cond = X86::COND_O;
25411     break;
25412   case ISD::UADDO:
25413     BaseOp = X86ISD::ADD;
25414     Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
25415     break;
25416   case ISD::SSUBO:
25417     BaseOp = X86ISD::SUB;
25418     Cond = X86::COND_O;
25419     break;
25420   case ISD::USUBO:
25421     BaseOp = X86ISD::SUB;
25422     Cond = X86::COND_B;
25423     break;
25424   case ISD::SMULO:
25425     BaseOp = X86ISD::SMUL;
25426     Cond = X86::COND_O;
25427     break;
25428   case ISD::UMULO:
25429     BaseOp = X86ISD::UMUL;
25430     Cond = X86::COND_O;
25431     break;
25432   }
25433 
25434   if (BaseOp) {
25435     // Also sets EFLAGS.
25436     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
25437     Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
25438     Overflow = Value.getValue(1);
25439   }
25440 
25441   return std::make_pair(Value, Overflow);
25442 }
25443 
25444 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
25445   // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
25446   // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
25447   // looks for this combo and may remove the "setcc" instruction if the "setcc"
25448   // has only one use.
25449   SDLoc DL(Op);
25450   X86::CondCode Cond;
25451   SDValue Value, Overflow;
25452   std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
25453 
25454   SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
25455   assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
25456   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
25457 }
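// A minimal scalar sketch of what the UADDO lowering above computes: the
// "add with overflow" node produces both the wrapped sum and a flag that is
// the unsigned carry out of the addition. Plain C++; the helper name is
// illustrative only.
#include <cstdint>
#include <utility>

static std::pair<uint32_t, bool> uaddo32Sketch(uint32_t A, uint32_t B) {
  uint32_t Sum = A + B;        // wraps modulo 2^32, like the hardware ADD
  bool Overflow = Sum < A;     // carry out <=> the wrapped sum is below either input
  return {Sum, Overflow};
}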
25458 
25459 /// Return true if opcode is a X86 logical comparison.
25460 static bool isX86LogicalCmp(SDValue Op) {
25461   unsigned Opc = Op.getOpcode();
25462   if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
25463       Opc == X86ISD::FCMP)
25464     return true;
25465   if (Op.getResNo() == 1 &&
25466       (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
25467        Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
25468        Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
25469     return true;
25470 
25471   return false;
25472 }
25473 
25474 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
25475   if (V.getOpcode() != ISD::TRUNCATE)
25476     return false;
25477 
25478   SDValue VOp0 = V.getOperand(0);
25479   unsigned InBits = VOp0.getValueSizeInBits();
25480   unsigned Bits = V.getValueSizeInBits();
25481   return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
25482 }
25483 
25484 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
25485   bool AddTest = true;
25486   SDValue Cond  = Op.getOperand(0);
25487   SDValue Op1 = Op.getOperand(1);
25488   SDValue Op2 = Op.getOperand(2);
25489   SDLoc DL(Op);
25490   MVT VT = Op1.getSimpleValueType();
25491   SDValue CC;
25492 
25493   if (isSoftFP16(VT)) {
25494     MVT NVT = VT.changeTypeToInteger();
25495     return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
25496                                           DAG.getBitcast(NVT, Op1),
25497                                           DAG.getBitcast(NVT, Op2)));
25498   }
25499 
25500   // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
25501   // are available or VBLENDV if AVX is available.
25502   // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
25503   if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
25504       VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
25505     SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
25506     bool IsAlwaysSignaling;
25507     unsigned SSECC =
25508         translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
25509                            CondOp0, CondOp1, IsAlwaysSignaling);
25510 
25511     if (Subtarget.hasAVX512()) {
25512       SDValue Cmp =
25513           DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
25514                       DAG.getTargetConstant(SSECC, DL, MVT::i8));
25515       assert(!VT.isVector() && "Not a scalar type?");
25516       return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
25517     }
25518 
25519     if (SSECC < 8 || Subtarget.hasAVX()) {
25520       SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
25521                                 DAG.getTargetConstant(SSECC, DL, MVT::i8));
25522 
25523       // If we have AVX, we can use a variable vector select (VBLENDV) instead
25524       // of 3 logic instructions for size savings and potentially speed.
25525       // Unfortunately, there is no scalar form of VBLENDV.
25526 
25527       // If either operand is a +0.0 constant, don't try this. We can expect to
25528       // optimize away at least one of the logic instructions later in that
25529       // case, so that sequence would be faster than a variable blend.
25530 
25531       // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
25532       // uses XMM0 as the selection register. That may need just as many
25533       // instructions as the AND/ANDN/OR sequence due to register moves, so
25534       // don't bother.
25535       if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
25536           !isNullFPConstant(Op2)) {
25537         // Convert to vectors, do a VSELECT, and convert back to scalar.
25538         // All of the conversions should be optimized away.
25539         MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
25540         SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
25541         SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
25542         SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
25543 
25544         MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
25545         VCmp = DAG.getBitcast(VCmpVT, VCmp);
25546 
25547         SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
25548 
25549         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
25550                            VSel, DAG.getIntPtrConstant(0, DL));
25551       }
25552       SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
25553       SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
25554       return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
25555     }
25556   }
25557 
25558   // AVX512 fallback is to lower selects of scalar floats to masked moves.
25559   if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
25560     SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
25561     return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
25562   }
25563 
25564   if (Cond.getOpcode() == ISD::SETCC &&
25565       !isSoftFP16(Cond.getOperand(0).getSimpleValueType())) {
25566     if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
25567       Cond = NewCond;
25568       // If the condition was updated, it's possible that the operands of the
25569       // select were also updated (for example, EmitTest has a RAUW). Refresh
25570       // the local references to the select operands in case they got stale.
25571       Op1 = Op.getOperand(1);
25572       Op2 = Op.getOperand(2);
25573     }
25574   }
25575 
25576   // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
25577   // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
25578   // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
25579   // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
25580   // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
25581   // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
25582   // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25583   // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
25584   if (Cond.getOpcode() == X86ISD::SETCC &&
25585       Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
25586       isNullConstant(Cond.getOperand(1).getOperand(1))) {
25587     SDValue Cmp = Cond.getOperand(1);
25588     SDValue CmpOp0 = Cmp.getOperand(0);
25589     unsigned CondCode = Cond.getConstantOperandVal(0);
25590 
25591     // Special handling for __builtin_ffs(X) - 1 pattern which looks like
25592     // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
25593     // handling to keep the CMP with 0. This should be removed by
25594     // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
25595     // cttz_zero_undef.
25596     auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
25597       return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
25598               Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
25599     };
25600     if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
25601         ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
25602          (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
25603       // Keep Cmp.
25604     } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
25605         (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
25606       SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
25607       SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
25608 
25609       // 'X - 1' sets the carry flag if X == 0.
25610       // '0 - X' sets the carry flag if X != 0.
25611       // Convert the carry flag to a -1/0 mask with sbb:
25612       // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
25613       // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
25614       // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
25615       // select (X == 0), -1, Y --> X - 1; or (sbb), Y
25616       SDValue Sub;
25617       if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
25618         SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
25619         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
25620       } else {
25621         SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
25622         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
25623       }
25624       SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25625                                 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
25626                                 Sub.getValue(1));
25627       return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
25628     } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
25629                CmpOp0.getOpcode() == ISD::AND &&
25630                isOneConstant(CmpOp0.getOperand(1))) {
25631       SDValue Src1, Src2;
25632       // True if Op2 is an XOR or OR operation and one of its operands
25633       // is equal to Op1:
25634       // ( a , a op b) || ( b , a op b)
25635       auto isOrXorPattern = [&]() {
25636         if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
25637             (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
25638           Src1 =
25639               Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
25640           Src2 = Op1;
25641           return true;
25642         }
25643         return false;
25644       };
25645 
25646       if (isOrXorPattern()) {
25647         SDValue Neg;
25648         unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
25649         // We need a mask of all zeros or all ones with the same size as
25650         // the other operands.
25651         if (CmpSz > VT.getSizeInBits())
25652           Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
25653         else if (CmpSz < VT.getSizeInBits())
25654           Neg = DAG.getNode(ISD::AND, DL, VT,
25655               DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
25656               DAG.getConstant(1, DL, VT));
25657         else
25658           Neg = CmpOp0;
25659         SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
25660                                    Neg); // -(and (x, 0x1))
25661         SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
25662         return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
25663       }
25664     } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
25665                Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
25666                ((CondCode == X86::COND_S) ||                    // smin(x, 0)
25667                 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
25668       // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
25669       //
25670       // If the comparison is testing for a positive value, we have to invert
25671       // the sign bit mask, so only do that transform if the target has a
25672       // bitwise 'and not' instruction (the invert is free).
25673       // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25674       unsigned ShCt = VT.getSizeInBits() - 1;
25675       SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
25676       SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
25677       if (CondCode == X86::COND_G)
25678         Shift = DAG.getNOT(DL, Shift, VT);
25679       return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
25680     }
25681   }
25682 
25683   // Look past (and (setcc_carry (cmp ...)), 1).
25684   if (Cond.getOpcode() == ISD::AND &&
25685       Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
25686       isOneConstant(Cond.getOperand(1)))
25687     Cond = Cond.getOperand(0);
25688 
25689   // If condition flag is set by a X86ISD::CMP, then use it as the condition
25690   // setting operand in place of the X86ISD::SETCC.
25691   unsigned CondOpcode = Cond.getOpcode();
25692   if (CondOpcode == X86ISD::SETCC ||
25693       CondOpcode == X86ISD::SETCC_CARRY) {
25694     CC = Cond.getOperand(0);
25695 
25696     SDValue Cmp = Cond.getOperand(1);
25697     bool IllegalFPCMov = false;
25698     if (VT.isFloatingPoint() && !VT.isVector() &&
25699         !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV())  // FPStack?
25700       IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
25701 
25702     if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
25703         Cmp.getOpcode() == X86ISD::BT) { // FIXME
25704       Cond = Cmp;
25705       AddTest = false;
25706     }
25707   } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
25708              CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
25709              CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
25710     SDValue Value;
25711     X86::CondCode X86Cond;
25712     std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
25713 
25714     CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
25715     AddTest = false;
25716   }
25717 
25718   if (AddTest) {
25719     // Look past the truncate if the high bits are known zero.
25720     if (isTruncWithZeroHighBitsInput(Cond, DAG))
25721       Cond = Cond.getOperand(0);
25722 
25723     // We know the result of AND is compared against zero. Try to match
25724     // it to BT.
25725     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
25726       X86::CondCode X86CondCode;
25727       if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
25728         CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
25729         Cond = BT;
25730         AddTest = false;
25731       }
25732     }
25733   }
25734 
25735   if (AddTest) {
25736     CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
25737     Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
25738   }
25739 
25740   // a <  b ? -1 :  0 -> RES = ~setcc_carry
25741   // a <  b ?  0 : -1 -> RES = setcc_carry
25742   // a >= b ? -1 :  0 -> RES = setcc_carry
25743   // a >= b ?  0 : -1 -> RES = ~setcc_carry
25744   if (Cond.getOpcode() == X86ISD::SUB) {
25745     unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
25746 
25747     if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
25748         (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
25749         (isNullConstant(Op1) || isNullConstant(Op2))) {
25750       SDValue Res =
25751           DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
25752                       DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
25753       if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
25754         return DAG.getNOT(DL, Res, Res.getValueType());
25755       return Res;
25756     }
25757   }
25758 
25759   // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
25760   // widen the cmov and push the truncate through. This avoids introducing a new
25761   // branch during isel and doesn't add any extensions.
25762   if (Op.getValueType() == MVT::i8 &&
25763       Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
25764     SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
25765     if (T1.getValueType() == T2.getValueType() &&
25766         // Exclude CopyFromReg to avoid partial register stalls.
25767         T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
25768       SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
25769                                  CC, Cond);
25770       return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
25771     }
25772   }
25773 
25774   // Or finally, promote i8 cmovs if we have CMOV,
25775   //                 or i16 cmovs if it won't prevent folding a load.
25776   // FIXME: we should not limit promotion of the i8 case to only when the CMOV is
25777   //        legal, but EmitLoweredSelect() cannot deal with these extensions
25778   //        being inserted between two CMOVs. (in the i16 case too TBN)
25779   //        https://bugs.llvm.org/show_bug.cgi?id=40974
25780   if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
25781       (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
25782        !X86::mayFoldLoad(Op2, Subtarget))) {
25783     Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
25784     Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
25785     SDValue Ops[] = { Op2, Op1, CC, Cond };
25786     SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
25787     return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
25788   }
25789 
25790   // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
25791   // condition is true.
25792   SDValue Ops[] = { Op2, Op1, CC, Cond };
25793   return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
25794 }
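// Minimal scalar sketches of the branchless select patterns listed in
// LowerSELECT above: an arithmetic shift of x by (bit width - 1) yields an
// all-ones mask when x is negative and zero otherwise, so smin(x, 0) and
// smax(x, 0) reduce to a shift, an optional NOT, and an AND. Assumes >> on a
// negative int32_t is an arithmetic shift, as on the targets this file lowers
// for; helper names are illustrative only.
#include <cstdint>

static int32_t sminWithZeroSketch(int32_t X) {   // (select (x < 0), x, 0)
  int32_t Mask = X >> 31;                        // -1 if X < 0, else 0
  return Mask & X;
}

static int32_t smaxWithZeroSketch(int32_t X) {   // (select (x > 0), x, 0)
  int32_t Mask = X >> 31;
  return ~Mask & X;
}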
25795 
25796 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
25797                                      const X86Subtarget &Subtarget,
25798                                      SelectionDAG &DAG) {
25799   MVT VT = Op->getSimpleValueType(0);
25800   SDValue In = Op->getOperand(0);
25801   MVT InVT = In.getSimpleValueType();
25802   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
25803   MVT VTElt = VT.getVectorElementType();
25804   SDLoc dl(Op);
25805 
25806   unsigned NumElts = VT.getVectorNumElements();
25807 
25808   // Extend VT if the scalar type is i8/i16 and BWI is not supported.
25809   MVT ExtVT = VT;
25810   if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
25811     // If v16i32 is to be avoided, we'll need to split and concatenate.
25812     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
25813       return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
25814 
25815     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
25816   }
25817 
25818   // Widen to 512-bits if VLX is not supported.
25819   MVT WideVT = ExtVT;
25820   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
25821     NumElts *= 512 / ExtVT.getSizeInBits();
25822     InVT = MVT::getVectorVT(MVT::i1, NumElts);
25823     In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
25824                      In, DAG.getIntPtrConstant(0, dl));
25825     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
25826   }
25827 
25828   SDValue V;
25829   MVT WideEltVT = WideVT.getVectorElementType();
25830   if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
25831       (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
25832     V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
25833   } else {
25834     SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
25835     SDValue Zero = DAG.getConstant(0, dl, WideVT);
25836     V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
25837   }
25838 
25839   // Truncate if we had to extend i16/i8 above.
25840   if (VT != ExtVT) {
25841     WideVT = MVT::getVectorVT(VTElt, NumElts);
25842     V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
25843   }
25844 
25845   // Extract back to 128/256-bit if we widened.
25846   if (WideVT != VT)
25847     V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
25848                     DAG.getIntPtrConstant(0, dl));
25849 
25850   return V;
25851 }
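// A minimal scalar sketch of the select fallback in LowerSIGN_EXTEND_Mask
// above: sign-extending an i1 element is the same as selecting -1 when the
// bit is set and 0 otherwise. One lane only; the helper name is illustrative.
#include <cstdint>

static int32_t signExtendI1Sketch(bool Bit) {
  return Bit ? -1 : 0;   // what DAG.getSelect(In, NegOne, Zero) produces per lane
}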
25852 
25853 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25854                                SelectionDAG &DAG) {
25855   SDValue In = Op->getOperand(0);
25856   MVT InVT = In.getSimpleValueType();
25857 
25858   if (InVT.getVectorElementType() == MVT::i1)
25859     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25860 
25861   assert(Subtarget.hasAVX() && "Expected AVX support");
25862   return LowerAVXExtend(Op, DAG, Subtarget);
25863 }
25864 
25865 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
25866 // For sign extend this needs to handle all vector sizes and SSE4.1 and
25867 // non-SSE4.1 targets. For zero extend this should only handle inputs of
25868 // MVT::v64i8 when BWI is not supported, but AVX512 is.
25869 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
25870                                         const X86Subtarget &Subtarget,
25871                                         SelectionDAG &DAG) {
25872   SDValue In = Op->getOperand(0);
25873   MVT VT = Op->getSimpleValueType(0);
25874   MVT InVT = In.getSimpleValueType();
25875 
25876   MVT SVT = VT.getVectorElementType();
25877   MVT InSVT = InVT.getVectorElementType();
25878   assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
25879 
25880   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
25881     return SDValue();
25882   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
25883     return SDValue();
25884   if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
25885       !(VT.is256BitVector() && Subtarget.hasAVX()) &&
25886       !(VT.is512BitVector() && Subtarget.hasAVX512()))
25887     return SDValue();
25888 
25889   SDLoc dl(Op);
25890   unsigned Opc = Op.getOpcode();
25891   unsigned NumElts = VT.getVectorNumElements();
25892 
25893   // For 256-bit vectors, we only need the lower (128-bit) half of the input.
25894   // For 512-bit vectors, we need 128-bits or 256-bits.
25895   if (InVT.getSizeInBits() > 128) {
25896     // Input needs to be at least the same number of elements as output, and
25897     // at least 128-bits.
25898     int InSize = InSVT.getSizeInBits() * NumElts;
25899     In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
25900     InVT = In.getSimpleValueType();
25901   }
25902 
25903   // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
25904   // so they are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
25905   // need to be handled here for 256/512-bit results.
25906   if (Subtarget.hasInt256()) {
25907     assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
25908 
25909     if (InVT.getVectorNumElements() != NumElts)
25910       return DAG.getNode(Op.getOpcode(), dl, VT, In);
25911 
25912     // FIXME: Apparently we create inreg operations that could be regular
25913     // extends.
25914     unsigned ExtOpc =
25915         Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
25916                                              : ISD::ZERO_EXTEND;
25917     return DAG.getNode(ExtOpc, dl, VT, In);
25918   }
25919 
25920   // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
25921   if (Subtarget.hasAVX()) {
25922     assert(VT.is256BitVector() && "256-bit vector expected");
25923     MVT HalfVT = VT.getHalfNumVectorElementsVT();
25924     int HalfNumElts = HalfVT.getVectorNumElements();
25925 
25926     unsigned NumSrcElts = InVT.getVectorNumElements();
25927     SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
25928     for (int i = 0; i != HalfNumElts; ++i)
25929       HiMask[i] = HalfNumElts + i;
25930 
25931     SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
25932     SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
25933     Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
25934     return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
25935   }
25936 
25937   // We should only get here for sign extend.
25938   assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
25939   assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
25940 
25941   // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
25942   SDValue Curr = In;
25943   SDValue SignExt = Curr;
25944 
25945   // As SRAI is only available on i16/i32 types, we expand only up to i32
25946   // and handle i64 separately.
25947   if (InVT != MVT::v4i32) {
25948     MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
25949 
25950     unsigned DestWidth = DestVT.getScalarSizeInBits();
25951     unsigned Scale = DestWidth / InSVT.getSizeInBits();
25952 
25953     unsigned InNumElts = InVT.getVectorNumElements();
25954     unsigned DestElts = DestVT.getVectorNumElements();
25955 
25956     // Build a shuffle mask that takes each input element and places it in the
25957     // MSBs of the new element size.
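    // As a rough illustration (one possible case): for a v16i8 input being
    // sign-extended in-register to v4i32, Scale = 4 and DestElts = 4, so the
    // mask becomes
    //   { -1, -1, -1, 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3 }
    // placing input byte i in the top byte of 32-bit element i; the VSRAI by
    // 24 below then smears the sign bit across the whole element.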
25958     SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
25959     for (unsigned i = 0; i != DestElts; ++i)
25960       Mask[i * Scale + (Scale - 1)] = i;
25961 
25962     Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
25963     Curr = DAG.getBitcast(DestVT, Curr);
25964 
25965     unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
25966     SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
25967                           DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
25968   }
25969 
25970   if (VT == MVT::v2i64) {
25971     assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
25972     SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
25973     SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
25974     SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
25975     SignExt = DAG.getBitcast(VT, SignExt);
25976   }
25977 
25978   return SignExt;
25979 }
25980 
25981 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25982                                 SelectionDAG &DAG) {
25983   MVT VT = Op->getSimpleValueType(0);
25984   SDValue In = Op->getOperand(0);
25985   MVT InVT = In.getSimpleValueType();
25986   SDLoc dl(Op);
25987 
25988   if (InVT.getVectorElementType() == MVT::i1)
25989     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25990 
25991   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
25992   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
25993          "Expected same number of elements");
25994   assert((VT.getVectorElementType() == MVT::i16 ||
25995           VT.getVectorElementType() == MVT::i32 ||
25996           VT.getVectorElementType() == MVT::i64) &&
25997          "Unexpected element type");
25998   assert((InVT.getVectorElementType() == MVT::i8 ||
25999           InVT.getVectorElementType() == MVT::i16 ||
26000           InVT.getVectorElementType() == MVT::i32) &&
26001          "Unexpected element type");
26002 
26003   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
26004     assert(InVT == MVT::v32i8 && "Unexpected VT!");
26005     return splitVectorIntUnary(Op, DAG);
26006   }
26007 
26008   if (Subtarget.hasInt256())
26009     return Op;
26010 
26011   // Optimize vectors in AVX mode:
26012   //   sign extend v8i16 to v8i32 and
26013   //               v4i32 to v4i64.
26014   //
26015   // Divide the input vector into two parts;
26016   // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
26017   // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
26018   // then concat the vectors back to the original VT.
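  // A rough sketch for VT = v8i32, In = v8i16 (one possible case):
  //   HalfVT   = v4i32
  //   OpLo     = sign_extend_vector_inreg(In)           ; low 4 elements
  //   ShufMask = {4, 5, 6, 7, -1, -1, -1, -1}           ; move high half down
  //   OpHi     = sign_extend_vector_inreg(shuffle(In))  ; high 4 elements
  //   result   = concat_vectors(OpLo, OpHi)             ; v8i32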
26019   MVT HalfVT = VT.getHalfNumVectorElementsVT();
26020   SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
26021 
26022   unsigned NumElems = InVT.getVectorNumElements();
26023   SmallVector<int,8> ShufMask(NumElems, -1);
26024   for (unsigned i = 0; i != NumElems/2; ++i)
26025     ShufMask[i] = i + NumElems/2;
26026 
26027   SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
26028   OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
26029 
26030   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
26031 }
26032 
26033 /// Change a vector store into a pair of half-size vector stores.
26034 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
26035   SDValue StoredVal = Store->getValue();
26036   assert((StoredVal.getValueType().is256BitVector() ||
26037           StoredVal.getValueType().is512BitVector()) &&
26038          "Expecting 256/512-bit op");
26039 
26040   // Splitting volatile memory ops is not allowed unless the operation was not
26041   // legal to begin with. Assume the input store is legal (this transform is
26042   // only used for targets with AVX). Note: It is possible that we have an
26043   // illegal type like v2i128, and so we could allow splitting a volatile store
26044   // in that case if that is important.
26045   if (!Store->isSimple())
26046     return SDValue();
26047 
26048   SDLoc DL(Store);
26049   SDValue Value0, Value1;
26050   std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
26051   unsigned HalfOffset = Value0.getValueType().getStoreSize();
26052   SDValue Ptr0 = Store->getBasePtr();
26053   SDValue Ptr1 =
26054       DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(HalfOffset), DL);
26055   SDValue Ch0 =
26056       DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
26057                    Store->getOriginalAlign(),
26058                    Store->getMemOperand()->getFlags());
26059   SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
26060                              Store->getPointerInfo().getWithOffset(HalfOffset),
26061                              Store->getOriginalAlign(),
26062                              Store->getMemOperand()->getFlags());
26063   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
26064 }
26065 
26066 /// Scalarize a vector store, bitcasting to StoreVT to determine the scalar
26067 /// type.
26068 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
26069                                     SelectionDAG &DAG) {
26070   SDValue StoredVal = Store->getValue();
26071   assert(StoreVT.is128BitVector() &&
26072          StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
26073   StoredVal = DAG.getBitcast(StoreVT, StoredVal);
26074 
26075   // Splitting volatile memory ops is not allowed unless the operation was not
26076   // legal to begin with. We are assuming the input op is legal (this transform
26077   // is only used for targets with AVX).
26078   if (!Store->isSimple())
26079     return SDValue();
26080 
26081   MVT StoreSVT = StoreVT.getScalarType();
26082   unsigned NumElems = StoreVT.getVectorNumElements();
26083   unsigned ScalarSize = StoreSVT.getStoreSize();
26084 
26085   SDLoc DL(Store);
26086   SmallVector<SDValue, 4> Stores;
26087   for (unsigned i = 0; i != NumElems; ++i) {
26088     unsigned Offset = i * ScalarSize;
26089     SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
26090                                            TypeSize::Fixed(Offset), DL);
26091     SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
26092                               DAG.getIntPtrConstant(i, DL));
26093     SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
26094                               Store->getPointerInfo().getWithOffset(Offset),
26095                               Store->getOriginalAlign(),
26096                               Store->getMemOperand()->getFlags());
26097     Stores.push_back(Ch);
26098   }
26099   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
26100 }
26101 
26102 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
26103                           SelectionDAG &DAG) {
26104   StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
26105   SDLoc dl(St);
26106   SDValue StoredVal = St->getValue();
26107 
26108   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
26109   if (StoredVal.getValueType().isVector() &&
26110       StoredVal.getValueType().getVectorElementType() == MVT::i1) {
26111     unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
26112     assert(NumElts <= 8 && "Unexpected VT");
26113     assert(!St->isTruncatingStore() && "Expected non-truncating store");
26114     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
26115            "Expected AVX512F without AVX512DQI");
26116 
26117     // We must pad with zeros to ensure we store zeroes to any unused bits.
26118     StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
26119                             DAG.getUNDEF(MVT::v16i1), StoredVal,
26120                             DAG.getIntPtrConstant(0, dl));
26121     StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
26122     StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
26123     // Make sure we store zeros in the extra bits.
26124     if (NumElts < 8)
26125       StoredVal = DAG.getZeroExtendInReg(
26126           StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
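    // Illustrative example (hypothetical value): storing v4i1 <1,0,1,1> widens
    // to v16i1, becomes i16 -> i8 with only the low 4 bits defined (0b1101),
    // and the zero-extend-in-reg above clears the undefined upper bits so the
    // byte written to memory is exactly 0b00001101.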
26127 
26128     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
26129                         St->getPointerInfo(), St->getOriginalAlign(),
26130                         St->getMemOperand()->getFlags());
26131   }
26132 
26133   if (St->isTruncatingStore())
26134     return SDValue();
26135 
26136   // If this is a 256-bit store of concatenated ops, we are better off splitting
26137   // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
26138   // and each half can execute independently. Some cores would split the op into
26139   // halves anyway, so the concat (vinsertf128) is purely an extra op.
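  // Informal example: a v8f32 store whose value is concat_vectors(%a, %b) of
  // two v4f32 halves would otherwise need a vinsertf128 to build the 256-bit
  // value; splitting emits two 128-bit stores of %a and %b at offsets 0 and 16.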
26140   MVT StoreVT = StoredVal.getSimpleValueType();
26141   if (StoreVT.is256BitVector() ||
26142       ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
26143        !Subtarget.hasBWI())) {
26144     SmallVector<SDValue, 4> CatOps;
26145     if (StoredVal.hasOneUse() &&
26146         collectConcatOps(StoredVal.getNode(), CatOps, DAG))
26147       return splitVectorStore(St, DAG);
26148     return SDValue();
26149   }
26150 
26151   if (StoreVT.is32BitVector())
26152     return SDValue();
26153 
26154   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26155   assert(StoreVT.is64BitVector() && "Unexpected VT");
26156   assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
26157              TargetLowering::TypeWidenVector &&
26158          "Unexpected type action!");
26159 
26160   EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
26161   StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
26162                           DAG.getUNDEF(StoreVT));
26163 
26164   if (Subtarget.hasSSE2()) {
26165     // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
26166     // and store it.
26167     MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
26168     MVT CastVT = MVT::getVectorVT(StVT, 2);
26169     StoredVal = DAG.getBitcast(CastVT, StoredVal);
26170     StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
26171                             DAG.getIntPtrConstant(0, dl));
26172 
26173     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
26174                         St->getPointerInfo(), St->getOriginalAlign(),
26175                         St->getMemOperand()->getFlags());
26176   }
26177   assert(Subtarget.hasSSE1() && "Expected SSE");
26178   SDVTList Tys = DAG.getVTList(MVT::Other);
26179   SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
26180   return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
26181                                  St->getMemOperand());
26182 }
26183 
26184 // Lower vector extended loads using a shuffle. If SSSE3 is not available, we
26185 // may emit an illegal shuffle but the expansion is still better than scalar
26186 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
26187 // we'll emit a shuffle and an arithmetic shift.
26188 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
26189 // TODO: It is possible to support ZExt by zeroing the undef values during
26190 // the shuffle phase or after the shuffle.
26191 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
26192                                  SelectionDAG &DAG) {
26193   MVT RegVT = Op.getSimpleValueType();
26194   assert(RegVT.isVector() && "We only custom lower vector loads.");
26195   assert(RegVT.isInteger() &&
26196          "We only custom lower integer vector loads.");
26197 
26198   LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
26199   SDLoc dl(Ld);
26200 
26201   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
26202   if (RegVT.getVectorElementType() == MVT::i1) {
26203     assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
26204     assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
26205     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
26206            "Expected AVX512F without AVX512DQI");
26207 
26208     SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
26209                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
26210                                 Ld->getMemOperand()->getFlags());
26211 
26212     // Replace chain users with the new chain.
26213     assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
26214 
26215     SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
26216     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
26217                       DAG.getBitcast(MVT::v16i1, Val),
26218                       DAG.getIntPtrConstant(0, dl));
26219     return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
26220   }
26221 
26222   return SDValue();
26223 }
26224 
26225 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
26226 /// each of which has no other use apart from the AND / OR.
26227 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
26228   Opc = Op.getOpcode();
26229   if (Opc != ISD::OR && Opc != ISD::AND)
26230     return false;
26231   return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
26232           Op.getOperand(0).hasOneUse() &&
26233           Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
26234           Op.getOperand(1).hasOneUse());
26235 }
26236 
26237 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
26238   SDValue Chain = Op.getOperand(0);
26239   SDValue Cond  = Op.getOperand(1);
26240   SDValue Dest  = Op.getOperand(2);
26241   SDLoc dl(Op);
26242 
26243   // Bail out when we don't have native compare instructions.
26244   if (Cond.getOpcode() == ISD::SETCC &&
26245       Cond.getOperand(0).getValueType() != MVT::f128 &&
26246       !isSoftFP16(Cond.getOperand(0).getValueType())) {
26247     SDValue LHS = Cond.getOperand(0);
26248     SDValue RHS = Cond.getOperand(1);
26249     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
26250 
26251     // Special case for
26252     // setcc([su]{add,sub,mul}o == 0)
26253     // setcc([su]{add,sub,mul}o != 1)
26254     if (ISD::isOverflowIntrOpRes(LHS) &&
26255         (CC == ISD::SETEQ || CC == ISD::SETNE) &&
26256         (isNullConstant(RHS) || isOneConstant(RHS))) {
26257       SDValue Value, Overflow;
26258       X86::CondCode X86Cond;
26259       std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
26260 
26261       if ((CC == ISD::SETEQ) == isNullConstant(RHS))
26262         X86Cond = X86::GetOppositeBranchCondition(X86Cond);
26263 
26264       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26265       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26266                          Overflow);
26267     }
26268 
26269     if (LHS.getSimpleValueType().isInteger()) {
26270       SDValue CCVal;
26271       SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
26272       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26273                          EFLAGS);
26274     }
26275 
26276     if (CC == ISD::SETOEQ) {
26277       // For FCMP_OEQ, we can emit
26278       // two branches instead of an explicit AND instruction with a
26279       // separate test. However, we only do this if this block doesn't
26280       // have a fall-through edge, because this requires an explicit
26281       // jmp when the condition is false.
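      // A rough sketch of the branch sequence this corresponds to (illustrative):
      //   ucomiss %xmm1, %xmm0
      //   jne     <false-block>   ; ZF == 0 -> operands compare not-equal
      //   jp      <false-block>   ; PF == 1 -> unordered (NaN operand)
      //   jmp     <true-block>    ; the retargeted unconditional branch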
26282       if (Op.getNode()->hasOneUse()) {
26283         SDNode *User = *Op.getNode()->use_begin();
26284         // Look for an unconditional branch following this conditional branch.
26285         // We need this because we need to reverse the successors in order
26286         // to implement FCMP_OEQ.
26287         if (User->getOpcode() == ISD::BR) {
26288           SDValue FalseBB = User->getOperand(1);
26289           SDNode *NewBR =
26290             DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
26291           assert(NewBR == User);
26292           (void)NewBR;
26293           Dest = FalseBB;
26294 
26295           SDValue Cmp =
26296               DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26297           SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
26298           Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
26299                               CCVal, Cmp);
26300           CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
26301           return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26302                              Cmp);
26303         }
26304       }
26305     } else if (CC == ISD::SETUNE) {
26306       // For FCMP_UNE, we can emit
26307       // two branches instead of an explicit OR instruction with a
26308       // separate test.
26309       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26310       SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
26311       Chain =
26312           DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
26313       CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
26314       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26315                          Cmp);
26316     } else {
26317       X86::CondCode X86Cond =
26318           TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
26319       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26320       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26321       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26322                          Cmp);
26323     }
26324   }
26325 
26326   if (ISD::isOverflowIntrOpRes(Cond)) {
26327     SDValue Value, Overflow;
26328     X86::CondCode X86Cond;
26329     std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
26330 
26331     SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26332     return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26333                        Overflow);
26334   }
26335 
26336   // Look past the truncate if the high bits are known zero.
26337   if (isTruncWithZeroHighBitsInput(Cond, DAG))
26338     Cond = Cond.getOperand(0);
26339 
26340   EVT CondVT = Cond.getValueType();
26341 
26342   // Add an AND with 1 if we don't already have one.
26343   if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
26344     Cond =
26345         DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
26346 
26347   SDValue LHS = Cond;
26348   SDValue RHS = DAG.getConstant(0, dl, CondVT);
26349 
26350   SDValue CCVal;
26351   SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
26352   return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26353                      EFLAGS);
26354 }
26355 
26356 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
26357 // Calls to _alloca are needed to probe the stack when allocating more than 4K
26358 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
26359 // that the guard pages used by the OS virtual memory manager are allocated in
26360 // the correct sequence.
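// As an illustration, allocating 12KB dynamically cannot simply subtract 12KB
// from the stack pointer: Windows commits stack pages one guard page at a
// time, so the probe routine touches the stack at 4K steps (SP-4K, SP-8K,
// SP-12K) before the new space is used.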
26361 SDValue
26362 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
26363                                            SelectionDAG &DAG) const {
26364   MachineFunction &MF = DAG.getMachineFunction();
26365   bool SplitStack = MF.shouldSplitStack();
26366   bool EmitStackProbeCall = hasStackProbeSymbol(MF);
26367   bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
26368                SplitStack || EmitStackProbeCall;
26369   SDLoc dl(Op);
26370 
26371   // Get the inputs.
26372   SDNode *Node = Op.getNode();
26373   SDValue Chain = Op.getOperand(0);
26374   SDValue Size  = Op.getOperand(1);
26375   MaybeAlign Alignment(Op.getConstantOperandVal(2));
26376   EVT VT = Node->getValueType(0);
26377 
26378   // Chain the dynamic stack allocation so that it doesn't modify the stack
26379   // pointer when other instructions are using the stack.
26380   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
26381 
26382   bool Is64Bit = Subtarget.is64Bit();
26383   MVT SPTy = getPointerTy(DAG.getDataLayout());
26384 
26385   SDValue Result;
26386   if (!Lower) {
26387     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26388     Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
26389     assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
26390                     " not tell us which reg is the stack pointer!");
26391 
26392     const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
26393     const Align StackAlign = TFI.getStackAlign();
26394     if (hasInlineStackProbe(MF)) {
26395       MachineRegisterInfo &MRI = MF.getRegInfo();
26396 
26397       const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
26398       Register Vreg = MRI.createVirtualRegister(AddrRegClass);
26399       Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
26400       Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
26401                            DAG.getRegister(Vreg, SPTy));
26402     } else {
26403       SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
26404       Chain = SP.getValue(1);
26405       Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
26406     }
26407     if (Alignment && *Alignment > StackAlign)
26408       Result =
26409           DAG.getNode(ISD::AND, dl, VT, Result,
26410                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
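    // For example (hypothetical request): with Alignment = 32 the mask is
    // ~31ULL, which rounds the new stack pointer down to a 32-byte boundary;
    // since the stack grows downward, rounding down can only over-allocate.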
26411     Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
26412   } else if (SplitStack) {
26413     MachineRegisterInfo &MRI = MF.getRegInfo();
26414 
26415     if (Is64Bit) {
26416       // The 64-bit implementation of segmented stacks needs to clobber both r10
26417       // and r11. This makes it impossible to use it along with nested parameters.
26418       const Function &F = MF.getFunction();
26419       for (const auto &A : F.args()) {
26420         if (A.hasNestAttr())
26421           report_fatal_error("Cannot use segmented stacks with functions that "
26422                              "have nested arguments.");
26423       }
26424     }
26425 
26426     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
26427     Register Vreg = MRI.createVirtualRegister(AddrRegClass);
26428     Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
26429     Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
26430                                 DAG.getRegister(Vreg, SPTy));
26431   } else {
26432     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
26433     Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
26434     MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
26435 
26436     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26437     Register SPReg = RegInfo->getStackRegister();
26438     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
26439     Chain = SP.getValue(1);
26440 
26441     if (Alignment) {
26442       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
26443                        DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
26444       Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
26445     }
26446 
26447     Result = SP;
26448   }
26449 
26450   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
26451 
26452   SDValue Ops[2] = {Result, Chain};
26453   return DAG.getMergeValues(Ops, dl);
26454 }
26455 
26456 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
26457   MachineFunction &MF = DAG.getMachineFunction();
26458   auto PtrVT = getPointerTy(MF.getDataLayout());
26459   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
26460 
26461   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
26462   SDLoc DL(Op);
26463 
26464   if (!Subtarget.is64Bit() ||
26465       Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
26466     // vastart just stores the address of the VarArgsFrameIndex slot into the
26467     // memory location argument.
26468     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
26469     return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
26470                         MachinePointerInfo(SV));
26471   }
26472 
26473   // __va_list_tag:
26474   //   gp_offset         (0 - 6 * 8)
26475   //   fp_offset         (48 - 48 + 8 * 16)
26476   //   overflow_arg_area (point to parameters coming in memory).
26477   //   reg_save_area
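  // A rough sketch of the record being initialized below (offsets shown for
  // the LP64 case; the X32 variant uses 4-byte pointers):
  //   struct __va_list_tag {
  //     unsigned gp_offset;          // byte offset 0
  //     unsigned fp_offset;          // byte offset 4
  //     void    *overflow_arg_area;  // byte offset 8
  //     void    *reg_save_area;      // byte offset 16
  //   };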
26478   SmallVector<SDValue, 8> MemOps;
26479   SDValue FIN = Op.getOperand(1);
26480   // Store gp_offset
26481   SDValue Store = DAG.getStore(
26482       Op.getOperand(0), DL,
26483       DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
26484       MachinePointerInfo(SV));
26485   MemOps.push_back(Store);
26486 
26487   // Store fp_offset
26488   FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::Fixed(4), DL);
26489   Store = DAG.getStore(
26490       Op.getOperand(0), DL,
26491       DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
26492       MachinePointerInfo(SV, 4));
26493   MemOps.push_back(Store);
26494 
26495   // Store ptr to overflow_arg_area
26496   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
26497   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
26498   Store =
26499       DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
26500   MemOps.push_back(Store);
26501 
26502   // Store ptr to reg_save_area.
26503   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
26504       Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
26505   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
26506   Store = DAG.getStore(
26507       Op.getOperand(0), DL, RSFIN, FIN,
26508       MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
26509   MemOps.push_back(Store);
26510   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
26511 }
26512 
26513 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
26514   assert(Subtarget.is64Bit() &&
26515          "LowerVAARG only handles 64-bit va_arg!");
26516   assert(Op.getNumOperands() == 4);
26517 
26518   MachineFunction &MF = DAG.getMachineFunction();
26519   if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
26520     // The Win64 ABI uses char* instead of a structure.
26521     return DAG.expandVAArg(Op.getNode());
26522 
26523   SDValue Chain = Op.getOperand(0);
26524   SDValue SrcPtr = Op.getOperand(1);
26525   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
26526   unsigned Align = Op.getConstantOperandVal(3);
26527   SDLoc dl(Op);
26528 
26529   EVT ArgVT = Op.getNode()->getValueType(0);
26530   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26531   uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
26532   uint8_t ArgMode;
26533 
26534   // Decide which area this value should be read from.
26535   // TODO: Implement the AMD64 ABI in its entirety. This simple
26536   // selection mechanism works only for the basic types.
26537   assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
26538   if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
26539     ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
26540   } else {
26541     assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
26542            "Unhandled argument type in LowerVAARG");
26543     ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
26544   }
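  // As a concrete illustration (hypothetical C callers): va_arg(ap, double)
  // takes the ArgMode = 2 path (fp_offset / XMM save area), while
  // va_arg(ap, long) takes ArgMode = 1 (gp_offset / GPR save area); values
  // that no longer fit in the register save areas come from overflow_arg_area.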
26545 
26546   if (ArgMode == 2) {
26547     // Make sure using fp_offset makes sense.
26548     assert(!Subtarget.useSoftFloat() &&
26549            !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
26550            Subtarget.hasSSE1());
26551   }
26552 
26553   // Insert a VAARG node into the DAG.
26554   // VAARG returns two values: the variable argument address and the chain.
26555   SDValue InstOps[] = {Chain, SrcPtr,
26556                        DAG.getTargetConstant(ArgSize, dl, MVT::i32),
26557                        DAG.getTargetConstant(ArgMode, dl, MVT::i8),
26558                        DAG.getTargetConstant(Align, dl, MVT::i32)};
26559   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
26560   SDValue VAARG = DAG.getMemIntrinsicNode(
26561       Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
26562       VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
26563       /*Alignment=*/std::nullopt,
26564       MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
26565   Chain = VAARG.getValue(1);
26566 
26567   // Load the next argument and return it
26568   return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
26569 }
26570 
26571 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
26572                            SelectionDAG &DAG) {
26573   // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
26574   // where a va_list is still an i8*.
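  // Informally, for the LP64 case below the copy moves sizeof(__va_list_tag)
  //   = 4 (gp_offset) + 4 (fp_offset) + 8 + 8 = 24 bytes,
  // while the X32 ABI uses 4-byte pointers and copies 16 bytes.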
26575   assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
26576   if (Subtarget.isCallingConvWin64(
26577         DAG.getMachineFunction().getFunction().getCallingConv()))
26578     // Probably a Win64 va_copy.
26579     return DAG.expandVACopy(Op.getNode());
26580 
26581   SDValue Chain = Op.getOperand(0);
26582   SDValue DstPtr = Op.getOperand(1);
26583   SDValue SrcPtr = Op.getOperand(2);
26584   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
26585   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
26586   SDLoc DL(Op);
26587 
26588   return DAG.getMemcpy(
26589       Chain, DL, DstPtr, SrcPtr,
26590       DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
26591       Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
26592       false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
26593 }
26594 
26595 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
26596 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
26597   switch (Opc) {
26598   case ISD::SHL:
26599   case X86ISD::VSHL:
26600   case X86ISD::VSHLI:
26601     return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
26602   case ISD::SRL:
26603   case X86ISD::VSRL:
26604   case X86ISD::VSRLI:
26605     return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
26606   case ISD::SRA:
26607   case X86ISD::VSRA:
26608   case X86ISD::VSRAI:
26609     return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
26610   }
26611   llvm_unreachable("Unknown target vector shift node");
26612 }
26613 
26614 /// Handle vector element shifts where the shift amount is a constant.
26615 /// Takes immediate version of shift as input.
26616 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
26617                                           SDValue SrcOp, uint64_t ShiftAmt,
26618                                           SelectionDAG &DAG) {
26619   MVT ElementType = VT.getVectorElementType();
26620 
26621   // Bitcast the source vector to the output type, this is mainly necessary for
26622   // vXi8/vXi64 shifts.
26623   if (VT != SrcOp.getSimpleValueType())
26624     SrcOp = DAG.getBitcast(VT, SrcOp);
26625 
26626   // Fold this packed shift into its first operand if ShiftAmt is 0.
26627   if (ShiftAmt == 0)
26628     return SrcOp;
26629 
26630   // Check for ShiftAmt >= element width
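  // For example, a logical shift of v8i16 by 16 or more yields zero, while an
  // arithmetic shift is clamped to 15 so each element is filled with its sign
  // bit (matching the PSRAW-family behaviour for out-of-range counts).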
26631   if (ShiftAmt >= ElementType.getSizeInBits()) {
26632     if (Opc == X86ISD::VSRAI)
26633       ShiftAmt = ElementType.getSizeInBits() - 1;
26634     else
26635       return DAG.getConstant(0, dl, VT);
26636   }
26637 
26638   assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
26639          && "Unknown target vector shift-by-constant node");
26640 
26641   // Fold this packed vector shift into a build vector if SrcOp is a
26642   // vector of Constants or UNDEFs.
26643   if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
26644     unsigned ShiftOpc;
26645     switch (Opc) {
26646     default: llvm_unreachable("Unknown opcode!");
26647     case X86ISD::VSHLI:
26648       ShiftOpc = ISD::SHL;
26649       break;
26650     case X86ISD::VSRLI:
26651       ShiftOpc = ISD::SRL;
26652       break;
26653     case X86ISD::VSRAI:
26654       ShiftOpc = ISD::SRA;
26655       break;
26656     }
26657 
26658     SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
26659     if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
26660       return C;
26661   }
26662 
26663   return DAG.getNode(Opc, dl, VT, SrcOp,
26664                      DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
26665 }
26666 
26667 /// Handle vector element shifts by a splat shift amount
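/// A rough operational sketch: the SSE/AVX uniform shifts (PSLLW/PSRLD/PSRAD
/// etc. with an XMM count operand) consume the entire low 64 bits of the count
/// register, so the splatted amount must end up zero-extended into the bottom
/// 64 bits of a 128-bit vector before the variable-shift node is created.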
26668 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
26669                                    SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
26670                                    const X86Subtarget &Subtarget,
26671                                    SelectionDAG &DAG) {
26672   MVT AmtVT = ShAmt.getSimpleValueType();
26673   assert(AmtVT.isVector() && "Vector shift type mismatch");
26674   assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
26675          "Illegal vector splat index");
26676 
26677   // Move the splat element to the bottom element.
26678   if (ShAmtIdx != 0) {
26679     SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
26680     Mask[0] = ShAmtIdx;
26681     ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
26682   }
26683 
26684   // Peek through any zext node if we can get back to a 128-bit source.
26685   if (AmtVT.getScalarSizeInBits() == 64 &&
26686       (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
26687        ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
26688       ShAmt.getOperand(0).getValueType().isSimple() &&
26689       ShAmt.getOperand(0).getValueType().is128BitVector()) {
26690     ShAmt = ShAmt.getOperand(0);
26691     AmtVT = ShAmt.getSimpleValueType();
26692   }
26693 
26694   // See if we can mask off the upper elements using the existing source node.
26695   // The shift uses the entire lower 64-bits of the amount vector, so no need to
26696   // do this for vXi64 types.
26697   bool IsMasked = false;
26698   if (AmtVT.getScalarSizeInBits() < 64) {
26699     if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
26700         ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
26701       // If the shift amount has come from a scalar, then zero-extend the scalar
26702       // before moving to the vector.
26703       ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
26704       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
26705       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
26706       AmtVT = MVT::v4i32;
26707       IsMasked = true;
26708     } else if (ShAmt.getOpcode() == ISD::AND) {
26709       // See if the shift amount is already masked (e.g. for rotation modulo),
26710       // then we can zero-extend it by setting all the other mask elements to
26711       // zero.
26712       SmallVector<SDValue> MaskElts(
26713           AmtVT.getVectorNumElements(),
26714           DAG.getConstant(0, dl, AmtVT.getScalarType()));
26715       MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
26716       SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
26717       if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
26718                                              {ShAmt.getOperand(1), Mask}))) {
26719         ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
26720         IsMasked = true;
26721       }
26722     }
26723   }
26724 
26725   // Extract if the shift amount vector is larger than 128-bits.
26726   if (AmtVT.getSizeInBits() > 128) {
26727     ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
26728     AmtVT = ShAmt.getSimpleValueType();
26729   }
26730 
26731   // Zero-extend bottom element to v2i64 vector type, either by extension or
26732   // shuffle masking.
26733   if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
26734     if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
26735                                 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
26736       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
26737     } else if (Subtarget.hasSSE41()) {
26738       ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
26739                           MVT::v2i64, ShAmt);
26740     } else {
26741       SDValue ByteShift = DAG.getTargetConstant(
26742           (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
26743       ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
26744       ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
26745                           ByteShift);
26746       ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
26747                           ByteShift);
26748     }
26749   }
26750 
26751   // Change opcode to non-immediate version.
26752   Opc = getTargetVShiftUniformOpcode(Opc, true);
26753 
26754   // The return type has to be a 128-bit type with the same element
26755   // type as the input type.
26756   MVT EltVT = VT.getVectorElementType();
26757   MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
26758 
26759   ShAmt = DAG.getBitcast(ShVT, ShAmt);
26760   return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
26761 }
26762 
26763 /// Return Mask with the necessary casting or extending
26764 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
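/// For instance (informal): an MVT::i8 mask paired with a 4-element operation
/// is bitcast to v8i1 and its low 4 lanes are extracted to form the v4i1
/// MaskVT; an all-ones or all-zeros constant mask is folded directly.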
26765 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
26766                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
26767                            const SDLoc &dl) {
26768 
26769   if (isAllOnesConstant(Mask))
26770     return DAG.getConstant(1, dl, MaskVT);
26771   if (X86::isZeroNode(Mask))
26772     return DAG.getConstant(0, dl, MaskVT);
26773 
26774   assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
26775 
26776   if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
26777     assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
26778     assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
26779     // In 32-bit mode, a bitcast of i64 is illegal, so extend/split it.
26780     SDValue Lo, Hi;
26781     Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
26782                         DAG.getConstant(0, dl, MVT::i32));
26783     Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
26784                         DAG.getConstant(1, dl, MVT::i32));
26785 
26786     Lo = DAG.getBitcast(MVT::v32i1, Lo);
26787     Hi = DAG.getBitcast(MVT::v32i1, Hi);
26788 
26789     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26790   } else {
26791     MVT BitcastVT = MVT::getVectorVT(MVT::i1,
26792                                      Mask.getSimpleValueType().getSizeInBits());
26793     // In the case where MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
26794     // are extracted by EXTRACT_SUBVECTOR.
26795     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
26796                        DAG.getBitcast(BitcastVT, Mask),
26797                        DAG.getIntPtrConstant(0, dl));
26798   }
26799 }
26800 
26801 /// Return (and \p Op, \p Mask) for compare instructions or
26802 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
26803 /// necessary casting or extending for \p Mask when lowering masking intrinsics
26804 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
26805                                     SDValue PreservedSrc,
26806                                     const X86Subtarget &Subtarget,
26807                                     SelectionDAG &DAG) {
26808   MVT VT = Op.getSimpleValueType();
26809   MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26810   unsigned OpcodeSelect = ISD::VSELECT;
26811   SDLoc dl(Op);
26812 
26813   if (isAllOnesConstant(Mask))
26814     return Op;
26815 
26816   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26817 
26818   if (PreservedSrc.isUndef())
26819     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
26820   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
26821 }
26822 
26823 /// Creates an SDNode for a predicated scalar operation.
26824 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
26825 /// The mask is coming as MVT::i8 and it should be transformed
26826 /// to MVT::v1i1 while lowering masking intrinsics.
26827 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
26828 /// "X86select" instead of "vselect". We just can't create the "vselect" node
26829 /// for a scalar instruction.
26830 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
26831                                     SDValue PreservedSrc,
26832                                     const X86Subtarget &Subtarget,
26833                                     SelectionDAG &DAG) {
26834 
26835   if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
26836     if (MaskConst->getZExtValue() & 0x1)
26837       return Op;
26838 
26839   MVT VT = Op.getSimpleValueType();
26840   SDLoc dl(Op);
26841 
26842   assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
26843   SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
26844                               DAG.getBitcast(MVT::v8i1, Mask),
26845                               DAG.getIntPtrConstant(0, dl));
26846   if (Op.getOpcode() == X86ISD::FSETCCM ||
26847       Op.getOpcode() == X86ISD::FSETCCM_SAE ||
26848       Op.getOpcode() == X86ISD::VFPCLASSS)
26849     return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
26850 
26851   if (PreservedSrc.isUndef())
26852     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
26853   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
26854 }
26855 
26856 static int getSEHRegistrationNodeSize(const Function *Fn) {
26857   if (!Fn->hasPersonalityFn())
26858     report_fatal_error(
26859         "querying registration node size for function without personality");
26860   // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
26861   // WinEHStatePass for the full struct definition.
26862   switch (classifyEHPersonality(Fn->getPersonalityFn())) {
26863   case EHPersonality::MSVC_X86SEH: return 24;
26864   case EHPersonality::MSVC_CXX: return 16;
26865   default: break;
26866   }
26867   report_fatal_error(
26868       "can only recover FP for 32-bit MSVC EH personality functions");
26869 }
26870 
26871 /// When the MSVC runtime transfers control to us, either to an outlined
26872 /// function or when returning to a parent frame after catching an exception, we
26873 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
26874 /// Here's the math:
26875 ///   RegNodeBase = EntryEBP - RegNodeSize
26876 ///   ParentFP = RegNodeBase - ParentFrameOffset
26877 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
26878 /// subtracting the offset (negative on x86) takes us back to the parent FP.
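/// A purely illustrative example with made-up numbers: for the 32-bit MSVC
/// C++ EH personality RegNodeSize is 16, so with EntryEBP = 0x1000 and a
/// ParentFrameOffset of -0x20 the math gives
///   RegNodeBase = 0x1000 - 16      = 0x0FF0
///   ParentFP    = 0x0FF0 - (-0x20) = 0x1010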
26879 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
26880                                    SDValue EntryEBP) {
26881   MachineFunction &MF = DAG.getMachineFunction();
26882   SDLoc dl;
26883 
26884   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26885   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26886 
26887   // It's possible that the parent function no longer has a personality function
26888   // if the exceptional code was optimized away, in which case we just return
26889   // the incoming EBP.
26890   if (!Fn->hasPersonalityFn())
26891     return EntryEBP;
26892 
26893   // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
26894   // registration, or the .set_setframe offset.
26895   MCSymbol *OffsetSym =
26896       MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
26897           GlobalValue::dropLLVMManglingEscape(Fn->getName()));
26898   SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
26899   SDValue ParentFrameOffset =
26900       DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
26901 
26902   // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
26903   // prologue to RBP in the parent function.
26904   const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
26905   if (Subtarget.is64Bit())
26906     return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
26907 
26908   int RegNodeSize = getSEHRegistrationNodeSize(Fn);
26909   // RegNodeBase = EntryEBP - RegNodeSize
26910   // ParentFP = RegNodeBase - ParentFrameOffset
26911   SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
26912                                     DAG.getConstant(RegNodeSize, dl, PtrVT));
26913   return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
26914 }
26915 
26916 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
26917                                                    SelectionDAG &DAG) const {
26918   // Helper to detect if the operand is CUR_DIRECTION rounding mode.
26919   auto isRoundModeCurDirection = [](SDValue Rnd) {
26920     if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
26921       return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
26922 
26923     return false;
26924   };
26925   auto isRoundModeSAE = [](SDValue Rnd) {
26926     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
26927       unsigned RC = C->getZExtValue();
26928       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
26929         // Clear the NO_EXC bit and check remaining bits.
26930         RC ^= X86::STATIC_ROUNDING::NO_EXC;
26931         // As a convenience we allow either no other bits to be set or an
26932         // explicit current-direction rounding mode.
26933         return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
26934       }
26935     }
26936 
26937     return false;
26938   };
26939   auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
26940     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
26941       RC = C->getZExtValue();
26942       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
26943         // Clear the NO_EXC bit and check remaining bits.
26944         RC ^= X86::STATIC_ROUNDING::NO_EXC;
26945         return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
26946                RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
26947                RC == X86::STATIC_ROUNDING::TO_POS_INF ||
26948                RC == X86::STATIC_ROUNDING::TO_ZERO;
26949       }
26950     }
26951 
26952     return false;
26953   };
26954 
26955   SDLoc dl(Op);
26956   unsigned IntNo = Op.getConstantOperandVal(0);
26957   MVT VT = Op.getSimpleValueType();
26958   const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
26959 
26960   // Propagate flags from original node to transformed node(s).
26961   SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
26962 
26963   if (IntrData) {
26964     switch(IntrData->Type) {
26965     case INTR_TYPE_1OP: {
26966       // We specify 2 possible opcodes for intrinsics with rounding modes.
26967       // First, we check if the intrinsic may have non-default rounding mode,
26968       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
26969       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
26970       if (IntrWithRoundingModeOpcode != 0) {
26971         SDValue Rnd = Op.getOperand(2);
26972         unsigned RC = 0;
26973         if (isRoundModeSAEToX(Rnd, RC))
26974           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
26975                              Op.getOperand(1),
26976                              DAG.getTargetConstant(RC, dl, MVT::i32));
26977         if (!isRoundModeCurDirection(Rnd))
26978           return SDValue();
26979       }
26980       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
26981                          Op.getOperand(1));
26982     }
26983     case INTR_TYPE_1OP_SAE: {
26984       SDValue Sae = Op.getOperand(2);
26985 
26986       unsigned Opc;
26987       if (isRoundModeCurDirection(Sae))
26988         Opc = IntrData->Opc0;
26989       else if (isRoundModeSAE(Sae))
26990         Opc = IntrData->Opc1;
26991       else
26992         return SDValue();
26993 
26994       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
26995     }
26996     case INTR_TYPE_2OP: {
26997       SDValue Src2 = Op.getOperand(2);
26998 
26999       // We specify 2 possible opcodes for intrinsics with rounding modes.
27000       // First, we check if the intrinsic may have non-default rounding mode,
27001       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27002       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27003       if (IntrWithRoundingModeOpcode != 0) {
27004         SDValue Rnd = Op.getOperand(3);
27005         unsigned RC = 0;
27006         if (isRoundModeSAEToX(Rnd, RC))
27007           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27008                              Op.getOperand(1), Src2,
27009                              DAG.getTargetConstant(RC, dl, MVT::i32));
27010         if (!isRoundModeCurDirection(Rnd))
27011           return SDValue();
27012       }
27013 
27014       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27015                          Op.getOperand(1), Src2);
27016     }
27017     case INTR_TYPE_2OP_SAE: {
27018       SDValue Sae = Op.getOperand(3);
27019 
27020       unsigned Opc;
27021       if (isRoundModeCurDirection(Sae))
27022         Opc = IntrData->Opc0;
27023       else if (isRoundModeSAE(Sae))
27024         Opc = IntrData->Opc1;
27025       else
27026         return SDValue();
27027 
27028       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
27029                          Op.getOperand(2));
27030     }
27031     case INTR_TYPE_3OP:
27032     case INTR_TYPE_3OP_IMM8: {
27033       SDValue Src1 = Op.getOperand(1);
27034       SDValue Src2 = Op.getOperand(2);
27035       SDValue Src3 = Op.getOperand(3);
27036 
27037       if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
27038           Src3.getValueType() != MVT::i8) {
27039         Src3 = DAG.getTargetConstant(
27040             cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
27041       }
27042 
27043       // We specify 2 possible opcodes for intrinsics with rounding modes.
27044       // First, we check if the intrinsic may have a non-default rounding mode
27045       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27046       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27047       if (IntrWithRoundingModeOpcode != 0) {
27048         SDValue Rnd = Op.getOperand(4);
27049         unsigned RC = 0;
27050         if (isRoundModeSAEToX(Rnd, RC))
27051           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27052                              Src1, Src2, Src3,
27053                              DAG.getTargetConstant(RC, dl, MVT::i32));
27054         if (!isRoundModeCurDirection(Rnd))
27055           return SDValue();
27056       }
27057 
27058       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27059                          {Src1, Src2, Src3});
27060     }
27061     case INTR_TYPE_4OP_IMM8: {
27062       assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
27063       SDValue Src4 = Op.getOperand(4);
27064       if (Src4.getValueType() != MVT::i8) {
27065         Src4 = DAG.getTargetConstant(
27066             cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
27067       }
27068 
27069       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27070                          Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
27071                          Src4);
27072     }
27073     case INTR_TYPE_1OP_MASK: {
27074       SDValue Src = Op.getOperand(1);
27075       SDValue PassThru = Op.getOperand(2);
27076       SDValue Mask = Op.getOperand(3);
27077       // We add rounding mode to the Node when
27078       //   - RC Opcode is specified and
27079       //   - RC is not "current direction".
27080       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27081       if (IntrWithRoundingModeOpcode != 0) {
27082         SDValue Rnd = Op.getOperand(4);
27083         unsigned RC = 0;
27084         if (isRoundModeSAEToX(Rnd, RC))
27085           return getVectorMaskingNode(
27086               DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27087                           Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
27088               Mask, PassThru, Subtarget, DAG);
27089         if (!isRoundModeCurDirection(Rnd))
27090           return SDValue();
27091       }
27092       return getVectorMaskingNode(
27093           DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
27094           Subtarget, DAG);
27095     }
27096     case INTR_TYPE_1OP_MASK_SAE: {
27097       SDValue Src = Op.getOperand(1);
27098       SDValue PassThru = Op.getOperand(2);
27099       SDValue Mask = Op.getOperand(3);
27100       SDValue Rnd = Op.getOperand(4);
27101 
27102       unsigned Opc;
27103       if (isRoundModeCurDirection(Rnd))
27104         Opc = IntrData->Opc0;
27105       else if (isRoundModeSAE(Rnd))
27106         Opc = IntrData->Opc1;
27107       else
27108         return SDValue();
27109 
27110       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
27111                                   Subtarget, DAG);
27112     }
27113     case INTR_TYPE_SCALAR_MASK: {
27114       SDValue Src1 = Op.getOperand(1);
27115       SDValue Src2 = Op.getOperand(2);
27116       SDValue passThru = Op.getOperand(3);
27117       SDValue Mask = Op.getOperand(4);
27118       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27119       // There are 2 kinds of intrinsics in this group:
27120       // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands.
27121       // (2) With rounding mode and sae - 7 operands.
27122       bool HasRounding = IntrWithRoundingModeOpcode != 0;
27123       if (Op.getNumOperands() == (5U + HasRounding)) {
27124         if (HasRounding) {
27125           SDValue Rnd = Op.getOperand(5);
27126           unsigned RC = 0;
27127           if (isRoundModeSAEToX(Rnd, RC))
27128             return getScalarMaskingNode(
27129                 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
27130                             DAG.getTargetConstant(RC, dl, MVT::i32)),
27131                 Mask, passThru, Subtarget, DAG);
27132           if (!isRoundModeCurDirection(Rnd))
27133             return SDValue();
27134         }
27135         return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
27136                                                 Src2),
27137                                     Mask, passThru, Subtarget, DAG);
27138       }
27139 
27140       assert(Op.getNumOperands() == (6U + HasRounding) &&
27141              "Unexpected intrinsic form");
27142       SDValue RoundingMode = Op.getOperand(5);
27143       unsigned Opc = IntrData->Opc0;
27144       if (HasRounding) {
27145         SDValue Sae = Op.getOperand(6);
27146         if (isRoundModeSAE(Sae))
27147           Opc = IntrWithRoundingModeOpcode;
27148         else if (!isRoundModeCurDirection(Sae))
27149           return SDValue();
27150       }
27151       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
27152                                               Src2, RoundingMode),
27153                                   Mask, passThru, Subtarget, DAG);
27154     }
27155     case INTR_TYPE_SCALAR_MASK_RND: {
27156       SDValue Src1 = Op.getOperand(1);
27157       SDValue Src2 = Op.getOperand(2);
27158       SDValue passThru = Op.getOperand(3);
27159       SDValue Mask = Op.getOperand(4);
27160       SDValue Rnd = Op.getOperand(5);
27161 
27162       SDValue NewOp;
27163       unsigned RC = 0;
27164       if (isRoundModeCurDirection(Rnd))
27165         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
27166       else if (isRoundModeSAEToX(Rnd, RC))
27167         NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
27168                             DAG.getTargetConstant(RC, dl, MVT::i32));
27169       else
27170         return SDValue();
27171 
27172       return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
27173     }
27174     case INTR_TYPE_SCALAR_MASK_SAE: {
27175       SDValue Src1 = Op.getOperand(1);
27176       SDValue Src2 = Op.getOperand(2);
27177       SDValue passThru = Op.getOperand(3);
27178       SDValue Mask = Op.getOperand(4);
27179       SDValue Sae = Op.getOperand(5);
27180       unsigned Opc;
27181       if (isRoundModeCurDirection(Sae))
27182         Opc = IntrData->Opc0;
27183       else if (isRoundModeSAE(Sae))
27184         Opc = IntrData->Opc1;
27185       else
27186         return SDValue();
27187 
27188       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
27189                                   Mask, passThru, Subtarget, DAG);
27190     }
27191     case INTR_TYPE_2OP_MASK: {
27192       SDValue Src1 = Op.getOperand(1);
27193       SDValue Src2 = Op.getOperand(2);
27194       SDValue PassThru = Op.getOperand(3);
27195       SDValue Mask = Op.getOperand(4);
27196       SDValue NewOp;
27197       if (IntrData->Opc1 != 0) {
27198         SDValue Rnd = Op.getOperand(5);
27199         unsigned RC = 0;
27200         if (isRoundModeSAEToX(Rnd, RC))
27201           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
27202                               DAG.getTargetConstant(RC, dl, MVT::i32));
27203         else if (!isRoundModeCurDirection(Rnd))
27204           return SDValue();
27205       }
27206       if (!NewOp)
27207         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
27208       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
27209     }
27210     case INTR_TYPE_2OP_MASK_SAE: {
27211       SDValue Src1 = Op.getOperand(1);
27212       SDValue Src2 = Op.getOperand(2);
27213       SDValue PassThru = Op.getOperand(3);
27214       SDValue Mask = Op.getOperand(4);
27215 
27216       unsigned Opc = IntrData->Opc0;
27217       if (IntrData->Opc1 != 0) {
27218         SDValue Sae = Op.getOperand(5);
27219         if (isRoundModeSAE(Sae))
27220           Opc = IntrData->Opc1;
27221         else if (!isRoundModeCurDirection(Sae))
27222           return SDValue();
27223       }
27224 
27225       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
27226                                   Mask, PassThru, Subtarget, DAG);
27227     }
27228     case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
27229       SDValue Src1 = Op.getOperand(1);
27230       SDValue Src2 = Op.getOperand(2);
27231       SDValue Src3 = Op.getOperand(3);
27232       SDValue PassThru = Op.getOperand(4);
27233       SDValue Mask = Op.getOperand(5);
27234       SDValue Sae = Op.getOperand(6);
27235       unsigned Opc;
27236       if (isRoundModeCurDirection(Sae))
27237         Opc = IntrData->Opc0;
27238       else if (isRoundModeSAE(Sae))
27239         Opc = IntrData->Opc1;
27240       else
27241         return SDValue();
27242 
27243       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
27244                                   Mask, PassThru, Subtarget, DAG);
27245     }
27246     case INTR_TYPE_3OP_MASK_SAE: {
27247       SDValue Src1 = Op.getOperand(1);
27248       SDValue Src2 = Op.getOperand(2);
27249       SDValue Src3 = Op.getOperand(3);
27250       SDValue PassThru = Op.getOperand(4);
27251       SDValue Mask = Op.getOperand(5);
27252 
27253       unsigned Opc = IntrData->Opc0;
27254       if (IntrData->Opc1 != 0) {
27255         SDValue Sae = Op.getOperand(6);
27256         if (isRoundModeSAE(Sae))
27257           Opc = IntrData->Opc1;
27258         else if (!isRoundModeCurDirection(Sae))
27259           return SDValue();
27260       }
27261       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
27262                                   Mask, PassThru, Subtarget, DAG);
27263     }
27264     case BLENDV: {
27265       SDValue Src1 = Op.getOperand(1);
27266       SDValue Src2 = Op.getOperand(2);
27267       SDValue Src3 = Op.getOperand(3);
27268 
27269       EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
27270       Src3 = DAG.getBitcast(MaskVT, Src3);
27271 
27272       // Reverse the operands to match VSELECT order.
27273       return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
27274     }
27275     case VPERM_2OP : {
27276       SDValue Src1 = Op.getOperand(1);
27277       SDValue Src2 = Op.getOperand(2);
27278 
27279       // Swap Src1 and Src2 in the node creation.
27280       return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
27281     }
27282     case CFMA_OP_MASKZ:
27283     case CFMA_OP_MASK: {
27284       SDValue Src1 = Op.getOperand(1);
27285       SDValue Src2 = Op.getOperand(2);
27286       SDValue Src3 = Op.getOperand(3);
27287       SDValue Mask = Op.getOperand(4);
27288       MVT VT = Op.getSimpleValueType();
27289 
27290       SDValue PassThru = Src3;
27291       if (IntrData->Type == CFMA_OP_MASKZ)
27292         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
27293 
27294       // We add rounding mode to the Node when
27295       //   - RC Opcode is specified and
27296       //   - RC is not "current direction".
27297       SDValue NewOp;
27298       if (IntrData->Opc1 != 0) {
27299         SDValue Rnd = Op.getOperand(5);
27300         unsigned RC = 0;
27301         if (isRoundModeSAEToX(Rnd, RC))
27302           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
27303                               DAG.getTargetConstant(RC, dl, MVT::i32));
27304         else if (!isRoundModeCurDirection(Rnd))
27305           return SDValue();
27306       }
27307       if (!NewOp)
27308         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
27309       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
27310     }
27311     case IFMA_OP:
27312       // NOTE: We need to swizzle the operands to pass the multiply operands
27313       // first.
27314       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27315                          Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
27316     case FPCLASSS: {
27317       SDValue Src1 = Op.getOperand(1);
27318       SDValue Imm = Op.getOperand(2);
27319       SDValue Mask = Op.getOperand(3);
27320       SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
27321       SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
27322                                                  Subtarget, DAG);
27323       // Need to fill with zeros to ensure the bitcast will produce zeroes
27324       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27325       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
27326                                 DAG.getConstant(0, dl, MVT::v8i1),
27327                                 FPclassMask, DAG.getIntPtrConstant(0, dl));
27328       return DAG.getBitcast(MVT::i8, Ins);
27329     }
27330 
27331     case CMP_MASK_CC: {
27332       MVT MaskVT = Op.getSimpleValueType();
27333       SDValue CC = Op.getOperand(3);
27334       SDValue Mask = Op.getOperand(4);
27335       // We specify 2 possible opcodes for intrinsics with rounding modes.
27336       // First, we check if the intrinsic may have a non-default rounding mode
27337       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27338       if (IntrData->Opc1 != 0) {
27339         SDValue Sae = Op.getOperand(5);
27340         if (isRoundModeSAE(Sae))
27341           return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
27342                              Op.getOperand(2), CC, Mask, Sae);
27343         if (!isRoundModeCurDirection(Sae))
27344           return SDValue();
27345       }
27346       // Default rounding mode.
27347       return DAG.getNode(IntrData->Opc0, dl, MaskVT,
27348                          {Op.getOperand(1), Op.getOperand(2), CC, Mask});
27349     }
27350     case CMP_MASK_SCALAR_CC: {
27351       SDValue Src1 = Op.getOperand(1);
27352       SDValue Src2 = Op.getOperand(2);
27353       SDValue CC = Op.getOperand(3);
27354       SDValue Mask = Op.getOperand(4);
27355 
27356       SDValue Cmp;
27357       if (IntrData->Opc1 != 0) {
27358         SDValue Sae = Op.getOperand(5);
27359         if (isRoundModeSAE(Sae))
27360           Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
27361         else if (!isRoundModeCurDirection(Sae))
27362           return SDValue();
27363       }
27364       // Default rounding mode.
27365       if (!Cmp.getNode())
27366         Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
27367 
27368       SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
27369                                              Subtarget, DAG);
27370       // Need to fill with zeros to ensure the bitcast will produce zeroes
27371       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27372       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
27373                                 DAG.getConstant(0, dl, MVT::v8i1),
27374                                 CmpMask, DAG.getIntPtrConstant(0, dl));
27375       return DAG.getBitcast(MVT::i8, Ins);
27376     }
27377     case COMI: { // Comparison intrinsics
27378       ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
27379       SDValue LHS = Op.getOperand(1);
27380       SDValue RHS = Op.getOperand(2);
27381       // Some conditions require the operands to be swapped.
27382       if (CC == ISD::SETLT || CC == ISD::SETLE)
27383         std::swap(LHS, RHS);
27384 
27385       SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
27386       SDValue SetCC;
27387       switch (CC) {
27388       case ISD::SETEQ: { // (ZF = 1 and PF = 0)
27389         SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
27390         SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
27391         SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
27392         break;
27393       }
27394       case ISD::SETNE: { // (ZF = 0 or PF = 1)
27395         SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
27396         SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
27397         SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
27398         break;
27399       }
27400       case ISD::SETGT: // (CF = 0 and ZF = 0)
27401       case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
27402         SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
27403         break;
27404       }
27405       case ISD::SETGE: // CF = 0
27406       case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
27407         SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
27408         break;
27409       default:
27410         llvm_unreachable("Unexpected illegal condition!");
27411       }
27412       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27413     }
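    // The parity checks in the EQ/NE cases above are needed because the
    // underlying (U)COMIS* compare signals "unordered" by setting ZF, PF and
    // CF all to 1, so ordered equality must also verify PF == 0 (and
    // inequality accepts PF == 1).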
27414     case COMI_RM: { // Comparison intrinsics with Sae
27415       SDValue LHS = Op.getOperand(1);
27416       SDValue RHS = Op.getOperand(2);
27417       unsigned CondVal = Op.getConstantOperandVal(3);
27418       SDValue Sae = Op.getOperand(4);
27419 
27420       SDValue FCmp;
27421       if (isRoundModeCurDirection(Sae))
27422         FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
27423                            DAG.getTargetConstant(CondVal, dl, MVT::i8));
27424       else if (isRoundModeSAE(Sae))
27425         FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
27426                            DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
27427       else
27428         return SDValue();
27429       // Need to fill with zeros to ensure the bitcast will produce zeroes
27430       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27431       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
27432                                 DAG.getConstant(0, dl, MVT::v16i1),
27433                                 FCmp, DAG.getIntPtrConstant(0, dl));
27434       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
27435                          DAG.getBitcast(MVT::i16, Ins));
27436     }
27437     case VSHIFT: {
27438       SDValue SrcOp = Op.getOperand(1);
27439       SDValue ShAmt = Op.getOperand(2);
27440       assert(ShAmt.getValueType() == MVT::i32 &&
27441              "Unexpected VSHIFT amount type");
27442 
27443       // Catch shift-by-constant.
27444       if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
27445         return getTargetVShiftByConstNode(IntrData->Opc0, dl,
27446                                           Op.getSimpleValueType(), SrcOp,
27447                                           CShAmt->getZExtValue(), DAG);
27448 
27449       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
27450       return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
27451                                  SrcOp, ShAmt, 0, Subtarget, DAG);
27452     }
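    // In the VSHIFT case above, a constant count is folded straight into the
    // immediate-shift form, while a variable i32 count is first widened to a
    // v4i32 operand because the underlying shift instructions take the count
    // from a vector register.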
27453     case COMPRESS_EXPAND_IN_REG: {
27454       SDValue Mask = Op.getOperand(3);
27455       SDValue DataToCompress = Op.getOperand(1);
27456       SDValue PassThru = Op.getOperand(2);
27457       if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
27458         return Op.getOperand(1);
27459 
27460       // Avoid false dependency.
27461       if (PassThru.isUndef())
27462         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
27463 
27464       return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
27465                          Mask);
27466     }
27467     case FIXUPIMM:
27468     case FIXUPIMM_MASKZ: {
27469       SDValue Src1 = Op.getOperand(1);
27470       SDValue Src2 = Op.getOperand(2);
27471       SDValue Src3 = Op.getOperand(3);
27472       SDValue Imm = Op.getOperand(4);
27473       SDValue Mask = Op.getOperand(5);
27474       SDValue Passthru = (IntrData->Type == FIXUPIMM)
27475                              ? Src1
27476                              : getZeroVector(VT, Subtarget, DAG, dl);
27477 
27478       unsigned Opc = IntrData->Opc0;
27479       if (IntrData->Opc1 != 0) {
27480         SDValue Sae = Op.getOperand(6);
27481         if (isRoundModeSAE(Sae))
27482           Opc = IntrData->Opc1;
27483         else if (!isRoundModeCurDirection(Sae))
27484           return SDValue();
27485       }
27486 
27487       SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
27488 
27489       if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
27490         return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
27491 
27492       return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
27493     }
27494     case ROUNDP: {
27495       assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
27496       // Clear the upper bits of the rounding immediate so that the legacy
27497       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
27498       auto Round = cast<ConstantSDNode>(Op.getOperand(2));
27499       SDValue RoundingMode =
27500           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
27501       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27502                          Op.getOperand(1), RoundingMode);
27503     }
27504     case ROUNDS: {
27505       assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
27506       // Clear the upper bits of the rounding immediate so that the legacy
27507       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
27508       auto Round = cast<ConstantSDNode>(Op.getOperand(3));
27509       SDValue RoundingMode =
27510           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
27511       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27512                          Op.getOperand(1), Op.getOperand(2), RoundingMode);
27513     }
27514     case BEXTRI: {
27515       assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
27516 
27517       uint64_t Imm = Op.getConstantOperandVal(2);
27518       SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
27519                                               Op.getValueType());
27520       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27521                          Op.getOperand(1), Control);
27522     }
27523     // ADC/ADCX/SBB
27524     case ADX: {
27525       SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
27526       SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
27527 
27528       SDValue Res;
27529       // If the carry in is zero, then we should just use ADD/SUB instead of
27530       // ADC/SBB.
27531       if (isNullConstant(Op.getOperand(1))) {
27532         Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
27533                           Op.getOperand(3));
27534       } else {
27535         SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
27536                                     DAG.getConstant(-1, dl, MVT::i8));
27537         Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
27538                           Op.getOperand(3), GenCF.getValue(1));
27539       }
27540       SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
27541       SDValue Results[] = { SetCC, Res };
27542       return DAG.getMergeValues(Results, dl);
27543     }
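    // In the ADX case above: a provably zero carry-in yields a plain ADD/SUB,
    // otherwise the incoming carry byte is converted back into EFLAGS.CF (by
    // adding -1) and glued into an ADC/SBB; either way the produced carry is
    // returned through a SETB of the flag result.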
27544     case CVTPD2PS_MASK:
27545     case CVTPD2DQ_MASK:
27546     case CVTQQ2PS_MASK:
27547     case TRUNCATE_TO_REG: {
27548       SDValue Src = Op.getOperand(1);
27549       SDValue PassThru = Op.getOperand(2);
27550       SDValue Mask = Op.getOperand(3);
27551 
27552       if (isAllOnesConstant(Mask))
27553         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
27554 
27555       MVT SrcVT = Src.getSimpleValueType();
27556       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
27557       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27558       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
27559                          {Src, PassThru, Mask});
27560     }
27561     case CVTPS2PH_MASK: {
27562       SDValue Src = Op.getOperand(1);
27563       SDValue Rnd = Op.getOperand(2);
27564       SDValue PassThru = Op.getOperand(3);
27565       SDValue Mask = Op.getOperand(4);
27566 
27567       unsigned RC = 0;
27568       unsigned Opc = IntrData->Opc0;
27569       bool SAE = Src.getValueType().is512BitVector() &&
27570                  (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
27571       if (SAE) {
27572         Opc = X86ISD::CVTPS2PH_SAE;
27573         Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
27574       }
27575 
27576       if (isAllOnesConstant(Mask))
27577         return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);
27578 
27579       if (SAE)
27580         Opc = X86ISD::MCVTPS2PH_SAE;
27581       else
27582         Opc = IntrData->Opc1;
27583       MVT SrcVT = Src.getSimpleValueType();
27584       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
27585       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27586       return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
27587     }
27588     case CVTNEPS2BF16_MASK: {
27589       SDValue Src = Op.getOperand(1);
27590       SDValue PassThru = Op.getOperand(2);
27591       SDValue Mask = Op.getOperand(3);
27592 
27593       if (ISD::isBuildVectorAllOnes(Mask.getNode()))
27594         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
27595 
27596       // Break false dependency.
27597       if (PassThru.isUndef())
27598         PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
27599 
27600       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
27601                          Mask);
27602     }
27603     default:
27604       break;
27605     }
27606   }
27607 
27608   switch (IntNo) {
27609   default: return SDValue();    // Don't custom lower most intrinsics.
27610 
27611   // ptest and testp intrinsics. The intrinsics these come from are designed to
27612   // return an integer value, not just an instruction, so lower it to the ptest
27613   // or testp pattern and a setcc for the result.
27614   case Intrinsic::x86_avx512_ktestc_b:
27615   case Intrinsic::x86_avx512_ktestc_w:
27616   case Intrinsic::x86_avx512_ktestc_d:
27617   case Intrinsic::x86_avx512_ktestc_q:
27618   case Intrinsic::x86_avx512_ktestz_b:
27619   case Intrinsic::x86_avx512_ktestz_w:
27620   case Intrinsic::x86_avx512_ktestz_d:
27621   case Intrinsic::x86_avx512_ktestz_q:
27622   case Intrinsic::x86_sse41_ptestz:
27623   case Intrinsic::x86_sse41_ptestc:
27624   case Intrinsic::x86_sse41_ptestnzc:
27625   case Intrinsic::x86_avx_ptestz_256:
27626   case Intrinsic::x86_avx_ptestc_256:
27627   case Intrinsic::x86_avx_ptestnzc_256:
27628   case Intrinsic::x86_avx_vtestz_ps:
27629   case Intrinsic::x86_avx_vtestc_ps:
27630   case Intrinsic::x86_avx_vtestnzc_ps:
27631   case Intrinsic::x86_avx_vtestz_pd:
27632   case Intrinsic::x86_avx_vtestc_pd:
27633   case Intrinsic::x86_avx_vtestnzc_pd:
27634   case Intrinsic::x86_avx_vtestz_ps_256:
27635   case Intrinsic::x86_avx_vtestc_ps_256:
27636   case Intrinsic::x86_avx_vtestnzc_ps_256:
27637   case Intrinsic::x86_avx_vtestz_pd_256:
27638   case Intrinsic::x86_avx_vtestc_pd_256:
27639   case Intrinsic::x86_avx_vtestnzc_pd_256: {
27640     unsigned TestOpc = X86ISD::PTEST;
27641     X86::CondCode X86CC;
27642     switch (IntNo) {
27643     default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
27644     case Intrinsic::x86_avx512_ktestc_b:
27645     case Intrinsic::x86_avx512_ktestc_w:
27646     case Intrinsic::x86_avx512_ktestc_d:
27647     case Intrinsic::x86_avx512_ktestc_q:
27648       // CF = 1
27649       TestOpc = X86ISD::KTEST;
27650       X86CC = X86::COND_B;
27651       break;
27652     case Intrinsic::x86_avx512_ktestz_b:
27653     case Intrinsic::x86_avx512_ktestz_w:
27654     case Intrinsic::x86_avx512_ktestz_d:
27655     case Intrinsic::x86_avx512_ktestz_q:
27656       TestOpc = X86ISD::KTEST;
27657       X86CC = X86::COND_E;
27658       break;
27659     case Intrinsic::x86_avx_vtestz_ps:
27660     case Intrinsic::x86_avx_vtestz_pd:
27661     case Intrinsic::x86_avx_vtestz_ps_256:
27662     case Intrinsic::x86_avx_vtestz_pd_256:
27663       TestOpc = X86ISD::TESTP;
27664       [[fallthrough]];
27665     case Intrinsic::x86_sse41_ptestz:
27666     case Intrinsic::x86_avx_ptestz_256:
27667       // ZF = 1
27668       X86CC = X86::COND_E;
27669       break;
27670     case Intrinsic::x86_avx_vtestc_ps:
27671     case Intrinsic::x86_avx_vtestc_pd:
27672     case Intrinsic::x86_avx_vtestc_ps_256:
27673     case Intrinsic::x86_avx_vtestc_pd_256:
27674       TestOpc = X86ISD::TESTP;
27675       [[fallthrough]];
27676     case Intrinsic::x86_sse41_ptestc:
27677     case Intrinsic::x86_avx_ptestc_256:
27678       // CF = 1
27679       X86CC = X86::COND_B;
27680       break;
27681     case Intrinsic::x86_avx_vtestnzc_ps:
27682     case Intrinsic::x86_avx_vtestnzc_pd:
27683     case Intrinsic::x86_avx_vtestnzc_ps_256:
27684     case Intrinsic::x86_avx_vtestnzc_pd_256:
27685       TestOpc = X86ISD::TESTP;
27686       [[fallthrough]];
27687     case Intrinsic::x86_sse41_ptestnzc:
27688     case Intrinsic::x86_avx_ptestnzc_256:
27689       // ZF and CF = 0
27690       X86CC = X86::COND_A;
27691       break;
27692     }
27693 
27694     SDValue LHS = Op.getOperand(1);
27695     SDValue RHS = Op.getOperand(2);
27696     SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
27697     SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
27698     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27699   }
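  // E.g. ptestz(a, b) above becomes a PTEST node feeding a SETE of EFLAGS,
  // and the i8 setcc result is zero-extended to the i32 value these
  // intrinsics are defined to return.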
27700 
27701   case Intrinsic::x86_sse42_pcmpistria128:
27702   case Intrinsic::x86_sse42_pcmpestria128:
27703   case Intrinsic::x86_sse42_pcmpistric128:
27704   case Intrinsic::x86_sse42_pcmpestric128:
27705   case Intrinsic::x86_sse42_pcmpistrio128:
27706   case Intrinsic::x86_sse42_pcmpestrio128:
27707   case Intrinsic::x86_sse42_pcmpistris128:
27708   case Intrinsic::x86_sse42_pcmpestris128:
27709   case Intrinsic::x86_sse42_pcmpistriz128:
27710   case Intrinsic::x86_sse42_pcmpestriz128: {
27711     unsigned Opcode;
27712     X86::CondCode X86CC;
27713     switch (IntNo) {
27714     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
27715     case Intrinsic::x86_sse42_pcmpistria128:
27716       Opcode = X86ISD::PCMPISTR;
27717       X86CC = X86::COND_A;
27718       break;
27719     case Intrinsic::x86_sse42_pcmpestria128:
27720       Opcode = X86ISD::PCMPESTR;
27721       X86CC = X86::COND_A;
27722       break;
27723     case Intrinsic::x86_sse42_pcmpistric128:
27724       Opcode = X86ISD::PCMPISTR;
27725       X86CC = X86::COND_B;
27726       break;
27727     case Intrinsic::x86_sse42_pcmpestric128:
27728       Opcode = X86ISD::PCMPESTR;
27729       X86CC = X86::COND_B;
27730       break;
27731     case Intrinsic::x86_sse42_pcmpistrio128:
27732       Opcode = X86ISD::PCMPISTR;
27733       X86CC = X86::COND_O;
27734       break;
27735     case Intrinsic::x86_sse42_pcmpestrio128:
27736       Opcode = X86ISD::PCMPESTR;
27737       X86CC = X86::COND_O;
27738       break;
27739     case Intrinsic::x86_sse42_pcmpistris128:
27740       Opcode = X86ISD::PCMPISTR;
27741       X86CC = X86::COND_S;
27742       break;
27743     case Intrinsic::x86_sse42_pcmpestris128:
27744       Opcode = X86ISD::PCMPESTR;
27745       X86CC = X86::COND_S;
27746       break;
27747     case Intrinsic::x86_sse42_pcmpistriz128:
27748       Opcode = X86ISD::PCMPISTR;
27749       X86CC = X86::COND_E;
27750       break;
27751     case Intrinsic::x86_sse42_pcmpestriz128:
27752       Opcode = X86ISD::PCMPESTR;
27753       X86CC = X86::COND_E;
27754       break;
27755     }
27756     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27757     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27758     SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
27759     SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
27760     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27761   }
27762 
27763   case Intrinsic::x86_sse42_pcmpistri128:
27764   case Intrinsic::x86_sse42_pcmpestri128: {
27765     unsigned Opcode;
27766     if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
27767       Opcode = X86ISD::PCMPISTR;
27768     else
27769       Opcode = X86ISD::PCMPESTR;
27770 
27771     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27772     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27773     return DAG.getNode(Opcode, dl, VTs, NewOps);
27774   }
27775 
27776   case Intrinsic::x86_sse42_pcmpistrm128:
27777   case Intrinsic::x86_sse42_pcmpestrm128: {
27778     unsigned Opcode;
27779     if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
27780       Opcode = X86ISD::PCMPISTR;
27781     else
27782       Opcode = X86ISD::PCMPESTR;
27783 
27784     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27785     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27786     return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
27787   }
27788 
27789   case Intrinsic::eh_sjlj_lsda: {
27790     MachineFunction &MF = DAG.getMachineFunction();
27791     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27792     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
27793     auto &Context = MF.getMMI().getContext();
27794     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
27795                                             Twine(MF.getFunctionNumber()));
27796     return DAG.getNode(getGlobalWrapperKind(), dl, VT,
27797                        DAG.getMCSymbol(S, PtrVT));
27798   }
27799 
27800   case Intrinsic::x86_seh_lsda: {
27801     // Compute the symbol for the LSDA. We know it'll get emitted later.
27802     MachineFunction &MF = DAG.getMachineFunction();
27803     SDValue Op1 = Op.getOperand(1);
27804     auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
27805     MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
27806         GlobalValue::dropLLVMManglingEscape(Fn->getName()));
27807 
27808     // Generate a simple absolute symbol reference. This intrinsic is only
27809     // supported on 32-bit Windows, which isn't PIC.
27810     SDValue Result = DAG.getMCSymbol(LSDASym, VT);
27811     return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
27812   }
27813 
27814   case Intrinsic::eh_recoverfp: {
27815     SDValue FnOp = Op.getOperand(1);
27816     SDValue IncomingFPOp = Op.getOperand(2);
27817     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
27818     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
27819     if (!Fn)
27820       report_fatal_error(
27821           "llvm.eh.recoverfp must take a function as the first argument");
27822     return recoverFramePointer(DAG, Fn, IncomingFPOp);
27823   }
27824 
27825   case Intrinsic::localaddress: {
27826     // Returns one of the stack, base, or frame pointer registers, depending on
27827     // which is used to reference local variables.
27828     MachineFunction &MF = DAG.getMachineFunction();
27829     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27830     unsigned Reg;
27831     if (RegInfo->hasBasePointer(MF))
27832       Reg = RegInfo->getBaseRegister();
27833     else { // Handles the SP or FP case.
27834       bool CantUseFP = RegInfo->hasStackRealignment(MF);
27835       if (CantUseFP)
27836         Reg = RegInfo->getPtrSizedStackRegister(MF);
27837       else
27838         Reg = RegInfo->getPtrSizedFrameRegister(MF);
27839     }
27840     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
27841   }
27842   case Intrinsic::x86_avx512_vp2intersect_q_512:
27843   case Intrinsic::x86_avx512_vp2intersect_q_256:
27844   case Intrinsic::x86_avx512_vp2intersect_q_128:
27845   case Intrinsic::x86_avx512_vp2intersect_d_512:
27846   case Intrinsic::x86_avx512_vp2intersect_d_256:
27847   case Intrinsic::x86_avx512_vp2intersect_d_128: {
27848     MVT MaskVT = Op.getSimpleValueType();
27849 
27850     SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
27851     SDLoc DL(Op);
27852 
27853     SDValue Operation =
27854         DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
27855                     Op->getOperand(1), Op->getOperand(2));
27856 
27857     SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
27858                                                  MaskVT, Operation);
27859     SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
27860                                                  MaskVT, Operation);
27861     return DAG.getMergeValues({Result0, Result1}, DL);
27862   }
27863   case Intrinsic::x86_mmx_pslli_w:
27864   case Intrinsic::x86_mmx_pslli_d:
27865   case Intrinsic::x86_mmx_pslli_q:
27866   case Intrinsic::x86_mmx_psrli_w:
27867   case Intrinsic::x86_mmx_psrli_d:
27868   case Intrinsic::x86_mmx_psrli_q:
27869   case Intrinsic::x86_mmx_psrai_w:
27870   case Intrinsic::x86_mmx_psrai_d: {
27871     SDLoc DL(Op);
27872     SDValue ShAmt = Op.getOperand(2);
27873     // If the argument is a constant, convert it to a target constant.
27874     if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
27875       // Clamp out-of-bounds shift amounts since they will otherwise be masked
27876       // to 8 bits, which may make them no longer out of bounds.
27877       unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
27878       if (ShiftAmount == 0)
27879         return Op.getOperand(1);
27880 
27881       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
27882                          Op.getOperand(0), Op.getOperand(1),
27883                          DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
27884     }
27885 
27886     unsigned NewIntrinsic;
27887     switch (IntNo) {
27888     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
27889     case Intrinsic::x86_mmx_pslli_w:
27890       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
27891       break;
27892     case Intrinsic::x86_mmx_pslli_d:
27893       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
27894       break;
27895     case Intrinsic::x86_mmx_pslli_q:
27896       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
27897       break;
27898     case Intrinsic::x86_mmx_psrli_w:
27899       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
27900       break;
27901     case Intrinsic::x86_mmx_psrli_d:
27902       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
27903       break;
27904     case Intrinsic::x86_mmx_psrli_q:
27905       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
27906       break;
27907     case Intrinsic::x86_mmx_psrai_w:
27908       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
27909       break;
27910     case Intrinsic::x86_mmx_psrai_d:
27911       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
27912       break;
27913     }
27914 
27915     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
27916     // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
27917     // MMX register.
27918     ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
27919     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
27920                        DAG.getTargetConstant(NewIntrinsic, DL,
27921                                              getPointerTy(DAG.getDataLayout())),
27922                        Op.getOperand(1), ShAmt);
27923   }
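  // In the cases above, a constant count keeps the immediate MMX intrinsic
  // (with the count clamped and rewritten as a target constant), while a
  // variable count is moved into an MMX register via MMX_MOVW2D and the call
  // is rewritten to the corresponding register-count intrinsic (psll.w etc.).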
27924   case Intrinsic::thread_pointer: {
27925     if (Subtarget.isTargetELF()) {
27926       SDLoc dl(Op);
27927       EVT PtrVT = getPointerTy(DAG.getDataLayout());
27928       // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
27929       Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(
27930           *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
27931       return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
27932                          DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
27933     }
27934     report_fatal_error(
27935         "Target OS doesn't support __builtin_thread_pointer() yet.");
27936   }
27937   }
27938 }
27939 
27940 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
27941                                  SDValue Src, SDValue Mask, SDValue Base,
27942                                  SDValue Index, SDValue ScaleOp, SDValue Chain,
27943                                  const X86Subtarget &Subtarget) {
27944   SDLoc dl(Op);
27945   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
27946   // Scale must be constant.
27947   if (!C)
27948     return SDValue();
27949   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27950   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
27951                                         TLI.getPointerTy(DAG.getDataLayout()));
27952   EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
27953   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
27954   // If source is undef or we know it won't be used, use a zero vector
27955   // to break register dependency.
27956   // TODO: use undef instead and let BreakFalseDeps deal with it?
27957   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
27958     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
27959 
27960   // Cast mask to an integer type.
27961   Mask = DAG.getBitcast(MaskVT, Mask);
27962 
27963   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
27964 
27965   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
27966   SDValue Res =
27967       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
27968                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
27969   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
27970 }
27971 
27972 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
27973                              SDValue Src, SDValue Mask, SDValue Base,
27974                              SDValue Index, SDValue ScaleOp, SDValue Chain,
27975                              const X86Subtarget &Subtarget) {
27976   MVT VT = Op.getSimpleValueType();
27977   SDLoc dl(Op);
27978   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
27979   // Scale must be constant.
27980   if (!C)
27981     return SDValue();
27982   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27983   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
27984                                         TLI.getPointerTy(DAG.getDataLayout()));
27985   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
27986                               VT.getVectorNumElements());
27987   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
27988 
27989   // We support two versions of the gather intrinsics. One with scalar mask and
27990   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
27991   if (Mask.getValueType() != MaskVT)
27992     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27993 
27994   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
27995   // If source is undef or we know it won't be used, use a zero vector
27996   // to break register dependency.
27997   // TODO: use undef instead and let BreakFalseDeps deal with it?
27998   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
27999     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
28000 
28001   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28002 
28003   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
28004   SDValue Res =
28005       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
28006                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28007   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
28008 }
28009 
28010 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28011                                SDValue Src, SDValue Mask, SDValue Base,
28012                                SDValue Index, SDValue ScaleOp, SDValue Chain,
28013                                const X86Subtarget &Subtarget) {
28014   SDLoc dl(Op);
28015   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
28016   // Scale must be constant.
28017   if (!C)
28018     return SDValue();
28019   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28020   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28021                                         TLI.getPointerTy(DAG.getDataLayout()));
28022   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
28023                               Src.getSimpleValueType().getVectorNumElements());
28024   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
28025 
28026   // We support two versions of the scatter intrinsics. One with scalar mask and
28027   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
28028   if (Mask.getValueType() != MaskVT)
28029     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28030 
28031   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28032 
28033   SDVTList VTs = DAG.getVTList(MVT::Other);
28034   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
28035   SDValue Res =
28036       DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28037                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28038   return Res;
28039 }
28040 
28041 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28042                                SDValue Mask, SDValue Base, SDValue Index,
28043                                SDValue ScaleOp, SDValue Chain,
28044                                const X86Subtarget &Subtarget) {
28045   SDLoc dl(Op);
28046   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
28047   // Scale must be constant.
28048   if (!C)
28049     return SDValue();
28050   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28051   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28052                                         TLI.getPointerTy(DAG.getDataLayout()));
28053   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
28054   SDValue Segment = DAG.getRegister(0, MVT::i32);
28055   MVT MaskVT =
28056     MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
28057   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28058   SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
28059   SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
28060   return SDValue(Res, 0);
28061 }
28062 
28063 /// Handles the lowering of builtin intrinsics with chain that return their
28064 /// value into registers EDX:EAX.
28065 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
28066 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
28067 /// TargetOpcode.
28068 /// Returns a Glue value which can be used to add an extra copy-from-reg if the
28069 /// expanded intrinsic implicitly defines extra registers (i.e. not just
28070 /// EDX:EAX).
28071 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
28072                                         SelectionDAG &DAG,
28073                                         unsigned TargetOpcode,
28074                                         unsigned SrcReg,
28075                                         const X86Subtarget &Subtarget,
28076                                         SmallVectorImpl<SDValue> &Results) {
28077   SDValue Chain = N->getOperand(0);
28078   SDValue Glue;
28079 
28080   if (SrcReg) {
28081     assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
28082     Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
28083     Glue = Chain.getValue(1);
28084   }
28085 
28086   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
28087   SDValue N1Ops[] = {Chain, Glue};
28088   SDNode *N1 = DAG.getMachineNode(
28089       TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
28090   Chain = SDValue(N1, 0);
28091 
28092   // Read the 64-bit result the expanded instruction returns in EDX:EAX.
28093   SDValue LO, HI;
28094   if (Subtarget.is64Bit()) {
28095     LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
28096     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
28097                             LO.getValue(2));
28098   } else {
28099     LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
28100     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
28101                             LO.getValue(2));
28102   }
28103   Chain = HI.getValue(1);
28104   Glue = HI.getValue(2);
28105 
28106   if (Subtarget.is64Bit()) {
28107     // Merge the two 32-bit values into a 64-bit one.
28108     SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
28109                               DAG.getConstant(32, DL, MVT::i8));
28110     Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
28111     Results.push_back(Chain);
28112     return Glue;
28113   }
28114 
28115   // Use a buildpair to merge the two 32-bit values into a 64-bit one.
28116   SDValue Ops[] = { LO, HI };
28117   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
28118   Results.push_back(Pair);
28119   Results.push_back(Chain);
28120   return Glue;
28121 }
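// Note: callers that need more than EDX:EAX (e.g. RDTSCP, which also defines
// ECX) consume the Glue returned by expandIntrinsicWChainHelper to add the
// extra copy-from-reg, as getReadTimeStampCounter below does.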
28122 
28123 /// Handles the lowering of builtin intrinsics that read the time stamp counter
28124 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
28125 /// READCYCLECOUNTER nodes.
28126 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
28127                                     SelectionDAG &DAG,
28128                                     const X86Subtarget &Subtarget,
28129                                     SmallVectorImpl<SDValue> &Results) {
28130   // The processor's time-stamp counter (a 64-bit MSR) is stored into the
28131   // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
28132   // and the EAX register is loaded with the low-order 32 bits.
28133   SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
28134                                              /* NoRegister */0, Subtarget,
28135                                              Results);
28136   if (Opcode != X86::RDTSCP)
28137     return;
28138 
28139   SDValue Chain = Results[1];
28140   // The RDTSCP instruction also loads the IA32_TSC_AUX MSR (address
28141   // C000_0103H) into the ECX register. Add 'ecx' explicitly to the chain.
28142   SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
28143   Results[1] = ecx;
28144   Results.push_back(ecx.getValue(1));
28145 }
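// After the expansion above, Results holds { counter, chain } for RDTSC and
// { counter, IA32_TSC_AUX value, chain } for RDTSCP, matching the extra aux
// result the rdtscp intrinsic returns.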
28146 
28147 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
28148                                      SelectionDAG &DAG) {
28149   SmallVector<SDValue, 3> Results;
28150   SDLoc DL(Op);
28151   getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
28152                           Results);
28153   return DAG.getMergeValues(Results, DL);
28154 }
28155 
28156 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
28157   MachineFunction &MF = DAG.getMachineFunction();
28158   SDValue Chain = Op.getOperand(0);
28159   SDValue RegNode = Op.getOperand(2);
28160   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
28161   if (!EHInfo)
28162     report_fatal_error("EH registrations only live in functions using WinEH");
28163 
28164   // Cast the operand to an alloca, and remember the frame index.
28165   auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
28166   if (!FINode)
28167     report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
28168   EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
28169 
28170   // Return the chain operand without making any DAG nodes.
28171   return Chain;
28172 }
28173 
28174 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
28175   MachineFunction &MF = DAG.getMachineFunction();
28176   SDValue Chain = Op.getOperand(0);
28177   SDValue EHGuard = Op.getOperand(2);
28178   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
28179   if (!EHInfo)
28180     report_fatal_error("EHGuard only live in functions using WinEH");
28181 
28182   // Cast the operand to an alloca, and remember the frame index.
28183   auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
28184   if (!FINode)
28185     report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
28186   EHInfo->EHGuardFrameIndex = FINode->getIndex();
28187 
28188   // Return the chain operand without making any DAG nodes.
28189   return Chain;
28190 }
28191 
28192 /// Emit Truncating Store with signed or unsigned saturation.
28193 static SDValue
28194 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
28195                 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
28196                 SelectionDAG &DAG) {
28197   SDVTList VTs = DAG.getVTList(MVT::Other);
28198   SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
28199   SDValue Ops[] = { Chain, Val, Ptr, Undef };
28200   unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
28201   return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
28202 }
28203 
28204 /// Emit Masked Truncating Store with signed or unsigned saturation.
28205 static SDValue
28206 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
28207                       SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
28208                       MachineMemOperand *MMO, SelectionDAG &DAG) {
28209   SDVTList VTs = DAG.getVTList(MVT::Other);
28210   SDValue Ops[] = { Chain, Val, Ptr, Mask };
28211   unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
28212   return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
28213 }
28214 
28215 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
28216                                       SelectionDAG &DAG) {
28217   unsigned IntNo = Op.getConstantOperandVal(1);
28218   const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
28219   if (!IntrData) {
28220     switch (IntNo) {
28221 
28222     case Intrinsic::swift_async_context_addr: {
28223       SDLoc dl(Op);
28224       auto &MF = DAG.getMachineFunction();
28225       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
28226       if (Subtarget.is64Bit()) {
28227         MF.getFrameInfo().setFrameAddressIsTaken(true);
28228         X86FI->setHasSwiftAsyncContext(true);
28229         SDValue Chain = Op->getOperand(0);
28230         SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
28231         SDValue Result =
28232             SDValue(DAG.getMachineNode(X86::SUB64ri8, dl, MVT::i64, CopyRBP,
28233                                        DAG.getTargetConstant(8, dl, MVT::i32)),
28234                     0);
28235         // Return { result, chain }.
28236         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
28237                            CopyRBP.getValue(1));
28238       } else {
28239         // 32-bit so no special extended frame, create or reuse an existing
28240         // 32-bit, so there is no special extended frame; create or reuse an existing
28241         if (!X86FI->getSwiftAsyncContextFrameIdx())
28242           X86FI->setSwiftAsyncContextFrameIdx(
28243               MF.getFrameInfo().CreateStackObject(4, Align(4), false));
28244         SDValue Result =
28245             DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
28246         // Return { result, chain }.
28247         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
28248                            Op->getOperand(0));
28249       }
28250     }
28251 
28252     case llvm::Intrinsic::x86_seh_ehregnode:
28253       return MarkEHRegistrationNode(Op, DAG);
28254     case llvm::Intrinsic::x86_seh_ehguard:
28255       return MarkEHGuard(Op, DAG);
28256     case llvm::Intrinsic::x86_rdpkru: {
28257       SDLoc dl(Op);
28258       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28259       // Create a RDPKRU node and pass 0 to the ECX parameter.
28260       return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
28261                          DAG.getConstant(0, dl, MVT::i32));
28262     }
28263     case llvm::Intrinsic::x86_wrpkru: {
28264       SDLoc dl(Op);
28265       // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
28266       // to the EDX and ECX parameters.
28267       return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
28268                          Op.getOperand(0), Op.getOperand(2),
28269                          DAG.getConstant(0, dl, MVT::i32),
28270                          DAG.getConstant(0, dl, MVT::i32));
28271     }
28272     case llvm::Intrinsic::asan_check_memaccess: {
28273       // Mark this as adjustsStack because it will be lowered to a call.
28274       DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
28275       // Don't do anything here, we will expand these intrinsics out later.
28276       return Op;
28277     }
28278     case llvm::Intrinsic::x86_flags_read_u32:
28279     case llvm::Intrinsic::x86_flags_read_u64:
28280     case llvm::Intrinsic::x86_flags_write_u32:
28281     case llvm::Intrinsic::x86_flags_write_u64: {
28282       // We need a frame pointer because this will get lowered to a PUSH/POP
28283       // sequence.
28284       MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
28285       MFI.setHasCopyImplyingStackAdjustment(true);
28286       // Don't do anything here, we will expand these intrinsics out later
28287       // during FinalizeISel in EmitInstrWithCustomInserter.
28288       return Op;
28289     }
28290     case Intrinsic::x86_lwpins32:
28291     case Intrinsic::x86_lwpins64:
28292     case Intrinsic::x86_umwait:
28293     case Intrinsic::x86_tpause: {
28294       SDLoc dl(Op);
28295       SDValue Chain = Op->getOperand(0);
28296       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28297       unsigned Opcode;
28298 
28299       switch (IntNo) {
28300       default: llvm_unreachable("Impossible intrinsic");
28301       case Intrinsic::x86_umwait:
28302         Opcode = X86ISD::UMWAIT;
28303         break;
28304       case Intrinsic::x86_tpause:
28305         Opcode = X86ISD::TPAUSE;
28306         break;
28307       case Intrinsic::x86_lwpins32:
28308       case Intrinsic::x86_lwpins64:
28309         Opcode = X86ISD::LWPINS;
28310         break;
28311       }
28312 
28313       SDValue Operation =
28314           DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
28315                       Op->getOperand(3), Op->getOperand(4));
28316       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
28317       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28318                          Operation.getValue(1));
28319     }
28320     case Intrinsic::x86_enqcmd:
28321     case Intrinsic::x86_enqcmds: {
28322       SDLoc dl(Op);
28323       SDValue Chain = Op.getOperand(0);
28324       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28325       unsigned Opcode;
28326       switch (IntNo) {
28327       default: llvm_unreachable("Impossible intrinsic!");
28328       case Intrinsic::x86_enqcmd:
28329         Opcode = X86ISD::ENQCMD;
28330         break;
28331       case Intrinsic::x86_enqcmds:
28332         Opcode = X86ISD::ENQCMDS;
28333         break;
28334       }
28335       SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
28336                                       Op.getOperand(3));
28337       SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
28338       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28339                          Operation.getValue(1));
28340     }
28341     case Intrinsic::x86_aesenc128kl:
28342     case Intrinsic::x86_aesdec128kl:
28343     case Intrinsic::x86_aesenc256kl:
28344     case Intrinsic::x86_aesdec256kl: {
28345       SDLoc DL(Op);
28346       SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
28347       SDValue Chain = Op.getOperand(0);
28348       unsigned Opcode;
28349 
28350       switch (IntNo) {
28351       default: llvm_unreachable("Impossible intrinsic");
28352       case Intrinsic::x86_aesenc128kl:
28353         Opcode = X86ISD::AESENC128KL;
28354         break;
28355       case Intrinsic::x86_aesdec128kl:
28356         Opcode = X86ISD::AESDEC128KL;
28357         break;
28358       case Intrinsic::x86_aesenc256kl:
28359         Opcode = X86ISD::AESENC256KL;
28360         break;
28361       case Intrinsic::x86_aesdec256kl:
28362         Opcode = X86ISD::AESDEC256KL;
28363         break;
28364       }
28365 
28366       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28367       MachineMemOperand *MMO = MemIntr->getMemOperand();
28368       EVT MemVT = MemIntr->getMemoryVT();
28369       SDValue Operation = DAG.getMemIntrinsicNode(
28370           Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
28371           MMO);
28372       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
28373 
28374       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28375                          {ZF, Operation.getValue(0), Operation.getValue(2)});
28376     }
28377     case Intrinsic::x86_aesencwide128kl:
28378     case Intrinsic::x86_aesdecwide128kl:
28379     case Intrinsic::x86_aesencwide256kl:
28380     case Intrinsic::x86_aesdecwide256kl: {
28381       SDLoc DL(Op);
28382       SDVTList VTs = DAG.getVTList(
28383           {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
28384            MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
28385       SDValue Chain = Op.getOperand(0);
28386       unsigned Opcode;
28387 
28388       switch (IntNo) {
28389       default: llvm_unreachable("Impossible intrinsic");
28390       case Intrinsic::x86_aesencwide128kl:
28391         Opcode = X86ISD::AESENCWIDE128KL;
28392         break;
28393       case Intrinsic::x86_aesdecwide128kl:
28394         Opcode = X86ISD::AESDECWIDE128KL;
28395         break;
28396       case Intrinsic::x86_aesencwide256kl:
28397         Opcode = X86ISD::AESENCWIDE256KL;
28398         break;
28399       case Intrinsic::x86_aesdecwide256kl:
28400         Opcode = X86ISD::AESDECWIDE256KL;
28401         break;
28402       }
28403 
28404       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28405       MachineMemOperand *MMO = MemIntr->getMemOperand();
28406       EVT MemVT = MemIntr->getMemoryVT();
28407       SDValue Operation = DAG.getMemIntrinsicNode(
28408           Opcode, DL, VTs,
28409           {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
28410            Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
28411            Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
28412           MemVT, MMO);
28413       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
28414 
28415       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28416                          {ZF, Operation.getValue(1), Operation.getValue(2),
28417                           Operation.getValue(3), Operation.getValue(4),
28418                           Operation.getValue(5), Operation.getValue(6),
28419                           Operation.getValue(7), Operation.getValue(8),
28420                           Operation.getValue(9)});
28421     }
28422     case Intrinsic::x86_testui: {
28423       SDLoc dl(Op);
28424       SDValue Chain = Op.getOperand(0);
28425       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28426       SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
28427       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
28428       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28429                          Operation.getValue(1));
28430     }
28431     case Intrinsic::x86_atomic_bts_rm:
28432     case Intrinsic::x86_atomic_btc_rm:
28433     case Intrinsic::x86_atomic_btr_rm: {
28434       SDLoc DL(Op);
28435       MVT VT = Op.getSimpleValueType();
28436       SDValue Chain = Op.getOperand(0);
28437       SDValue Op1 = Op.getOperand(2);
28438       SDValue Op2 = Op.getOperand(3);
28439       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm   ? X86ISD::LBTS_RM
28440                      : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
28441                                                              : X86ISD::LBTR_RM;
28442       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28443       SDValue Res =
28444           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28445                                   {Chain, Op1, Op2}, VT, MMO);
28446       Chain = Res.getValue(1);
28447       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
28448       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
28449     }
28450     case Intrinsic::x86_atomic_bts:
28451     case Intrinsic::x86_atomic_btc:
28452     case Intrinsic::x86_atomic_btr: {
28453       SDLoc DL(Op);
28454       MVT VT = Op.getSimpleValueType();
28455       SDValue Chain = Op.getOperand(0);
28456       SDValue Op1 = Op.getOperand(2);
28457       SDValue Op2 = Op.getOperand(3);
28458       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts   ? X86ISD::LBTS
28459                      : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
28460                                                           : X86ISD::LBTR;
28461       SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
28462       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28463       SDValue Res =
28464           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28465                                   {Chain, Op1, Op2, Size}, VT, MMO);
28466       Chain = Res.getValue(1);
28467       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
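      // The CF-based 0/1 result is shifted back to bit position Imm below so
      // the value handed to the caller has the tested bit in its original
      // position rather than in bit 0.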
28468       unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
28469       if (Imm)
28470         Res = DAG.getNode(ISD::SHL, DL, VT, Res,
28471                           DAG.getShiftAmountConstant(Imm, VT, DL));
28472       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
28473     }
28474     case Intrinsic::x86_cmpccxadd32:
28475     case Intrinsic::x86_cmpccxadd64: {
28476       SDLoc DL(Op);
28477       SDValue Chain = Op.getOperand(0);
28478       SDValue Addr = Op.getOperand(2);
28479       SDValue Src1 = Op.getOperand(3);
28480       SDValue Src2 = Op.getOperand(4);
28481       SDValue CC = Op.getOperand(5);
28482       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28483       SDValue Operation = DAG.getMemIntrinsicNode(
28484           X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
28485           MVT::i32, MMO);
28486       return Operation;
28487     }
28488     case Intrinsic::x86_aadd32:
28489     case Intrinsic::x86_aadd64:
28490     case Intrinsic::x86_aand32:
28491     case Intrinsic::x86_aand64:
28492     case Intrinsic::x86_aor32:
28493     case Intrinsic::x86_aor64:
28494     case Intrinsic::x86_axor32:
28495     case Intrinsic::x86_axor64: {
28496       SDLoc DL(Op);
28497       SDValue Chain = Op.getOperand(0);
28498       SDValue Op1 = Op.getOperand(2);
28499       SDValue Op2 = Op.getOperand(3);
28500       MVT VT = Op2.getSimpleValueType();
28501       unsigned Opc = 0;
28502       switch (IntNo) {
28503       default:
28504         llvm_unreachable("Unknown Intrinsic");
28505       case Intrinsic::x86_aadd32:
28506       case Intrinsic::x86_aadd64:
28507         Opc = X86ISD::AADD;
28508         break;
28509       case Intrinsic::x86_aand32:
28510       case Intrinsic::x86_aand64:
28511         Opc = X86ISD::AAND;
28512         break;
28513       case Intrinsic::x86_aor32:
28514       case Intrinsic::x86_aor64:
28515         Opc = X86ISD::AOR;
28516         break;
28517       case Intrinsic::x86_axor32:
28518       case Intrinsic::x86_axor64:
28519         Opc = X86ISD::AXOR;
28520         break;
28521       }
28522       MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
28523       return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
28524                                      {Chain, Op1, Op2}, VT, MMO);
28525     }
28526     case Intrinsic::x86_atomic_add_cc:
28527     case Intrinsic::x86_atomic_sub_cc:
28528     case Intrinsic::x86_atomic_or_cc:
28529     case Intrinsic::x86_atomic_and_cc:
28530     case Intrinsic::x86_atomic_xor_cc: {
28531       SDLoc DL(Op);
28532       SDValue Chain = Op.getOperand(0);
28533       SDValue Op1 = Op.getOperand(2);
28534       SDValue Op2 = Op.getOperand(3);
28535       X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
28536       MVT VT = Op2.getSimpleValueType();
28537       unsigned Opc = 0;
28538       switch (IntNo) {
28539       default:
28540         llvm_unreachable("Unknown Intrinsic");
28541       case Intrinsic::x86_atomic_add_cc:
28542         Opc = X86ISD::LADD;
28543         break;
28544       case Intrinsic::x86_atomic_sub_cc:
28545         Opc = X86ISD::LSUB;
28546         break;
28547       case Intrinsic::x86_atomic_or_cc:
28548         Opc = X86ISD::LOR;
28549         break;
28550       case Intrinsic::x86_atomic_and_cc:
28551         Opc = X86ISD::LAND;
28552         break;
28553       case Intrinsic::x86_atomic_xor_cc:
28554         Opc = X86ISD::LXOR;
28555         break;
28556       }
28557       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28558       SDValue LockArith =
28559           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28560                                   {Chain, Op1, Op2}, VT, MMO);
28561       Chain = LockArith.getValue(1);
28562       return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
28563     }
28564     }
28565     return SDValue();
28566   }
28567 
28568   SDLoc dl(Op);
28569   switch(IntrData->Type) {
28570   default: llvm_unreachable("Unknown Intrinsic Type");
28571   case RDSEED:
28572   case RDRAND: {
28573     // Emit the node with the right value type.
28574     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
28575     SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
28576 
28577     // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
28578     // Otherwise return the value from Rand, which is always 0, casted to i32.
28579     // Otherwise return the value from Rand, which is always 0, cast to i32.
28580                      DAG.getConstant(1, dl, Op->getValueType(1)),
28581                      DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
28582                      SDValue(Result.getNode(), 1)};
28583     SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
28584 
28585     // Return { result, isValid, chain }.
28586     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
28587                        SDValue(Result.getNode(), 2));
28588   }
28589   case GATHER_AVX2: {
28590     SDValue Chain = Op.getOperand(0);
28591     SDValue Src   = Op.getOperand(2);
28592     SDValue Base  = Op.getOperand(3);
28593     SDValue Index = Op.getOperand(4);
28594     SDValue Mask  = Op.getOperand(5);
28595     SDValue Scale = Op.getOperand(6);
28596     return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
28597                              Scale, Chain, Subtarget);
28598   }
28599   case GATHER: {
28600   // gather(v1, mask, index, base, scale);
28601     SDValue Chain = Op.getOperand(0);
28602     SDValue Src   = Op.getOperand(2);
28603     SDValue Base  = Op.getOperand(3);
28604     SDValue Index = Op.getOperand(4);
28605     SDValue Mask  = Op.getOperand(5);
28606     SDValue Scale = Op.getOperand(6);
28607     return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
28608                          Chain, Subtarget);
28609   }
28610   case SCATTER: {
28611   // scatter(base, mask, index, v1, scale);
28612     SDValue Chain = Op.getOperand(0);
28613     SDValue Base  = Op.getOperand(2);
28614     SDValue Mask  = Op.getOperand(3);
28615     SDValue Index = Op.getOperand(4);
28616     SDValue Src   = Op.getOperand(5);
28617     SDValue Scale = Op.getOperand(6);
28618     return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
28619                           Scale, Chain, Subtarget);
28620   }
28621   case PREFETCH: {
28622     const APInt &HintVal = Op.getConstantOperandAPInt(6);
28623     assert((HintVal == 2 || HintVal == 3) &&
28624            "Wrong prefetch hint in intrinsic: should be 2 or 3");
28625     unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
28626     SDValue Chain = Op.getOperand(0);
28627     SDValue Mask  = Op.getOperand(2);
28628     SDValue Index = Op.getOperand(3);
28629     SDValue Base  = Op.getOperand(4);
28630     SDValue Scale = Op.getOperand(5);
28631     return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
28632                            Subtarget);
28633   }
28634   // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
28635   case RDTSC: {
28636     SmallVector<SDValue, 2> Results;
28637     getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
28638                             Results);
28639     return DAG.getMergeValues(Results, dl);
28640   }
28641   // Read Performance Monitoring Counters.
28642   case RDPMC:
28643   // Read Processor Register.
28644   case RDPRU:
28645   // GetExtended Control Register.
28646   // Get Extended Control Register.
28647     SmallVector<SDValue, 2> Results;
28648 
28649     // RDPMC uses ECX to select the index of the performance counter to read.
28650     // RDPRU uses ECX to select the processor register to read.
28651     // XGETBV uses ECX to select the index of the XCR register to return.
28652     // The result is stored into registers EDX:EAX.
28653     expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
28654                                 Subtarget, Results);
28655     return DAG.getMergeValues(Results, dl);
28656   }
28657   // XTEST intrinsics.
28658   case XTEST: {
28659     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
28660     SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
28661 
28662     SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
28663     SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
28664     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
28665                        Ret, SDValue(InTrans.getNode(), 1));
28666   }
28667   case TRUNCATE_TO_MEM_VI8:
28668   case TRUNCATE_TO_MEM_VI16:
28669   case TRUNCATE_TO_MEM_VI32: {
28670     SDValue Mask = Op.getOperand(4);
28671     SDValue DataToTruncate = Op.getOperand(3);
28672     SDValue Addr = Op.getOperand(2);
28673     SDValue Chain = Op.getOperand(0);
28674 
28675     MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
28676     assert(MemIntr && "Expected MemIntrinsicSDNode!");
28677 
28678     EVT MemVT  = MemIntr->getMemoryVT();
28679 
28680     uint16_t TruncationOp = IntrData->Opc0;
28681     switch (TruncationOp) {
28682     case X86ISD::VTRUNC: {
28683       if (isAllOnesConstant(Mask)) // return just a truncate store
28684         return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
28685                                  MemIntr->getMemOperand());
28686 
28687       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
28688       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28689       SDValue Offset = DAG.getUNDEF(VMask.getValueType());
28690 
28691       return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
28692                                 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
28693                                 true /* truncating */);
28694     }
28695     case X86ISD::VTRUNCUS:
28696     case X86ISD::VTRUNCS: {
28697       bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
28698       if (isAllOnesConstant(Mask))
28699         return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
28700                                MemIntr->getMemOperand(), DAG);
28701 
28702       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
28703       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28704 
28705       return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
28706                                    VMask, MemVT, MemIntr->getMemOperand(), DAG);
28707     }
28708     default:
28709       llvm_unreachable("Unsupported truncstore intrinsic");
28710     }
28711   }
28712   }
28713 }
28714 
28715 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
28716                                            SelectionDAG &DAG) const {
28717   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
28718   MFI.setReturnAddressIsTaken(true);
28719 
28720   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
28721     return SDValue();
28722 
28723   unsigned Depth = Op.getConstantOperandVal(0);
28724   SDLoc dl(Op);
28725   EVT PtrVT = getPointerTy(DAG.getDataLayout());
28726 
28727   if (Depth > 0) {
28728     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
28729     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28730     SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
28731     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
28732                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
28733                        MachinePointerInfo());
28734   }
28735 
28736   // Just load the return address.
28737   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
28738   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
28739                      MachinePointerInfo());
28740 }
28741 
28742 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
28743                                                  SelectionDAG &DAG) const {
28744   DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
28745   return getReturnAddressFrameIndex(DAG);
28746 }
28747 
28748 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
28749   MachineFunction &MF = DAG.getMachineFunction();
28750   MachineFrameInfo &MFI = MF.getFrameInfo();
28751   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
28752   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28753   EVT VT = Op.getValueType();
28754 
28755   MFI.setFrameAddressIsTaken(true);
28756 
28757   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
28758     // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
28759     // is not possible to crawl up the stack without looking at the unwind codes
28760     // simultaneously.
28761     int FrameAddrIndex = FuncInfo->getFAIndex();
28762     if (!FrameAddrIndex) {
28763       // Set up a frame object for the return address.
28764       unsigned SlotSize = RegInfo->getSlotSize();
28765       FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
28766           SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
28767       FuncInfo->setFAIndex(FrameAddrIndex);
28768     }
28769     return DAG.getFrameIndex(FrameAddrIndex, VT);
28770   }
28771 
28772   unsigned FrameReg =
28773       RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
28774   SDLoc dl(Op);  // FIXME probably not meaningful
28775   unsigned Depth = Op.getConstantOperandVal(0);
28776   assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
28777           (FrameReg == X86::EBP && VT == MVT::i32)) &&
28778          "Invalid Frame Register!");
28779   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
28780   while (Depth--)
28781     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
28782                             MachinePointerInfo());
28783   return FrameAddr;
28784 }
28785 
28786 // FIXME? Maybe this could be a TableGen attribute on some registers and
28787 // this table could be generated automatically from RegInfo.
28788 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
28789                                               const MachineFunction &MF) const {
28790   const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
28791 
28792   Register Reg = StringSwitch<unsigned>(RegName)
28793                        .Case("esp", X86::ESP)
28794                        .Case("rsp", X86::RSP)
28795                        .Case("ebp", X86::EBP)
28796                        .Case("rbp", X86::RBP)
28797                        .Default(0);
28798 
28799   if (Reg == X86::EBP || Reg == X86::RBP) {
28800     if (!TFI.hasFP(MF))
28801       report_fatal_error("register " + StringRef(RegName) +
28802                          " is allocatable: function has no frame pointer");
28803 #ifndef NDEBUG
28804     else {
28805       const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28806       Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
28807       assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
28808              "Invalid Frame Register!");
28809     }
28810 #endif
28811   }
28812 
28813   if (Reg)
28814     return Reg;
28815 
28816   report_fatal_error("Invalid register name global variable");
28817 }
28818 
28819 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
28820                                                      SelectionDAG &DAG) const {
28821   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28822   return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
28823 }
28824 
28825 Register X86TargetLowering::getExceptionPointerRegister(
28826     const Constant *PersonalityFn) const {
28827   if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
28828     return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
28829 
28830   return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
28831 }
28832 
28833 Register X86TargetLowering::getExceptionSelectorRegister(
28834     const Constant *PersonalityFn) const {
28835   // Funclet personalities don't use selectors (the runtime does the selection).
28836   if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
28837     return X86::NoRegister;
28838   return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
28839 }
28840 
28841 bool X86TargetLowering::needsFixedCatchObjects() const {
28842   return Subtarget.isTargetWin64();
28843 }
28844 
28845 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
28846   SDValue Chain     = Op.getOperand(0);
28847   SDValue Offset    = Op.getOperand(1);
28848   SDValue Handler   = Op.getOperand(2);
28849   SDLoc dl      (Op);
28850 
28851   EVT PtrVT = getPointerTy(DAG.getDataLayout());
28852   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28853   Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
28854   assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
28855           (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
28856          "Invalid Frame Register!");
28857   SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
28858   Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
28859 
28860   SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
28861                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
28862                                                        dl));
28863   StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
28864   Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
28865   Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
28866 
28867   return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
28868                      DAG.getRegister(StoreAddrReg, PtrVT));
28869 }
28870 
28871 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
28872                                                SelectionDAG &DAG) const {
28873   SDLoc DL(Op);
28874   // If the subtarget is not 64bit, we may need the global base reg
28875   // after isel expand pseudo, i.e., after CGBR pass ran.
28876   // Therefore, ask for the GlobalBaseReg now, so that the pass
28877   // inserts the code for us in case we need it.
28878   // Otherwise, we will end up in a situation where we will
28879   // reference a virtual register that is not defined!
28880   if (!Subtarget.is64Bit()) {
28881     const X86InstrInfo *TII = Subtarget.getInstrInfo();
28882     (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
28883   }
28884   return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
28885                      DAG.getVTList(MVT::i32, MVT::Other),
28886                      Op.getOperand(0), Op.getOperand(1));
28887 }
28888 
28889 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
28890                                                 SelectionDAG &DAG) const {
28891   SDLoc DL(Op);
28892   return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
28893                      Op.getOperand(0), Op.getOperand(1));
28894 }
28895 
28896 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
28897                                                        SelectionDAG &DAG) const {
28898   SDLoc DL(Op);
28899   return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
28900                      Op.getOperand(0));
28901 }
28902 
28903 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
28904   return Op.getOperand(0);
28905 }
28906 
28907 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
28908                                                 SelectionDAG &DAG) const {
28909   SDValue Root = Op.getOperand(0);
28910   SDValue Trmp = Op.getOperand(1); // trampoline
28911   SDValue FPtr = Op.getOperand(2); // nested function
28912   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
28913   SDLoc dl (Op);
28914 
28915   const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
28916   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
28917 
28918   if (Subtarget.is64Bit()) {
28919     SDValue OutChains[6];
28920 
28921     // Large code-model.
28922     const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
28923     const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
28924 
28925     const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
28926     const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
28927 
28928     const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
28929 
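    // Taken together, the six stores below assemble (roughly) this 23-byte
    // sequence at Trmp, relying on x86 being little-endian for the i16
    // opcode stores:
    //   0:  49 BB <FPtr:imm64>   movabsq $FPtr, %r11
    //   10: 49 BA <Nest:imm64>   movabsq $Nest, %r10
    //   20: 49 FF E3             jmpq    *%r11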
28930     // Load the pointer to the nested function into R11.
28931     unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
28932     SDValue Addr = Trmp;
28933     OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28934                                 Addr, MachinePointerInfo(TrmpAddr));
28935 
28936     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28937                        DAG.getConstant(2, dl, MVT::i64));
28938     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
28939                                 MachinePointerInfo(TrmpAddr, 2), Align(2));
28940 
28941     // Load the 'nest' parameter value into R10.
28942     // R10 is specified in X86CallingConv.td
28943     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
28944     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28945                        DAG.getConstant(10, dl, MVT::i64));
28946     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28947                                 Addr, MachinePointerInfo(TrmpAddr, 10));
28948 
28949     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28950                        DAG.getConstant(12, dl, MVT::i64));
28951     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
28952                                 MachinePointerInfo(TrmpAddr, 12), Align(2));
28953 
28954     // Jump to the nested function.
28955     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
28956     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28957                        DAG.getConstant(20, dl, MVT::i64));
28958     OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28959                                 Addr, MachinePointerInfo(TrmpAddr, 20));
28960 
28961     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
28962     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28963                        DAG.getConstant(22, dl, MVT::i64));
28964     OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
28965                                 Addr, MachinePointerInfo(TrmpAddr, 22));
28966 
28967     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
28968   } else {
28969     const Function *Func =
28970       cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
28971     CallingConv::ID CC = Func->getCallingConv();
28972     unsigned NestReg;
28973 
28974     switch (CC) {
28975     default:
28976       llvm_unreachable("Unsupported calling convention");
28977     case CallingConv::C:
28978     case CallingConv::X86_StdCall: {
28979       // Pass 'nest' parameter in ECX.
28980       // Must be kept in sync with X86CallingConv.td
28981       NestReg = X86::ECX;
28982 
28983       // Check that ECX wasn't needed by an 'inreg' parameter.
28984       FunctionType *FTy = Func->getFunctionType();
28985       const AttributeList &Attrs = Func->getAttributes();
28986 
28987       if (!Attrs.isEmpty() && !Func->isVarArg()) {
28988         unsigned InRegCount = 0;
28989         unsigned Idx = 0;
28990 
28991         for (FunctionType::param_iterator I = FTy->param_begin(),
28992              E = FTy->param_end(); I != E; ++I, ++Idx)
28993           if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
28994             const DataLayout &DL = DAG.getDataLayout();
28995             // FIXME: should only count parameters that are lowered to integers.
28996             InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
28997           }
28998 
28999         if (InRegCount > 2) {
29000           report_fatal_error("Nest register in use - reduce number of inreg"
29001                              " parameters!");
29002         }
29003       }
29004       break;
29005     }
29006     case CallingConv::X86_FastCall:
29007     case CallingConv::X86_ThisCall:
29008     case CallingConv::Fast:
29009     case CallingConv::Tail:
29010     case CallingConv::SwiftTail:
29011       // Pass 'nest' parameter in EAX.
29012       // Must be kept in sync with X86CallingConv.td
29013       NestReg = X86::EAX;
29014       break;
29015     }
29016 
29017     SDValue OutChains[4];
29018     SDValue Addr, Disp;
29019 
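    // The 32-bit trampoline is a 5-byte MOV32ri of the nest value followed by
    // a 5-byte JMP rel32. The rel32 operand is relative to the end of the
    // trampoline, so the displacement computed below is FPtr - (Trmp + 10).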
29020     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29021                        DAG.getConstant(10, dl, MVT::i32));
29022     Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
29023 
29024     // This is storing the opcode for MOV32ri.
29025     const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
29026     const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
29027     OutChains[0] =
29028         DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
29029                      Trmp, MachinePointerInfo(TrmpAddr));
29030 
29031     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29032                        DAG.getConstant(1, dl, MVT::i32));
29033     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
29034                                 MachinePointerInfo(TrmpAddr, 1), Align(1));
29035 
29036     const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
29037     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29038                        DAG.getConstant(5, dl, MVT::i32));
29039     OutChains[2] =
29040         DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
29041                      MachinePointerInfo(TrmpAddr, 5), Align(1));
29042 
29043     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29044                        DAG.getConstant(6, dl, MVT::i32));
29045     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
29046                                 MachinePointerInfo(TrmpAddr, 6), Align(1));
29047 
29048     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
29049   }
29050 }
29051 
29052 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
29053                                              SelectionDAG &DAG) const {
29054   /*
29055    The rounding mode is in bits 11:10 of FPSR, and has the following
29056    settings:
29057      00 Round to nearest
29058      01 Round to -inf
29059      10 Round to +inf
29060      11 Round to 0
29061 
29062   GET_ROUNDING, on the other hand, expects the following:
29063     -1 Undefined
29064      0 Round to 0
29065      1 Round to nearest
29066      2 Round to +inf
29067      3 Round to -inf
29068 
29069   To perform the conversion, we use a packed lookup table of the four 2-bit
29070   values that we can index by FPSR[11:10]
29071     0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]
29072 
29073     (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
29074   */
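  // For example, if the RM field is 01 (round toward -inf), the shift amount
  // is (0x400 >> 9) = 2 and (0x2d >> 2) & 3 = 3, which is GET_ROUNDING's
  // encoding for "round to -inf".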
29075 
29076   MachineFunction &MF = DAG.getMachineFunction();
29077   MVT VT = Op.getSimpleValueType();
29078   SDLoc DL(Op);
29079 
29080   // Save FP Control Word to stack slot
29081   int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
29082   SDValue StackSlot =
29083       DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
29084 
29085   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
29086 
29087   SDValue Chain = Op.getOperand(0);
29088   SDValue Ops[] = {Chain, StackSlot};
29089   Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
29090                                   DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
29091                                   Align(2), MachineMemOperand::MOStore);
29092 
29093   // Load FP Control Word from stack slot
29094   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
29095   Chain = CWD.getValue(1);
29096 
29097   // Mask and turn the control bits into a shift for the lookup table.
29098   SDValue Shift =
29099     DAG.getNode(ISD::SRL, DL, MVT::i16,
29100                 DAG.getNode(ISD::AND, DL, MVT::i16,
29101                             CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
29102                 DAG.getConstant(9, DL, MVT::i8));
29103   Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
29104 
29105   SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
29106   SDValue RetVal =
29107     DAG.getNode(ISD::AND, DL, MVT::i32,
29108                 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
29109                 DAG.getConstant(3, DL, MVT::i32));
29110 
29111   RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
29112 
29113   return DAG.getMergeValues({RetVal, Chain}, DL);
29114 }
29115 
29116 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
29117                                              SelectionDAG &DAG) const {
29118   MachineFunction &MF = DAG.getMachineFunction();
29119   SDLoc DL(Op);
29120   SDValue Chain = Op.getNode()->getOperand(0);
29121 
29122   // FP control word may be set only from data in memory. So we need to allocate
29123   // stack space to save/load FP control word.
29124   int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
29125   SDValue StackSlot =
29126       DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
29127   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
29128   MachineMemOperand *MMO =
29129       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
29130 
29131   // Store FP control word into memory.
29132   SDValue Ops[] = {Chain, StackSlot};
29133   Chain = DAG.getMemIntrinsicNode(
29134       X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
29135 
29136   // Load FP Control Word from stack slot and clear RM field (bits 11:10).
29137   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
29138   Chain = CWD.getValue(1);
29139   CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
29140                     DAG.getConstant(0xf3ff, DL, MVT::i16));
29141 
29142   // Calculate new rounding mode.
29143   SDValue NewRM = Op.getNode()->getOperand(1);
29144   SDValue RMBits;
29145   if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
29146     uint64_t RM = CVal->getZExtValue();
29147     int FieldVal;
29148     switch (static_cast<RoundingMode>(RM)) {
29149     case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
29150     case RoundingMode::TowardNegative:    FieldVal = X86::rmDownward; break;
29151     case RoundingMode::TowardPositive:    FieldVal = X86::rmUpward; break;
29152     case RoundingMode::TowardZero:        FieldVal = X86::rmTowardZero; break;
29153     default:
29154       llvm_unreachable("rounding mode is not supported by X86 hardware");
29155     }
29156     RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
29157   } else {
29158     // Need to convert argument into bits of control word:
29159     //    0 Round to 0       -> 11
29160     //    1 Round to nearest -> 00
29161     //    2 Round to +inf    -> 10
29162     //    3 Round to -inf    -> 01
29163     // The 2-bit value then needs to be shifted so that it occupies bits 11:10.
29164     // To make the conversion, put all these values into a value 0xc9 and shift
29165     // it left depending on the rounding mode:
29166     //    (0xc9 << 4) & 0xc00 = X86::rmTowardZero
29167     //    (0xc9 << 6) & 0xc00 = X86::rmToNearest
29168     //    ...
29169     // (0xc9 << (2 * NewRM + 4)) & 0xc00
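    // For example, NewRM = 3 (round to -inf) gives a shift of 2 * 3 + 4 = 10,
    // and (0xc9 << 10) & 0xc00 = 0x400, i.e. RM bits 11:10 = 01 as required.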
29170     SDValue ShiftValue =
29171         DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
29172                     DAG.getNode(ISD::ADD, DL, MVT::i32,
29173                                 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
29174                                             DAG.getConstant(1, DL, MVT::i8)),
29175                                 DAG.getConstant(4, DL, MVT::i32)));
29176     SDValue Shifted =
29177         DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
29178                     ShiftValue);
29179     RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
29180                          DAG.getConstant(0xc00, DL, MVT::i16));
29181   }
29182 
29183   // Update rounding mode bits and store the new FP Control Word into stack.
29184   CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
29185   Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
29186 
29187   // Load FP control word from the slot.
29188   SDValue OpsLD[] = {Chain, StackSlot};
29189   MachineMemOperand *MMOL =
29190       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
29191   Chain = DAG.getMemIntrinsicNode(
29192       X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
29193 
29194   // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
29195   // same way but in bits 14:13.
29196   if (Subtarget.hasSSE1()) {
29197     // Store MXCSR into memory.
29198     Chain = DAG.getNode(
29199         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
29200         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
29201         StackSlot);
29202 
29203     // Load MXCSR from stack slot and clear RM field (bits 14:13).
29204     SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
29205     Chain = CWD.getValue(1);
29206     CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
29207                       DAG.getConstant(0xffff9fff, DL, MVT::i32));
29208 
29209     // Shift X87 RM bits from 11:10 to 14:13.
29210     RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
29211     RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
29212                          DAG.getConstant(3, DL, MVT::i8));
29213 
29214     // Update rounding mode bits and store the new FP Control Word into stack.
29215     CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
29216     Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
29217 
29218     // Load MXCSR from the slot.
29219     Chain = DAG.getNode(
29220         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
29221         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
29222         StackSlot);
29223   }
29224 
29225   return Chain;
29226 }
29227 
29228 /// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
29229 //
29230 // i8/i16 vector implemented using dword LZCNT vector instruction
29231 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
29232 // split the vector, perform the operation on its Lo and Hi parts, and
29233 // concatenate the results.
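// For example, an i8 element zero-extended to i32 gains exactly 32 - 8 = 24
// extra leading zeros, which the final SUB removes again.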
29234 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
29235                                          const X86Subtarget &Subtarget) {
29236   assert(Op.getOpcode() == ISD::CTLZ);
29237   SDLoc dl(Op);
29238   MVT VT = Op.getSimpleValueType();
29239   MVT EltVT = VT.getVectorElementType();
29240   unsigned NumElems = VT.getVectorNumElements();
29241 
29242   assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
29243           "Unsupported element type");
29244 
29245   // Split the vector; its Lo and Hi parts will be handled in the next iteration.
29246   if (NumElems > 16 ||
29247       (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
29248     return splitVectorIntUnary(Op, DAG);
29249 
29250   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
29251   assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
29252           "Unsupported value type for operation");
29253 
29254   // Use native supported vector instruction vplzcntd.
29255   // Use the natively supported vector instruction vplzcntd.
29256   SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
29257   SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
29258   SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
29259 
29260   return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
29261 }
29262 
29263 // Lower CTLZ using a PSHUFB lookup table implementation.
29264 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
29265                                        const X86Subtarget &Subtarget,
29266                                        SelectionDAG &DAG) {
29267   MVT VT = Op.getSimpleValueType();
29268   int NumElts = VT.getVectorNumElements();
29269   int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
29270   MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
29271 
29272   // Per-nibble leading zero PSHUFB lookup table.
29273   const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
29274                        /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
29275                        /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
29276                        /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
29277 
29278   SmallVector<SDValue, 64> LUTVec;
29279   for (int i = 0; i < NumBytes; ++i)
29280     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
29281   SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
29282 
29283   // Begin by bitcasting the input to byte vector, then split those bytes
29284   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
29285   // If the hi input nibble is zero then we add both results together, otherwise
29286   // we just take the hi result (by masking the lo result to zero before the
29287   // add).
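  // For example, byte 0x0b: hi nibble 0 -> LUT 4, lo nibble 0xb -> LUT 0, and
  // since the hi nibble is zero the two counts are added: ctlz8(0x0b) = 4.
  // For byte 0x34: hi nibble 3 -> LUT 2 and is nonzero, so ctlz8(0x34) = 2.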
29288   SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
29289   SDValue Zero = DAG.getConstant(0, DL, CurrVT);
29290 
29291   SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
29292   SDValue Lo = Op0;
29293   SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
29294   SDValue HiZ;
29295   if (CurrVT.is512BitVector()) {
29296     MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
29297     HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
29298     HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
29299   } else {
29300     HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
29301   }
29302 
29303   Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
29304   Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
29305   Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
29306   SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
29307 
29308   // Merge result back from vXi8 back to VT, working on the lo/hi halves
29309   // of the current vector width in the same way we did for the nibbles.
29310   // If the upper half of the input element is zero then add the halves'
29311   // leading zero counts together, otherwise just use the upper half's.
29312   // Double the width of the result until we are at target width.
29313   while (CurrVT != VT) {
29314     int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
29315     int CurrNumElts = CurrVT.getVectorNumElements();
29316     MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
29317     MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
29318     SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
29319 
29320     // Check if the upper half of the input element is zero.
29321     if (CurrVT.is512BitVector()) {
29322       MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
29323       HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
29324                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
29325       HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
29326     } else {
29327       HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
29328                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
29329     }
29330     HiZ = DAG.getBitcast(NextVT, HiZ);
29331 
29332     // Move the upper/lower halves to the lower bits as we'll be extending to
29333     // NextVT. Mask the lower result to zero if HiZ is true and add the results
29334     // together.
29335     SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
29336     SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
29337     SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
29338     R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
29339     Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
29340     CurrVT = NextVT;
29341   }
29342 
29343   return Res;
29344 }
29345 
29346 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
29347                                const X86Subtarget &Subtarget,
29348                                SelectionDAG &DAG) {
29349   MVT VT = Op.getSimpleValueType();
29350 
29351   if (Subtarget.hasCDI() &&
29352       // vXi8 vectors need to be promoted to 512-bits for vXi32.
29353       (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
29354     return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
29355 
29356   // Decompose 256-bit ops into smaller 128-bit ops.
29357   if (VT.is256BitVector() && !Subtarget.hasInt256())
29358     return splitVectorIntUnary(Op, DAG);
29359 
29360   // Decompose 512-bit ops into smaller 256-bit ops.
29361   if (VT.is512BitVector() && !Subtarget.hasBWI())
29362     return splitVectorIntUnary(Op, DAG);
29363 
29364   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
29365   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
29366 }
29367 
29368 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
29369                          SelectionDAG &DAG) {
29370   MVT VT = Op.getSimpleValueType();
29371   MVT OpVT = VT;
29372   unsigned NumBits = VT.getSizeInBits();
29373   SDLoc dl(Op);
29374   unsigned Opc = Op.getOpcode();
29375 
29376   if (VT.isVector())
29377     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
29378 
29379   Op = Op.getOperand(0);
29380   if (VT == MVT::i8) {
29381     // Zero extend to i32 since there is no i8 bsr.
29382     OpVT = MVT::i32;
29383     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
29384   }
29385 
29386   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
29387   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
29388   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
29389 
29390   if (Opc == ISD::CTLZ) {
29391     // If src is zero (i.e. bsr sets ZF), returns NumBits.
29392     SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
29393                      DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
29394                      Op.getValue(1)};
29395     Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
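    // The CMOV constant is 2 * NumBits - 1 rather than NumBits because the
    // final XOR with NumBits - 1 below maps it back to NumBits.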
29396   }
29397 
29398   // Finally xor with NumBits-1.
29399   Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
29400                    DAG.getConstant(NumBits - 1, dl, OpVT));
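  // For example, for an i32 input of 0x00010000, BSR yields bit index 16 and
  // 31 ^ 16 = 15, the number of leading zeros.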
29401 
29402   if (VT == MVT::i8)
29403     Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
29404   return Op;
29405 }
29406 
29407 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
29408                          SelectionDAG &DAG) {
29409   MVT VT = Op.getSimpleValueType();
29410   unsigned NumBits = VT.getScalarSizeInBits();
29411   SDValue N0 = Op.getOperand(0);
29412   SDLoc dl(Op);
29413 
29414   assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
29415          "Only scalar CTTZ requires custom lowering");
29416 
29417   // Issue a bsf (scan bits forward) which also sets EFLAGS.
29418   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
29419   Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
29420 
29421   // If src is known never zero we can skip the CMOV.
29422   if (DAG.isKnownNeverZero(N0))
29423     return Op;
29424 
29425   // If src is zero (i.e. bsf sets ZF), returns NumBits.
29426   SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
29427                    DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
29428                    Op.getValue(1)};
29429   return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
29430 }
29431 
29432 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
29433                            const X86Subtarget &Subtarget) {
29434   MVT VT = Op.getSimpleValueType();
29435   if (VT == MVT::i16 || VT == MVT::i32)
29436     return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
29437 
29438   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29439     return splitVectorIntBinary(Op, DAG);
29440 
29441   assert(Op.getSimpleValueType().is256BitVector() &&
29442          Op.getSimpleValueType().isInteger() &&
29443          "Only handle AVX 256-bit vector integer operation");
29444   return splitVectorIntBinary(Op, DAG);
29445 }
29446 
29447 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
29448                                   const X86Subtarget &Subtarget) {
29449   MVT VT = Op.getSimpleValueType();
29450   SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
29451   unsigned Opcode = Op.getOpcode();
29452   SDLoc DL(Op);
29453 
29454   if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
29455       (VT.is256BitVector() && !Subtarget.hasInt256())) {
29456     assert(Op.getSimpleValueType().isInteger() &&
29457            "Only handle AVX vector integer operation");
29458     return splitVectorIntBinary(Op, DAG);
29459   }
29460 
29461   // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
29462   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29463   EVT SetCCResultType =
29464       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
29465 
29466   unsigned BitWidth = VT.getScalarSizeInBits();
29467   if (Opcode == ISD::USUBSAT) {
29468     if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
29469       // Handle a special-case with a bit-hack instead of cmp+select:
29470       // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
29471       // If the target can use VPTERNLOG, DAGToDAG will match this as
29472       // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
29473       // "broadcast" constant load.
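      // e.g. for i8 elements: X = 0x90 -> (0x90 ^ 0x80) & (0x90 s>> 7)
      //      = 0x10 & 0xFF = 0x10 == usubsat(0x90, 0x80), while
      //      X = 0x30 -> (0x30 ^ 0x80) & (0x30 s>> 7) = 0xB0 & 0x00 = 0.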
29474       ConstantSDNode *C = isConstOrConstSplat(Y, true);
29475       if (C && C->getAPIntValue().isSignMask()) {
29476         SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
29477         SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
29478         SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
29479         SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
29480         return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
29481       }
29482     }
29483     if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
29484       // usubsat X, Y --> (X >u Y) ? X - Y : 0
29485       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
29486       SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
29487       // TODO: Move this to DAGCombiner?
29488       if (SetCCResultType == VT &&
29489           DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
29490         return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
29491       return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
29492     }
29493   }
29494 
29495   if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
29496       (!VT.isVector() || VT == MVT::v2i64)) {
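    // Compute the result together with the overflow flag (SADDO/SSUBO). On
    // overflow, saturate to SMAX when the truncated result is negative (the
    // true value overflowed upwards) and to SMIN otherwise.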
29497     APInt MinVal = APInt::getSignedMinValue(BitWidth);
29498     APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
29499     SDValue Zero = DAG.getConstant(0, DL, VT);
29500     SDValue Result =
29501         DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
29502                     DAG.getVTList(VT, SetCCResultType), X, Y);
29503     SDValue SumDiff = Result.getValue(0);
29504     SDValue Overflow = Result.getValue(1);
29505     SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
29506     SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
29507     SDValue SumNeg =
29508         DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
29509     Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
29510     return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
29511   }
29512 
29513   // Use default expansion.
29514   return SDValue();
29515 }
29516 
29517 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
29518                         SelectionDAG &DAG) {
29519   MVT VT = Op.getSimpleValueType();
29520   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
29521     // Since X86 does not have CMOV for 8-bit integer, we don't convert
29522     // 8-bit integer abs to NEG and CMOV.
29523     SDLoc DL(Op);
29524     SDValue N0 = Op.getOperand(0);
29525     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
29526                               DAG.getConstant(0, DL, VT), N0);
29527     SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
29528                      SDValue(Neg.getNode(), 1)};
29529     return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
29530   }
29531 
29532   // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
29533   if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
29534     SDLoc DL(Op);
29535     SDValue Src = Op.getOperand(0);
29536     SDValue Sub =
29537         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
29538     return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
29539   }
29540 
29541   if (VT.is256BitVector() && !Subtarget.hasInt256()) {
29542     assert(VT.isInteger() &&
29543            "Only handle AVX 256-bit vector integer operation");
29544     return splitVectorIntUnary(Op, DAG);
29545   }
29546 
29547   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29548     return splitVectorIntUnary(Op, DAG);
29549 
29550   // Default to expand.
29551   return SDValue();
29552 }
29553 
29554 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
29555                         SelectionDAG &DAG) {
29556   MVT VT = Op.getSimpleValueType();
29557 
29558   // For AVX1 cases, split to use legal ops.
29559   if (VT.is256BitVector() && !Subtarget.hasInt256())
29560     return splitVectorIntBinary(Op, DAG);
29561 
29562   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29563     return splitVectorIntBinary(Op, DAG);
29564 
29565   // Default to expand.
29566   return SDValue();
29567 }
29568 
29569 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
29570                            SelectionDAG &DAG) {
29571   MVT VT = Op.getSimpleValueType();
29572 
29573   // For AVX1 cases, split to use legal ops.
29574   if (VT.is256BitVector() && !Subtarget.hasInt256())
29575     return splitVectorIntBinary(Op, DAG);
29576 
29577   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29578     return splitVectorIntBinary(Op, DAG);
29579 
29580   // Default to expand.
29581   return SDValue();
29582 }
29583 
29584 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
29585                         SelectionDAG &DAG) {
29586   SDLoc dl(Op);
29587   MVT VT = Op.getSimpleValueType();
29588 
29589   // Decompose 256-bit ops into 128-bit ops.
29590   if (VT.is256BitVector() && !Subtarget.hasInt256())
29591     return splitVectorIntBinary(Op, DAG);
29592 
29593   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29594     return splitVectorIntBinary(Op, DAG);
29595 
29596   SDValue A = Op.getOperand(0);
29597   SDValue B = Op.getOperand(1);
29598 
29599   // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
29600   // vector pairs, multiply and truncate.
29601   if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
29602     unsigned NumElts = VT.getVectorNumElements();
29603 
29604     if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29605         (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29606       MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
29607       return DAG.getNode(
29608           ISD::TRUNCATE, dl, VT,
29609           DAG.getNode(ISD::MUL, dl, ExVT,
29610                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
29611                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
29612     }
29613 
29614     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29615 
29616     // Extract the lo/hi parts and any-extend them to i16.
29617     // We're going to mask off the low byte of each result element of the
29618     // pmullw, so it doesn't matter what's in the high byte of each 16-bit
29619     // element.
29620     SDValue Undef = DAG.getUNDEF(VT);
29621     SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
29622     SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
29623 
29624     SDValue BLo, BHi;
29625     if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
29626       // If the RHS is a constant, manually unpackl/unpackh.
29627       SmallVector<SDValue, 16> LoOps, HiOps;
29628       for (unsigned i = 0; i != NumElts; i += 16) {
29629         for (unsigned j = 0; j != 8; ++j) {
29630           LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
29631                                                MVT::i16));
29632           HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
29633                                                MVT::i16));
29634         }
29635       }
29636 
29637       BLo = DAG.getBuildVector(ExVT, dl, LoOps);
29638       BHi = DAG.getBuildVector(ExVT, dl, HiOps);
29639     } else {
29640       BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
29641       BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
29642     }
29643 
29644     // Multiply, mask the lower 8 bits of the lo/hi results and pack.
29645     SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
29646     SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
29647     return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
29648   }
29649 
29650   // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
29651   if (VT == MVT::v4i32) {
29652     assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
29653            "Should not custom lower when pmulld is available!");
29654 
29655     // Extract the odd parts.
29656     static const int UnpackMask[] = { 1, -1, 3, -1 };
29657     SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
29658     SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
29659 
29660     // Multiply the even parts.
29661     SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
29662                                 DAG.getBitcast(MVT::v2i64, A),
29663                                 DAG.getBitcast(MVT::v2i64, B));
29664     // Now multiply odd parts.
29665     SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
29666                                DAG.getBitcast(MVT::v2i64, Aodds),
29667                                DAG.getBitcast(MVT::v2i64, Bodds));
29668 
29669     Evens = DAG.getBitcast(VT, Evens);
29670     Odds = DAG.getBitcast(VT, Odds);
29671 
29672     // Merge the two vectors back together with a shuffle. This expands into 2
29673     // shuffles.
29674     static const int ShufMask[] = { 0, 4, 2, 6 };
29675     return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
29676   }
29677 
29678   assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
29679          "Only know how to lower V2I64/V4I64/V8I64 multiply");
29680   assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
29681 
29682   //  Ahi = psrlqi(a, 32);
29683   //  Bhi = psrlqi(b, 32);
29684   //
29685   //  AloBlo = pmuludq(a, b);
29686   //  AloBhi = pmuludq(a, Bhi);
29687   //  AhiBlo = pmuludq(Ahi, b);
29688   //
29689   //  Hi = psllqi(AloBhi + AhiBlo, 32);
29690   //  return AloBlo + Hi;
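  //  This is the schoolbook product with a = Alo + 2^32*Ahi, b = Blo + 2^32*Bhi:
  //  a*b = AloBlo + 2^32*(AloBhi + AhiBlo) + 2^64*AhiBhi, and the AhiBhi term
  //  vanishes modulo 2^64.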
29691   KnownBits AKnown = DAG.computeKnownBits(A);
29692   KnownBits BKnown = DAG.computeKnownBits(B);
29693 
29694   APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
29695   bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
29696   bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
29697 
29698   APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
29699   bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
29700   bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
29701 
29702   SDValue Zero = DAG.getConstant(0, dl, VT);
29703 
29704   // Only multiply lo/hi halves that aren't known to be zero.
29705   SDValue AloBlo = Zero;
29706   if (!ALoIsZero && !BLoIsZero)
29707     AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
29708 
29709   SDValue AloBhi = Zero;
29710   if (!ALoIsZero && !BHiIsZero) {
29711     SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
29712     AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
29713   }
29714 
29715   SDValue AhiBlo = Zero;
29716   if (!AHiIsZero && !BLoIsZero) {
29717     SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
29718     AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
29719   }
29720 
29721   SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
29722   Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
29723 
29724   return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
29725 }
29726 
29727 static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
29728                                      MVT VT, bool IsSigned,
29729                                      const X86Subtarget &Subtarget,
29730                                      SelectionDAG &DAG,
29731                                      SDValue *Low = nullptr) {
29732   unsigned NumElts = VT.getVectorNumElements();
29733 
29734   // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
29735   // to a vXi16 type. Do the multiplies, shift the results and pack the half
29736   // lane results back together.
29737 
29738   // We'll take different approaches for signed and unsigned.
29739   // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes
29740   // and use pmullw to calculate the full 16-bit product.
29741   // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
29742   // shift them left into the upper byte of each word. This allows us to use
29743   // pmulhw to calculate the full 16-bit product. This trick means we don't
29744   // need to sign extend the bytes to use pmullw.
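  // (pmulhw((a << 8), (b << 8)) == ((a * b) << 16) >> 16 == the full signed
  //  i16 product a * b, since |a * b| <= 128 * 128 fits in 16 bits.)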
29745 
29746   MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29747   SDValue Zero = DAG.getConstant(0, dl, VT);
29748 
29749   SDValue ALo, AHi;
29750   if (IsSigned) {
29751     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
29752     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
29753   } else {
29754     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
29755     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
29756   }
29757 
29758   SDValue BLo, BHi;
29759   if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
29760     // If the RHS is a constant, manually unpackl/unpackh and extend.
29761     SmallVector<SDValue, 16> LoOps, HiOps;
29762     for (unsigned i = 0; i != NumElts; i += 16) {
29763       for (unsigned j = 0; j != 8; ++j) {
29764         SDValue LoOp = B.getOperand(i + j);
29765         SDValue HiOp = B.getOperand(i + j + 8);
29766 
29767         if (IsSigned) {
29768           LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
29769           HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
29770           LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
29771                              DAG.getConstant(8, dl, MVT::i16));
29772           HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
29773                              DAG.getConstant(8, dl, MVT::i16));
29774         } else {
29775           LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
29776           HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
29777         }
29778 
29779         LoOps.push_back(LoOp);
29780         HiOps.push_back(HiOp);
29781       }
29782     }
29783 
29784     BLo = DAG.getBuildVector(ExVT, dl, LoOps);
29785     BHi = DAG.getBuildVector(ExVT, dl, HiOps);
29786   } else if (IsSigned) {
29787     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
29788     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
29789   } else {
29790     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
29791     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
29792   }
29793 
29794   // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
29795   // pack back to vXi8.
29796   unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
29797   SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
29798   SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);
29799 
29800   if (Low)
29801     *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
29802 
29803   return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
29804 }
29805 
29806 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
29807                          SelectionDAG &DAG) {
29808   SDLoc dl(Op);
29809   MVT VT = Op.getSimpleValueType();
29810   bool IsSigned = Op->getOpcode() == ISD::MULHS;
29811   unsigned NumElts = VT.getVectorNumElements();
29812   SDValue A = Op.getOperand(0);
29813   SDValue B = Op.getOperand(1);
29814 
29815   // Decompose 256-bit ops into 128-bit ops.
29816   if (VT.is256BitVector() && !Subtarget.hasInt256())
29817     return splitVectorIntBinary(Op, DAG);
29818 
29819   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29820     return splitVectorIntBinary(Op, DAG);
29821 
29822   if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
29823     assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
29824            (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
29825            (VT == MVT::v16i32 && Subtarget.hasAVX512()));
29826 
29827     // PMULxD operations multiply each even value (starting at 0) of LHS with
29828     // the related value of RHS and produce a widened result.
29829     // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
29830     // => <2 x i64> <ae|cg>
29831     //
29832     // In other words, to have all the results, we need to perform two PMULxD:
29833     // 1. one with the even values.
29834     // 2. one with the odd values.
29835     // To achieve #2, we need to place the odd values at an even position.
29836     //
29837     // Place the odd value at an even position (basically, shift all values 1
29838     // step to the left):
29839     const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
29840                         9, -1, 11, -1, 13, -1, 15, -1};
29841     // <a|b|c|d> => <b|undef|d|undef>
29842     SDValue Odd0 =
29843         DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
29844     // <e|f|g|h> => <f|undef|h|undef>
29845     SDValue Odd1 =
29846         DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
29847 
29848     // Emit two multiplies, one for the lower 2 ints and one for the higher 2
29849     // ints.
29850     MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
29851     unsigned Opcode =
29852         (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
29853     // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
29854     // => <2 x i64> <ae|cg>
29855     SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
29856                                                   DAG.getBitcast(MulVT, A),
29857                                                   DAG.getBitcast(MulVT, B)));
29858     // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
29859     // => <2 x i64> <bf|dh>
29860     SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
29861                                                   DAG.getBitcast(MulVT, Odd0),
29862                                                   DAG.getBitcast(MulVT, Odd1)));
29863 
29864     // Shuffle it back into the right order.
29865     SmallVector<int, 16> ShufMask(NumElts);
29866     for (int i = 0; i != (int)NumElts; ++i)
29867       ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
29868 
29869     SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
29870 
29871     // If we have a signed multiply but no PMULDQ, fix up the result of an
29872     // unsigned multiply.
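    // mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0) modulo the
    // element width, so mask each operand by the other operand's sign and
    // subtract the sum from the unsigned high result.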
29873     if (IsSigned && !Subtarget.hasSSE41()) {
29874       SDValue Zero = DAG.getConstant(0, dl, VT);
29875       SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
29876                                DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
29877       SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
29878                                DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
29879 
29880       SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
29881       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
29882     }
29883 
29884     return Res;
29885   }
29886 
29887   // Only i8 vectors should need custom lowering after this.
29888   assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
29889          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
29890          "Unsupported vector type");
29891 
29892   // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
29893   // logical shift down the upper half and pack back to i8.
29894 
29895   // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
29896   // and then ashr/lshr the upper bits down to the lower bits before multiply.
29897 
29898   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29899       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29900     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29901     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29902     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
29903     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
29904     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
29905     Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
29906     return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
29907   }
29908 
29909   return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
29910 }
29911 
29912 // Custom lowering for SMULO/UMULO.
29913 static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
29914                          SelectionDAG &DAG) {
29915   MVT VT = Op.getSimpleValueType();
29916 
29917   // Scalars defer to LowerXALUO.
29918   if (!VT.isVector())
29919     return LowerXALUO(Op, DAG);
29920 
29921   SDLoc dl(Op);
29922   bool IsSigned = Op->getOpcode() == ISD::SMULO;
29923   SDValue A = Op.getOperand(0);
29924   SDValue B = Op.getOperand(1);
29925   EVT OvfVT = Op->getValueType(1);
29926 
29927   if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
29928       (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
29929     // Extract the LHS Lo/Hi vectors
29930     SDValue LHSLo, LHSHi;
29931     std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);
29932 
29933     // Extract the RHS Lo/Hi vectors
29934     SDValue RHSLo, RHSHi;
29935     std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);
29936 
29937     EVT LoOvfVT, HiOvfVT;
29938     std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
29939     SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
29940     SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);
29941 
29942     // Issue the split operations.
29943     SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
29944     SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);
29945 
29946     // Join the separate data results and the overflow results.
29947     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29948     SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
29949                               Hi.getValue(1));
29950 
29951     return DAG.getMergeValues({Res, Ovf}, dl);
29952   }
29953 
29954   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29955   EVT SetccVT =
29956       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
29957 
29958   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29959       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29960     unsigned NumElts = VT.getVectorNumElements();
29961     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29962     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29963     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
29964     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
29965     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
29966 
29967     SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
29968 
29969     SDValue Ovf;
29970     if (IsSigned) {
29971       SDValue High, LowSign;
29972       if (OvfVT.getVectorElementType() == MVT::i1 &&
29973           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
29974         // Rather than truncating, try to do the compare on vXi16 or vXi32.
29975         // Shift the high down filling with sign bits.
29976         High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
29977         // Fill all 16 bits with the sign bit from the low.
29978         LowSign =
29979             getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
29980         LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
29981                                              15, DAG);
29982         SetccVT = OvfVT;
29983         if (!Subtarget.hasBWI()) {
29984           // We can't do a vXi16 compare so sign extend to v16i32.
29985           High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
29986           LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
29987         }
29988       } else {
29989         // Otherwise do the compare at vXi8.
29990         High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
29991         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
29992         LowSign =
29993             DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
29994       }
29995 
29996       Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
29997     } else {
29998       SDValue High =
29999           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
30000       if (OvfVT.getVectorElementType() == MVT::i1 &&
30001           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
30002         // Rather than truncating, try to do the compare on vXi16 or vXi32.
30003         SetccVT = OvfVT;
30004         if (!Subtarget.hasBWI()) {
30005           // We can't do a vXi16 compare so sign extend to v16i32.
30006           High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
30007         }
30008       } else {
30009         // Otherwise do the compare at vXi8.
30010         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
30011       }
30012 
30013       Ovf =
30014           DAG.getSetCC(dl, SetccVT, High,
30015                        DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
30016     }
30017 
30018     Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
30019 
30020     return DAG.getMergeValues({Low, Ovf}, dl);
30021   }
30022 
30023   SDValue Low;
30024   SDValue High =
30025       LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);
30026 
30027   SDValue Ovf;
30028   if (IsSigned) {
30029     // SMULO overflows if the high bits don't match the sign of the low.
30030     SDValue LowSign =
30031         DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
30032     Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
30033   } else {
30034     // UMULO overflows if the high bits are non-zero.
30035     Ovf =
30036         DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
30037   }
30038 
30039   Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
30040 
30041   return DAG.getMergeValues({Low, Ovf}, dl);
30042 }
30043 
30044 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
30045   assert(Subtarget.isTargetWin64() && "Unexpected target");
30046   EVT VT = Op.getValueType();
30047   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
30048          "Unexpected return type for lowering");
30049 
30050   if (isa<ConstantSDNode>(Op->getOperand(1))) {
30051     SmallVector<SDValue> Result;
30052     if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
30053       return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
30054   }
30055 
30056   RTLIB::Libcall LC;
30057   bool isSigned;
30058   switch (Op->getOpcode()) {
30059   default: llvm_unreachable("Unexpected request for libcall!");
30060   case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
30061   case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
30062   case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
30063   case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
30064   }
30065 
30066   SDLoc dl(Op);
30067   SDValue InChain = DAG.getEntryNode();
30068 
30069   TargetLowering::ArgListTy Args;
30070   TargetLowering::ArgListEntry Entry;
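  // Pass each i128 operand indirectly: spill it to a 16-byte-aligned stack
  // slot and pass a pointer to that slot.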
30071   for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
30072     EVT ArgVT = Op->getOperand(i).getValueType();
30073     assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
30074            "Unexpected argument type for lowering");
30075     SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
30076     int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30077     MachinePointerInfo MPI =
30078         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30079     Entry.Node = StackPtr;
30080     InChain =
30081         DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
30082     Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
30083     Entry.Ty = PointerType::get(ArgTy, 0);
30084     Entry.IsSExt = false;
30085     Entry.IsZExt = false;
30086     Args.push_back(Entry);
30087   }
30088 
30089   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
30090                                          getPointerTy(DAG.getDataLayout()));
30091 
30092   TargetLowering::CallLoweringInfo CLI(DAG);
30093   CLI.setDebugLoc(dl)
30094       .setChain(InChain)
30095       .setLibCallee(
30096           getLibcallCallingConv(LC),
30097           static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
30098           std::move(Args))
30099       .setInRegister()
30100       .setSExtResult(isSigned)
30101       .setZExtResult(!isSigned);
30102 
30103   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
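  // The i128 result is returned as a v2i64 in XMM0; bitcast it back to VT.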
30104   return DAG.getBitcast(VT, CallInfo.first);
30105 }
30106 
30107 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
30108                                                    SelectionDAG &DAG,
30109                                                    SDValue &Chain) const {
30110   assert(Subtarget.isTargetWin64() && "Unexpected target");
30111   EVT VT = Op.getValueType();
30112   bool IsStrict = Op->isStrictFPOpcode();
30113 
30114   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
30115   EVT ArgVT = Arg.getValueType();
30116 
30117   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
30118          "Unexpected return type for lowering");
30119 
30120   RTLIB::Libcall LC;
30121   if (Op->getOpcode() == ISD::FP_TO_SINT ||
30122       Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
30123     LC = RTLIB::getFPTOSINT(ArgVT, VT);
30124   else
30125     LC = RTLIB::getFPTOUINT(ArgVT, VT);
30126   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
30127 
30128   SDLoc dl(Op);
30129   MakeLibCallOptions CallOptions;
30130   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
30131 
30132   SDValue Result;
30133   // Expect the i128 result to be returned as a v2i64 in xmm0; cast it back
30134   // to the expected VT (i128).
30135   std::tie(Result, Chain) =
30136       makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
30137   Result = DAG.getBitcast(VT, Result);
30138   return Result;
30139 }
30140 
30141 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
30142                                                    SelectionDAG &DAG) const {
30143   assert(Subtarget.isTargetWin64() && "Unexpected target");
30144   EVT VT = Op.getValueType();
30145   bool IsStrict = Op->isStrictFPOpcode();
30146 
30147   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
30148   EVT ArgVT = Arg.getValueType();
30149 
30150   assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
30151          "Unexpected argument type for lowering");
30152 
30153   RTLIB::Libcall LC;
30154   if (Op->getOpcode() == ISD::SINT_TO_FP ||
30155       Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
30156     LC = RTLIB::getSINTTOFP(ArgVT, VT);
30157   else
30158     LC = RTLIB::getUINTTOFP(ArgVT, VT);
30159   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
30160 
30161   SDLoc dl(Op);
30162   MakeLibCallOptions CallOptions;
30163   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
30164 
30165   // Pass the i128 argument as an indirect argument on the stack.
30166   SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
30167   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30168   MachinePointerInfo MPI =
30169       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30170   Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));
30171 
30172   SDValue Result;
30173   std::tie(Result, Chain) =
30174       makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
30175   return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
30176 }
30177 
30178 // Return true if the required (according to Opcode) shift-imm form is natively
30179 // supported by the Subtarget
30180 static bool supportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
30181                                         unsigned Opcode) {
30182   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
30183     return false;
30184 
30185   if (VT.getScalarSizeInBits() < 16)
30186     return false;
30187 
30188   if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
30189       (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
30190     return true;
30191 
30192   bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
30193                 (VT.is256BitVector() && Subtarget.hasInt256());
30194 
30195   bool AShift = LShift && (Subtarget.hasAVX512() ||
30196                            (VT != MVT::v2i64 && VT != MVT::v4i64));
30197   return (Opcode == ISD::SRA) ? AShift : LShift;
30198 }
30199 
30200 // The shift amount is a variable, but it is the same for all vector lanes.
30201 // These instructions are defined together with shift-immediate.
30202 static
30203 bool supportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
30204                                       unsigned Opcode) {
30205   return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
30206 }
30207 
30208 // Return true if the required (according to Opcode) variable-shift form is
30209 // natively supported by the Subtarget
30210 static bool supportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
30211                                     unsigned Opcode) {
30212   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
30213     return false;
30214 
30215   if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
30216     return false;
30217 
30218   // vXi16 supported only on AVX-512, BWI
30219   if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
30220     return false;
30221 
30222   if (Subtarget.hasAVX512() &&
30223       (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
30224     return true;
30225 
30226   bool LShift = VT.is128BitVector() || VT.is256BitVector();
30227   bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
30228   return (Opcode == ISD::SRA) ? AShift : LShift;
30229 }
30230 
30231 static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
30232                                            const X86Subtarget &Subtarget) {
30233   MVT VT = Op.getSimpleValueType();
30234   SDLoc dl(Op);
30235   SDValue R = Op.getOperand(0);
30236   SDValue Amt = Op.getOperand(1);
30237   unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
30238 
30239   auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
30240     assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
30241     MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
30242     SDValue Ex = DAG.getBitcast(ExVT, R);
30243 
30244     // ashr(R, 63) === cmp_slt(R, 0)
30245     if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
30246       assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
30247              "Unsupported PCMPGT op");
30248       return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
30249     }
30250 
30251     if (ShiftAmt >= 32) {
30252       // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
30253       SDValue Upper =
30254           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
30255       SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
30256                                                  ShiftAmt - 32, DAG);
30257       if (VT == MVT::v2i64)
30258         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
30259       if (VT == MVT::v4i64)
30260         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
30261                                   {9, 1, 11, 3, 13, 5, 15, 7});
30262     } else {
30263       // SRA upper i32, SRL whole i64 and select lower i32.
30264       SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
30265                                                  ShiftAmt, DAG);
30266       SDValue Lower =
30267           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
30268       Lower = DAG.getBitcast(ExVT, Lower);
30269       if (VT == MVT::v2i64)
30270         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
30271       if (VT == MVT::v4i64)
30272         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
30273                                   {8, 1, 10, 3, 12, 5, 14, 7});
30274     }
30275     return DAG.getBitcast(VT, Ex);
30276   };
30277 
30278   // Optimize shl/srl/sra with constant shift amount.
30279   APInt APIntShiftAmt;
30280   if (!X86::isConstantSplat(Amt, APIntShiftAmt))
30281     return SDValue();
30282 
30283   // If the shift amount is out of range, return undef.
30284   if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
30285     return DAG.getUNDEF(VT);
30286 
30287   uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
30288 
30289   if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
30290     // Hardware support for vector shifts is sparse which makes us scalarize the
30291     // vector operations in many cases. Also, on sandybridge ADD is faster than
30292     // shl: (shl V, 1) -> (add (freeze V), (freeze V))
30293     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
30294       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
30295       // must be 0). (add undef, undef) however can be any value. To make this
30296       // safe, we must freeze R to ensure that register allocation uses the same
30297       // register for an undefined value. This ensures that the result will
30298       // still be even and preserves the original semantics.
30299       R = DAG.getFreeze(R);
30300       return DAG.getNode(ISD::ADD, dl, VT, R, R);
30301     }
30302 
30303     return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
30304   }
30305 
30306   // i64 SRA needs to be performed as partial shifts.
30307   if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
30308        (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
30309       Op.getOpcode() == ISD::SRA)
30310     return ArithmeticShiftRight64(ShiftAmt);
30311 
30312   if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
30313       (Subtarget.hasBWI() && VT == MVT::v64i8)) {
30314     unsigned NumElts = VT.getVectorNumElements();
30315     MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
30316 
30317     // Simple i8 add case
30318     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
30319       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
30320       // must be 0). (add undef, undef) however can be any value. To make this
30321       // safe, we must freeze R to ensure that register allocation uses the same
30322       // register for an undefined value. This ensures that the result will
30323       // still be even and preserves the original semantics.
30324       R = DAG.getFreeze(R);
30325       return DAG.getNode(ISD::ADD, dl, VT, R, R);
30326     }
30327 
30328     // ashr(R, 7)  === cmp_slt(R, 0)
30329     if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
30330       SDValue Zeros = DAG.getConstant(0, dl, VT);
30331       if (VT.is512BitVector()) {
30332         assert(VT == MVT::v64i8 && "Unexpected element type!");
30333         SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
30334         return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
30335       }
30336       return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
30337     }
30338 
30339     // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
30340     if (VT == MVT::v16i8 && Subtarget.hasXOP())
30341       return SDValue();
30342 
30343     if (Op.getOpcode() == ISD::SHL) {
30344       // Make a large shift.
30345       SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
30346                                                ShiftAmt, DAG);
30347       SHL = DAG.getBitcast(VT, SHL);
30348       // Zero out the rightmost bits.
30349       APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
30350       return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
30351     }
30352     if (Op.getOpcode() == ISD::SRL) {
30353       // Make a large shift.
30354       SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
30355                                                ShiftAmt, DAG);
30356       SRL = DAG.getBitcast(VT, SRL);
30357       // Zero out the leftmost bits.
30358       APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
30359       return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
30360     }
30361     if (Op.getOpcode() == ISD::SRA) {
30362       // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
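      // e.g. with Amt = 2, Mask = 0x20: R = 0x80 -> (0x20 ^ 0x20) - 0x20 = 0xE0
      // = 0x80 s>> 2, while R = 0x7C -> (0x1F ^ 0x20) - 0x20 = 0x1F = 0x7C s>> 2.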
30363       SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
30364 
30365       SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
30366       Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
30367       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
30368       return Res;
30369     }
30370     llvm_unreachable("Unknown shift opcode.");
30371   }
30372 
30373   return SDValue();
30374 }
30375 
30376 static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
30377                                           const X86Subtarget &Subtarget) {
30378   MVT VT = Op.getSimpleValueType();
30379   SDLoc dl(Op);
30380   SDValue R = Op.getOperand(0);
30381   SDValue Amt = Op.getOperand(1);
30382   unsigned Opcode = Op.getOpcode();
30383   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
30384 
30385   int BaseShAmtIdx = -1;
30386   if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
30387     if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
30388       return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
30389                                  Subtarget, DAG);
30390 
30391     // vXi8 shifts - shift as v8i16 + mask result.
30392     if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
30393          (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
30394          VT == MVT::v64i8) &&
30395         !Subtarget.hasXOP()) {
30396       unsigned NumElts = VT.getVectorNumElements();
30397       MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
30398       if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
30399         unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
30400         unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
30401 
30402         // Create the mask using vXi16 shifts. For shift-rights we need to move
30403         // the upper byte down before splatting the vXi8 mask.
30404         SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
30405         BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
30406                                       BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
30407         if (Opcode != ISD::SHL)
30408           BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
30409                                                8, DAG);
30410         BitMask = DAG.getBitcast(VT, BitMask);
30411         BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
30412                                        SmallVector<int, 64>(NumElts, 0));
30413 
30414         SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
30415                                           DAG.getBitcast(ExtVT, R), BaseShAmt,
30416                                           BaseShAmtIdx, Subtarget, DAG);
30417         Res = DAG.getBitcast(VT, Res);
30418         Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
30419 
30420         if (Opcode == ISD::SRA) {
30421           // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
30422           // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
30423           SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
30424           SignMask =
30425               getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
30426                                   BaseShAmtIdx, Subtarget, DAG);
30427           SignMask = DAG.getBitcast(VT, SignMask);
30428           Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
30429           Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
30430         }
30431         return Res;
30432       }
30433     }
30434   }
30435 
30436   return SDValue();
30437 }
30438 
30439 // Convert a shift/rotate left amount to a multiplication scale factor.
30440 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
30441                                        const X86Subtarget &Subtarget,
30442                                        SelectionDAG &DAG) {
30443   MVT VT = Amt.getSimpleValueType();
30444   if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
30445         (Subtarget.hasInt256() && VT == MVT::v16i16) ||
30446         (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
30447         (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
30448         (Subtarget.hasInt256() && VT == MVT::v32i8) ||
30449         (Subtarget.hasBWI() && VT == MVT::v64i8)))
30450     return SDValue();
30451 
30452   MVT SVT = VT.getVectorElementType();
30453   unsigned SVTBits = SVT.getSizeInBits();
30454   unsigned NumElems = VT.getVectorNumElements();
30455 
30456   APInt UndefElts;
30457   SmallVector<APInt> EltBits;
30458   if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
30459     APInt One(SVTBits, 1);
30460     SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
30461     for (unsigned I = 0; I != NumElems; ++I) {
30462       if (UndefElts[I] || EltBits[I].uge(SVTBits))
30463         continue;
30464       uint64_t ShAmt = EltBits[I].getZExtValue();
30465       Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
30466     }
30467     return DAG.getBuildVector(VT, dl, Elts);
30468   }
30469 
30470   // If the target doesn't support variable shifts, use either FP conversion
30471   // or integer multiplication to avoid shifting each element individually.
30472   if (VT == MVT::v4i32) {
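    // Shifting Amt into the exponent field and adding 0x3f800000 (1.0f) builds
    // a float with value 2^Amt per lane, so the FP_TO_SINT below produces the
    // scale factor 1 << Amt.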
30473     Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
30474     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
30475                       DAG.getConstant(0x3f800000U, dl, VT));
30476     Amt = DAG.getBitcast(MVT::v4f32, Amt);
30477     return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
30478   }
30479 
30480   // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
30481   if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
30482     SDValue Z = DAG.getConstant(0, dl, VT);
30483     SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
30484     SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
30485     Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
30486     Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
30487     if (Subtarget.hasSSE41())
30488       return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
30489     return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
30490   }
30491 
30492   return SDValue();
30493 }
30494 
30495 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
30496                           SelectionDAG &DAG) {
30497   MVT VT = Op.getSimpleValueType();
30498   SDLoc dl(Op);
30499   SDValue R = Op.getOperand(0);
30500   SDValue Amt = Op.getOperand(1);
30501   unsigned EltSizeInBits = VT.getScalarSizeInBits();
30502   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30503 
30504   unsigned Opc = Op.getOpcode();
30505   unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
30506   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
30507 
30508   assert(VT.isVector() && "Custom lowering only for vector shifts!");
30509   assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
30510 
30511   if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
30512     return V;
30513 
30514   if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
30515     return V;
30516 
30517   if (supportedVectorVarShift(VT, Subtarget, Opc))
30518     return Op;
30519 
30520   // i64 vector arithmetic shift can be emulated with the transform:
30521   // M = lshr(SIGN_MASK, Amt)
30522   // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
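  // (e.g. for i8 elements and Amt = 2: M = 0x20; R = 0x80 gives
  //  (0x20 ^ 0x20) - 0x20 = 0xE0 = 0x80 s>> 2, and R = 0x7C gives
  //  (0x1F ^ 0x20) - 0x20 = 0x1F = 0x7C s>> 2.)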
30523   if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
30524        (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
30525       Opc == ISD::SRA) {
30526     SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
30527     SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
30528     R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
30529     R = DAG.getNode(ISD::XOR, dl, VT, R, M);
30530     R = DAG.getNode(ISD::SUB, dl, VT, R, M);
30531     return R;
30532   }
30533 
30534   // XOP has 128-bit variable logical/arithmetic shifts.
30535   // +ve/-ve Amt = shift left/right.
30536   if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
30537                              VT == MVT::v8i16 || VT == MVT::v16i8)) {
30538     if (Opc == ISD::SRL || Opc == ISD::SRA) {
30539       SDValue Zero = DAG.getConstant(0, dl, VT);
30540       Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
30541     }
30542     if (Opc == ISD::SHL || Opc == ISD::SRL)
30543       return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
30544     if (Opc == ISD::SRA)
30545       return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
30546   }
30547 
30548   // v2i64 vector logical shifts can efficiently avoid scalarization - do the
30549   // shifts per-lane and then shuffle the partial results back together.
30550   if (VT == MVT::v2i64 && Opc != ISD::SRA) {
30551     // Splat the shift amounts so the scalar shifts above will catch it.
30552     SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
30553     SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
30554     SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
30555     SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
30556     return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
30557   }
30558 
30559   // If possible, lower this shift as a sequence of two shifts by
30560   // constant plus a BLENDing shuffle instead of scalarizing it.
30561   // Example:
30562   //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
30563   //
30564   // Could be rewritten as:
30565   //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
30566   //
30567   // The advantage is that the two shifts from the example would be
30568   // lowered as X86ISD::VSRLI nodes in parallel before blending.
30569   if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
30570                       (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
30571     SDValue Amt1, Amt2;
30572     unsigned NumElts = VT.getVectorNumElements();
30573     SmallVector<int, 8> ShuffleMask;
30574     for (unsigned i = 0; i != NumElts; ++i) {
30575       SDValue A = Amt->getOperand(i);
30576       if (A.isUndef()) {
30577         ShuffleMask.push_back(SM_SentinelUndef);
30578         continue;
30579       }
30580       if (!Amt1 || Amt1 == A) {
30581         ShuffleMask.push_back(i);
30582         Amt1 = A;
30583         continue;
30584       }
30585       if (!Amt2 || Amt2 == A) {
30586         ShuffleMask.push_back(i + NumElts);
30587         Amt2 = A;
30588         continue;
30589       }
30590       break;
30591     }
30592 
30593     // Only perform this blend if we can perform it without loading a mask.
30594     if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
30595         (VT != MVT::v16i16 ||
30596          is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
30597         (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
30598          canWidenShuffleElements(ShuffleMask))) {
30599       auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
30600       auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
30601       if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
30602           Cst2->getAPIntValue().ult(EltSizeInBits)) {
30603         SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
30604                                                     Cst1->getZExtValue(), DAG);
30605         SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
30606                                                     Cst2->getZExtValue(), DAG);
30607         return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
30608       }
30609     }
30610   }
30611 
30612   // If possible, lower this packed shift into a vector multiply instead of
30613   // expanding it into a sequence of scalar shifts.
30614   // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
30615   if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
30616                                                 Subtarget.canExtendTo512BW())))
30617     if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
30618       return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
30619 
30620   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
30621   // can replace it with ISD::MULHU, creating the scale factor from (NumEltBits - Amt).
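  // Illustrative per-lane view (vXi16): for a lane with Amt == 3,
  //   srl x, 3 == mulhu x, (1 << (16 - 3)) == mulhu x, 0x2000,
  // while Amt == 0 lanes cannot use the multiply (1 << 16 wraps to 0) and are
  // instead selected back to the unshifted input below.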
30622   if (Opc == ISD::SRL && ConstantAmt &&
30623       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
30624     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
30625     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
30626     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
30627       SDValue Zero = DAG.getConstant(0, dl, VT);
30628       SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
30629       SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
30630       return DAG.getSelect(dl, VT, ZAmt, R, Res);
30631     }
30632   }
30633 
30634   // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
30635   // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
30636   // can replace it with ISD::MULHS, creating the scale factor from (NumEltBits - Amt).
30637   // of these cases in pre-SSE41/XOP/AVX512 but not both.
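  // Illustrative per-lane view (vXi16): for a lane with Amt == 5,
  //   sra x, 5 == mulhs x, (1 << (16 - 5)),
  // while Amt == 0 lanes select the unshifted input and Amt == 1 lanes select
  // the VSRAI-by-1 result, since (1 << 15) is not a positive i16 multiplier.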
30638   if (Opc == ISD::SRA && ConstantAmt &&
30639       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
30640       ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
30641         !Subtarget.hasAVX512()) ||
30642        DAG.isKnownNeverZero(Amt))) {
30643     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
30644     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
30645     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
30646       SDValue Amt0 =
30647           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
30648       SDValue Amt1 =
30649           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
30650       SDValue Sra1 =
30651           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
30652       SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
30653       Res = DAG.getSelect(dl, VT, Amt0, R, Res);
30654       return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
30655     }
30656   }
30657 
30658   // v4i32 Non Uniform Shifts.
30659   // v4i32 Non-Uniform Shifts.
30660   // immediate shifts, else we need to zero-extend each lane to the lower i64
30661   // and shift using the SSE2 variable shifts.
30662   // The separate results can then be blended together.
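  // E.g. (v4i32 (srl R, <a,b,c,d>)) is lowered as four shifts of R using the
  // per-lane amounts <a,a,..>, <b,b,..>, <c,c,..>, <d,d,..>, after which lane
  // i of the final result is taken from the i'th partial result.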
30663   if (VT == MVT::v4i32) {
30664     SDValue Amt0, Amt1, Amt2, Amt3;
30665     if (ConstantAmt) {
30666       Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
30667       Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
30668       Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
30669       Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
30670     } else {
30671       // The SSE2 shifts use the lower i64 as the same shift amount for
30672       // all lanes and the upper i64 is ignored. On AVX we're better off
30673       // just zero-extending, but for SSE just duplicating the top 16-bits is
30674       // cheaper and has the same effect for out of range values.
30675       if (Subtarget.hasAVX()) {
30676         SDValue Z = DAG.getConstant(0, dl, VT);
30677         Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
30678         Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
30679         Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
30680         Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
30681       } else {
30682         SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
30683         SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
30684                                              {4, 5, 6, 7, -1, -1, -1, -1});
30685         SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
30686         SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
30687         Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
30688         Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
30689         Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
30690         Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
30691       }
30692     }
30693 
30694     unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
30695     SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
30696     SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
30697     SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
30698     SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
30699 
30700     // Merge the shifted lane results optimally with/without PBLENDW.
30701     // TODO - ideally shuffle combining would handle this.
30702     if (Subtarget.hasSSE41()) {
30703       SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
30704       SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
30705       return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
30706     }
30707     SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
30708     SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
30709     return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
30710   }
30711 
30712   // It's worth extending once and using the vXi16/vXi32 shifts for smaller
30713   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
30714   // make the existing SSE solution better.
30715   // NOTE: We honor the preferred vector width before promoting to 512-bits.
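  // E.g. with AVX2 (and no BWI), (v8i16 (shl R, Amt)) becomes
  //   (trunc (v8i32 (shl (zext R), (zext Amt))))
  // so a single variable v8i32 shift handles all of the lanes at once.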
30716   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
30717       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
30718       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
30719       (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
30720       (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
30721     assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
30722            "Unexpected vector type");
30723     MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
30724     MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
30725     unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
30726     R = DAG.getNode(ExtOpc, dl, ExtVT, R);
30727     Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
30728     return DAG.getNode(ISD::TRUNCATE, dl, VT,
30729                        DAG.getNode(Opc, dl, ExtVT, R, Amt));
30730   }
30731 
30732   // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
30733   // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
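  // Per-lane sketch: (zext_i16(x) * (1 << (8 - Amt))) >> 8 == (x >>u Amt), and
  // using a sign extension of x the same multiply/shift computes (x >>s Amt).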
30734   if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
30735       (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
30736        (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
30737       !Subtarget.hasXOP()) {
30738     int NumElts = VT.getVectorNumElements();
30739     SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
30740 
30741     // Extend constant shift amount to vXi16 (it doesn't matter if the type
30742     // isn't legal).
30743     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
30744     Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
30745     Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
30746     Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
30747     assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
30748            "Constant build vector expected");
30749 
30750     if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
30751       R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
30752                           : DAG.getZExtOrTrunc(R, dl, ExVT);
30753       R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
30754       R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
30755       return DAG.getZExtOrTrunc(R, dl, VT);
30756     }
30757 
30758     SmallVector<SDValue, 16> LoAmt, HiAmt;
30759     for (int i = 0; i != NumElts; i += 16) {
30760       for (int j = 0; j != 8; ++j) {
30761         LoAmt.push_back(Amt.getOperand(i + j));
30762         HiAmt.push_back(Amt.getOperand(i + j + 8));
30763       }
30764     }
30765 
30766     MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
30767     SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
30768     SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
30769 
30770     SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
30771     SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
30772     LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
30773     HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
30774     LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
30775     HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
30776     LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
30777     HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
30778     return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
30779   }
30780 
30781   if (VT == MVT::v16i8 ||
30782       (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
30783       (VT == MVT::v64i8 && Subtarget.hasBWI())) {
30784     MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
30785 
30786     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
30787       if (VT.is512BitVector()) {
30788         // On AVX512BW targets we make use of the fact that VSELECT lowers
30789         // to a masked blend which selects bytes based just on the sign bit
30790         // extracted to a mask.
30791         MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
30792         V0 = DAG.getBitcast(VT, V0);
30793         V1 = DAG.getBitcast(VT, V1);
30794         Sel = DAG.getBitcast(VT, Sel);
30795         Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
30796                            ISD::SETGT);
30797         return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
30798       } else if (Subtarget.hasSSE41()) {
30799         // On SSE41 targets we can use PBLENDVB which selects bytes based just
30800         // on the sign bit.
30801         V0 = DAG.getBitcast(VT, V0);
30802         V1 = DAG.getBitcast(VT, V1);
30803         Sel = DAG.getBitcast(VT, Sel);
30804         return DAG.getBitcast(SelVT,
30805                               DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
30806       }
30807       // On pre-SSE41 targets we test for the sign bit by comparing to
30808       // zero - a negative value will set all bits of the lanes to true
30809       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
30810       SDValue Z = DAG.getConstant(0, dl, SelVT);
30811       SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
30812       return DAG.getSelect(dl, SelVT, C, V0, V1);
30813     };
30814 
30815     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30816     // We can safely do this using i16 shifts as we're only interested in
30817     // the 3 lower bits of each byte.
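    // E.g. for a byte lane with Amt == 6 (0b110): after 'a << 5' the sign bit
    // is Amt[2], selecting the shift-by-4 result; doubling 'a' exposes Amt[1],
    // selecting a further shift-by-2; Amt[0] == 0 skips the final shift-by-1,
    // for a total shift of 6.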
30818     Amt = DAG.getBitcast(ExtVT, Amt);
30819     Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
30820     Amt = DAG.getBitcast(VT, Amt);
30821 
30822     if (Opc == ISD::SHL || Opc == ISD::SRL) {
30823       // r = VSELECT(r, shift(r, 4), a);
30824       SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
30825       R = SignBitSelect(VT, Amt, M, R);
30826 
30827       // a += a
30828       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30829 
30830       // r = VSELECT(r, shift(r, 2), a);
30831       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
30832       R = SignBitSelect(VT, Amt, M, R);
30833 
30834       // a += a
30835       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30836 
30837       // return VSELECT(r, shift(r, 1), a);
30838       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
30839       R = SignBitSelect(VT, Amt, M, R);
30840       return R;
30841     }
30842 
30843     if (Opc == ISD::SRA) {
30844       // For SRA we need to unpack each byte to the higher byte of a i16 vector
30845       // so we can correctly sign extend. We don't care what happens to the
30846       // lower byte.
30847       SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
30848       SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
30849       SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
30850       SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
30851       ALo = DAG.getBitcast(ExtVT, ALo);
30852       AHi = DAG.getBitcast(ExtVT, AHi);
30853       RLo = DAG.getBitcast(ExtVT, RLo);
30854       RHi = DAG.getBitcast(ExtVT, RHi);
30855 
30856       // r = VSELECT(r, shift(r, 4), a);
30857       SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
30858       SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
30859       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30860       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30861 
30862       // a += a
30863       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
30864       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
30865 
30866       // r = VSELECT(r, shift(r, 2), a);
30867       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
30868       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
30869       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30870       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30871 
30872       // a += a
30873       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
30874       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
30875 
30876       // r = VSELECT(r, shift(r, 1), a);
30877       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
30878       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
30879       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30880       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30881 
30882       // Logical shift the result back to the lower byte, leaving a zero upper
30883       // byte meaning that we can safely pack with PACKUSWB.
30884       RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
30885       RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
30886       return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
30887     }
30888   }
30889 
30890   if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
30891     MVT ExtVT = MVT::v8i32;
30892     SDValue Z = DAG.getConstant(0, dl, VT);
30893     SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
30894     SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
30895     SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
30896     SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
30897     ALo = DAG.getBitcast(ExtVT, ALo);
30898     AHi = DAG.getBitcast(ExtVT, AHi);
30899     RLo = DAG.getBitcast(ExtVT, RLo);
30900     RHi = DAG.getBitcast(ExtVT, RHi);
30901     SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
30902     SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
30903     Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
30904     Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
30905     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
30906   }
30907 
30908   if (VT == MVT::v8i16) {
30909     // If we have a constant shift amount, the non-SSE41 path is best as
30910     // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
30911     bool UseSSE41 = Subtarget.hasSSE41() &&
30912                     !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30913 
30914     auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
30915       // On SSE41 targets we can use PBLENDVB which selects bytes based just on
30916       // the sign bit.
30917       if (UseSSE41) {
30918         MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
30919         V0 = DAG.getBitcast(ExtVT, V0);
30920         V1 = DAG.getBitcast(ExtVT, V1);
30921         Sel = DAG.getBitcast(ExtVT, Sel);
30922         return DAG.getBitcast(
30923             VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
30924       }
30925       // On pre-SSE41 targets we splat the sign bit - a negative value will
30926       // set all bits of the lanes to true and VSELECT uses that in
30927       // its OR(AND(V0,C),AND(V1,~C)) lowering.
30928       SDValue C =
30929           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
30930       return DAG.getSelect(dl, VT, C, V0, V1);
30931     };
30932 
30933     // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
30934     if (UseSSE41) {
30935       // On SSE41 targets we need to replicate the shift mask in both
30936       // bytes for PBLENDVB.
30937       Amt = DAG.getNode(
30938           ISD::OR, dl, VT,
30939           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
30940           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
30941     } else {
30942       Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
30943     }
30944 
30945     // r = VSELECT(r, shift(r, 8), a);
30946     SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
30947     R = SignBitSelect(Amt, M, R);
30948 
30949     // a += a
30950     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30951 
30952     // r = VSELECT(r, shift(r, 4), a);
30953     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
30954     R = SignBitSelect(Amt, M, R);
30955 
30956     // a += a
30957     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30958 
30959     // r = VSELECT(r, shift(r, 2), a);
30960     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
30961     R = SignBitSelect(Amt, M, R);
30962 
30963     // a += a
30964     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30965 
30966     // return VSELECT(r, shift(r, 1), a);
30967     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
30968     R = SignBitSelect(Amt, M, R);
30969     return R;
30970   }
30971 
30972   // Decompose 256-bit shifts into 128-bit shifts.
30973   if (VT.is256BitVector())
30974     return splitVectorIntBinary(Op, DAG);
30975 
30976   if (VT == MVT::v32i16 || VT == MVT::v64i8)
30977     return splitVectorIntBinary(Op, DAG);
30978 
30979   return SDValue();
30980 }
30981 
30982 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
30983                                 SelectionDAG &DAG) {
30984   MVT VT = Op.getSimpleValueType();
30985   assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
30986          "Unexpected funnel shift opcode!");
30987 
30988   SDLoc DL(Op);
30989   SDValue Op0 = Op.getOperand(0);
30990   SDValue Op1 = Op.getOperand(1);
30991   SDValue Amt = Op.getOperand(2);
30992   unsigned EltSizeInBits = VT.getScalarSizeInBits();
30993   bool IsFSHR = Op.getOpcode() == ISD::FSHR;
30994 
30995   if (VT.isVector()) {
30996     APInt APIntShiftAmt;
30997     bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
30998 
30999     if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
31000       if (IsFSHR)
31001         std::swap(Op0, Op1);
31002 
31003       if (IsCstSplat) {
31004         uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
31005         SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
31006         return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
31007                              {Op0, Op1, Imm}, DAG, Subtarget);
31008       }
31009       return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
31010                            {Op0, Op1, Amt}, DAG, Subtarget);
31011     }
31012     assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
31013             VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
31014             VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
31015            "Unexpected funnel shift type!");
31016 
31017     // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
31018     // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
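    // E.g. per i8 lane: fshl(x,y,z) is the high byte of
    //   (((x << 8) | y) << (z & 7)),
    // which is exactly what shifting the unpacked i16 lane and then taking its
    // upper half computes.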
31019     if (IsCstSplat)
31020       return SDValue();
31021 
31022     SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
31023     SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31024     bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
31025 
31026     // Constant vXi16 funnel shifts can be efficiently handled by default.
31027     if (IsCst && EltSizeInBits == 16)
31028       return SDValue();
31029 
31030     unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
31031     unsigned NumElts = VT.getVectorNumElements();
31032     MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
31033     MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
31034 
31035     // Split 256-bit integers on XOP/pre-AVX2 targets.
31036     // Split 512-bit integers on non 512-bit BWI targets.
31037     if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
31038                                  !Subtarget.hasAVX2())) ||
31039         (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
31040          EltSizeInBits < 32)) {
31041       // Pre-mask the amount modulo using the wider vector.
31042       Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
31043       return splitVectorOp(Op, DAG);
31044     }
31045 
31046     // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
31047     if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
31048       int ScalarAmtIdx = -1;
31049       if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
31050         // Uniform vXi16 funnel shifts can be efficiently handled by default.
31051         if (EltSizeInBits == 16)
31052           return SDValue();
31053 
31054         SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
31055         SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
31056         Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
31057                                  ScalarAmtIdx, Subtarget, DAG);
31058         Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
31059                                  ScalarAmtIdx, Subtarget, DAG);
31060         return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
31061       }
31062     }
31063 
31064     MVT WideSVT = MVT::getIntegerVT(
31065         std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
31066     MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
31067 
31068     // If per-element shifts are legal, fallback to generic expansion.
31069     if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
31070       return SDValue();
31071 
31072     // Attempt to fold as:
31073     // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
31074     // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
31075     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
31076         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
31077       Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
31078       Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
31079       AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
31080       Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
31081                                        EltSizeInBits, DAG);
31082       SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
31083       Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
31084       if (!IsFSHR)
31085         Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
31086                                          EltSizeInBits, DAG);
31087       return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
31088     }
31089 
31090     // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
31091     if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
31092         supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
31093       SDValue Z = DAG.getConstant(0, DL, VT);
31094       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
31095       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
31096       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
31097       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
31098       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
31099       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
31100       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
31101     }
31102 
31103     // Fallback to generic expansion.
31104     return SDValue();
31105   }
31106   assert(
31107       (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
31108       "Unexpected funnel shift type!");
31109 
31110   // Expand slow SHLD/SHRD cases if we are not optimizing for size.
31111   bool OptForSize = DAG.shouldOptForSize();
31112   bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
31113 
31114   // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
31115   // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
31116   if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
31117       !isa<ConstantSDNode>(Amt)) {
31118     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
31119     SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
31120     Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
31121     Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
31122     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
31123     SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
31124     Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
31125     if (IsFSHR) {
31126       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
31127     } else {
31128       Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
31129       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
31130     }
31131     return DAG.getZExtOrTrunc(Res, DL, VT);
31132   }
31133 
31134   if (VT == MVT::i8 || ExpandFunnel)
31135     return SDValue();
31136 
31137   // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
31138   if (VT == MVT::i16) {
31139     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
31140                       DAG.getConstant(15, DL, Amt.getValueType()));
31141     unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
31142     return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
31143   }
31144 
31145   return Op;
31146 }
31147 
31148 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
31149                            SelectionDAG &DAG) {
31150   MVT VT = Op.getSimpleValueType();
31151   assert(VT.isVector() && "Custom lowering only for vector rotates!");
31152 
31153   SDLoc DL(Op);
31154   SDValue R = Op.getOperand(0);
31155   SDValue Amt = Op.getOperand(1);
31156   unsigned Opcode = Op.getOpcode();
31157   unsigned EltSizeInBits = VT.getScalarSizeInBits();
31158   int NumElts = VT.getVectorNumElements();
31159   bool IsROTL = Opcode == ISD::ROTL;
31160 
31161   // Check for constant splat rotation amount.
31162   APInt CstSplatValue;
31163   bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
31164 
31165   // Check for splat rotate by zero.
31166   if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
31167     return R;
31168 
31169   // AVX512 implicitly uses modulo rotation amounts.
31170   if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
31171     // Attempt to rotate by immediate.
31172     if (IsCstSplat) {
31173       unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
31174       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
31175       return DAG.getNode(RotOpc, DL, VT, R,
31176                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
31177     }
31178 
31179     // Else, fall-back on VPROLV/VPRORV.
31180     return Op;
31181   }
31182 
31183   // AVX512 VBMI2 vXi16 - lower to funnel shifts.
31184   if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
31185     unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
31186     return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
31187   }
31188 
31189   SDValue Z = DAG.getConstant(0, DL, VT);
31190 
31191   if (!IsROTL) {
31192     // If the ISD::ROTR amount is constant, we're always better converting to
31193     // ISD::ROTL.
31194     if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
31195       return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
31196 
31197     // XOP targets always prefer ISD::ROTL.
31198     if (Subtarget.hasXOP())
31199       return DAG.getNode(ISD::ROTL, DL, VT, R,
31200                          DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
31201   }
31202 
31203   // Split 256-bit integers on XOP/pre-AVX2 targets.
31204   if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
31205     return splitVectorIntBinary(Op, DAG);
31206 
31207   // XOP has 128-bit vector variable + immediate rotates.
31208   // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
31209   // XOP implicitly uses modulo rotation amounts.
31210   if (Subtarget.hasXOP()) {
31211     assert(IsROTL && "Only ROTL expected");
31212     assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
31213 
31214     // Attempt to rotate by immediate.
31215     if (IsCstSplat) {
31216       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
31217       return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
31218                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
31219     }
31220 
31221     // Use general rotate by variable (per-element).
31222     return Op;
31223   }
31224 
31225   // Rotate by a uniform constant - expand back to shifts.
31226   if (IsCstSplat)
31227     return SDValue();
31228 
31229   // Split 512-bit integers on non 512-bit BWI targets.
31230   if (VT.is512BitVector() && !Subtarget.useBWIRegs())
31231     return splitVectorIntBinary(Op, DAG);
31232 
31233   assert(
31234       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
31235        ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
31236         Subtarget.hasAVX2()) ||
31237        ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
31238       "Only vXi32/vXi16/vXi8 vector rotates supported");
31239 
31240   MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
31241   MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
31242 
31243   SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
31244   SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31245 
31246   // Attempt to fold as unpack(x,x) << zext(splat(y)):
31247   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
31248   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
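  // E.g. per i8 lane: rotl(x,y) is the high byte of (((x << 8) | x) << (y & 7)),
  // so one wide shift of the unpacked lanes performs the whole rotate.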
31249   if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
31250     int BaseRotAmtIdx = -1;
31251     if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
31252       if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
31253         unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
31254         return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
31255       }
31256       unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
31257       SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
31258       SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
31259       Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
31260                                BaseRotAmtIdx, Subtarget, DAG);
31261       Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
31262                                BaseRotAmtIdx, Subtarget, DAG);
31263       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
31264     }
31265   }
31266 
31267   // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
31268   // the amount bit.
31269   // TODO: We're doing nothing here that we couldn't do for funnel shifts.
31270   if (EltSizeInBits == 8) {
31271     bool IsConstAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
31272     MVT WideVT =
31273         MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
31274     unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
31275 
31276     // Attempt to fold as:
31277     // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
31278     // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
31279     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
31280         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
31281       // If we're rotating by constant, just use default promotion.
31282       if (IsConstAmt)
31283         return SDValue();
31284       // See if we can perform this by widening to vXi16 or vXi32.
31285       R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
31286       R = DAG.getNode(
31287           ISD::OR, DL, WideVT, R,
31288           getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
31289       Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
31290       R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
31291       if (IsROTL)
31292         R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
31293       return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
31294     }
31295 
31296     // Attempt to fold as unpack(x,x) << zext(y):
31297     // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
31298     // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
31299     if (IsConstAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
31300       // See if we can perform this by unpacking to lo/hi vXi16.
31301       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
31302       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
31303       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
31304       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
31305       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
31306       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
31307       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
31308     }
31309     assert((VT == MVT::v16i8 || VT == MVT::v32i8) && "Unsupported vXi8 type");
31310 
31311     // We don't need ModuloAmt here as we just peek at individual bits.
31312     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
31313       if (Subtarget.hasSSE41()) {
31314         // On SSE41 targets we can use PBLENDVB which selects bytes based just
31315         // on the sign bit.
31316         V0 = DAG.getBitcast(VT, V0);
31317         V1 = DAG.getBitcast(VT, V1);
31318         Sel = DAG.getBitcast(VT, Sel);
31319         return DAG.getBitcast(SelVT,
31320                               DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
31321       }
31322       // On pre-SSE41 targets we test for the sign bit by comparing to
31323       // zero - a negative value will set all bits of the lanes to true
31324       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
31325       SDValue Z = DAG.getConstant(0, DL, SelVT);
31326       SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
31327       return DAG.getSelect(DL, SelVT, C, V0, V1);
31328     };
31329 
31330     // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
31331     if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
31332       Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
31333       IsROTL = true;
31334     }
31335 
31336     unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
31337     unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
31338 
31339     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
31340     // We can safely do this using i16 shifts as we're only interested in
31341     // the 3 lower bits of each byte.
31342     Amt = DAG.getBitcast(ExtVT, Amt);
31343     Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
31344     Amt = DAG.getBitcast(VT, Amt);
31345 
31346     // r = VSELECT(r, rot(r, 4), a);
31347     SDValue M;
31348     M = DAG.getNode(
31349         ISD::OR, DL, VT,
31350         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
31351         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
31352     R = SignBitSelect(VT, Amt, M, R);
31353 
31354     // a += a
31355     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
31356 
31357     // r = VSELECT(r, rot(r, 2), a);
31358     M = DAG.getNode(
31359         ISD::OR, DL, VT,
31360         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
31361         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
31362     R = SignBitSelect(VT, Amt, M, R);
31363 
31364     // a += a
31365     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
31366 
31367     // return VSELECT(r, rot(r, 1), a);
31368     M = DAG.getNode(
31369         ISD::OR, DL, VT,
31370         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
31371         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
31372     return SignBitSelect(VT, Amt, M, R);
31373   }
31374 
31375   bool IsSplatAmt = DAG.isSplatValue(Amt);
31376   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
31377   bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
31378                         supportedVectorVarShift(VT, Subtarget, ISD::SRL);
31379 
31380   // Fallback for splats + all supported variable shifts.
31381   // Fallback for non-constants AVX2 vXi16 as well.
31382   if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
31383     Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31384     SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
31385     AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
31386     SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
31387     SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
31388     return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
31389   }
31390 
31391   // Everything below assumes ISD::ROTL.
31392   if (!IsROTL) {
31393     Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
31394     IsROTL = true;
31395   }
31396 
31397   // ISD::ROT* uses modulo rotate amounts.
31398   Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31399 
31400   assert(IsROTL && "Only ROTL supported");
31401 
31402   // As with shifts, attempt to convert the rotation amount to a multiplication
31403   // factor, fallback to general expansion.
31404   SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
31405   if (!Scale)
31406     return SDValue();
31407 
31408   // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
31409   if (EltSizeInBits == 16) {
31410     SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
31411     SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
31412     return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31413   }
31414 
31415   // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
31416   // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
31417   // that can then be OR'd with the lower 32-bits.
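  // E.g. for a lane rotated left by a non-zero r: the 64-bit product
  // x * (1 << r) holds (x << r) in its low 32 bits and (x >> (32 - r)) in its
  // high 32 bits, so OR'ing the interleaved halves back together yields
  // rotl(x, r).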
31418   assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
31419   static const int OddMask[] = {1, -1, 3, -1};
31420   SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
31421   SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
31422 
31423   SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
31424                               DAG.getBitcast(MVT::v2i64, R),
31425                               DAG.getBitcast(MVT::v2i64, Scale));
31426   SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
31427                               DAG.getBitcast(MVT::v2i64, R13),
31428                               DAG.getBitcast(MVT::v2i64, Scale13));
31429   Res02 = DAG.getBitcast(VT, Res02);
31430   Res13 = DAG.getBitcast(VT, Res13);
31431 
31432   return DAG.getNode(ISD::OR, DL, VT,
31433                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
31434                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
31435 }
31436 
31437 /// Returns true if the operand type is exactly twice the native width, and
31438 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
31439 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
31440 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
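/// For example, an i128 atomicrmw on a 64-bit target with CX16 is expanded to
/// a cmpxchg16b loop, whereas without CX16 it is typically left alone and ends
/// up as a __sync_* library call.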
31441 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
31442   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
31443 
31444   if (OpWidth == 64)
31445     return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
31446   if (OpWidth == 128)
31447     return Subtarget.canUseCMPXCHG16B();
31448 
31449   return false;
31450 }
31451 
31452 TargetLoweringBase::AtomicExpansionKind
31453 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
31454   Type *MemType = SI->getValueOperand()->getType();
31455 
31456   bool NoImplicitFloatOps =
31457       SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
31458   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
31459       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
31460       (Subtarget.hasSSE1() || Subtarget.hasX87()))
31461     return AtomicExpansionKind::None;
31462 
31463   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
31464                                  : AtomicExpansionKind::None;
31465 }
31466 
31467 // Note: this turns large loads into lock cmpxchg8b/16b.
31468 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
31469 TargetLowering::AtomicExpansionKind
31470 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
31471   Type *MemType = LI->getType();
31472 
31473   // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
31474   // can use movq to do the load. If we have X87 we can load into an 80-bit
31475   // X87 register and store it to a stack temporary.
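  // E.g. on a 32-bit target with SSE2, a seq_cst i64 atomic load can be done
  // with a single MOVQ instead of a lock cmpxchg8b loop.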
31476   bool NoImplicitFloatOps =
31477       LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
31478   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
31479       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
31480       (Subtarget.hasSSE1() || Subtarget.hasX87()))
31481     return AtomicExpansionKind::None;
31482 
31483   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
31484                                  : AtomicExpansionKind::None;
31485 }
31486 
31487 enum BitTestKind : unsigned {
31488   UndefBit,
31489   ConstantBit,
31490   NotConstantBit,
31491   ShiftBit,
31492   NotShiftBit
31493 };
31494 
31495 static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
31496   using namespace llvm::PatternMatch;
31497   BitTestKind BTK = UndefBit;
31498   auto *C = dyn_cast<ConstantInt>(V);
31499   if (C) {
31500     // Check if V is a power of 2 or NOT power of 2.
31501     if (isPowerOf2_64(C->getZExtValue()))
31502       BTK = ConstantBit;
31503     else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
31504       BTK = NotConstantBit;
31505     return {V, BTK};
31506   }
31507 
31508   // Check if V is some power of 2 pattern known to be non-zero
31509   auto *I = dyn_cast<Instruction>(V);
31510   if (I) {
31511     bool Not = false;
31512     // Check if we have a NOT
31513     Value *PeekI;
31514     if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
31515         match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
31516       Not = true;
31517       I = dyn_cast<Instruction>(PeekI);
31518 
31519       // If I is constant, it will fold and we can evaluate later. If it's an
31520       // argument or something of that nature, we can't analyze.
31521       if (I == nullptr)
31522         return {nullptr, UndefBit};
31523     }
31524     // We can only use 1 << X without more sophisticated analysis. C << X where
31525     // C is a power of 2 but not 1 can result in zero which cannot be translated
31526     // to bittest. Likewise any C >> X (either arith or logical) can be zero.
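    // E.g. 'atomicrmw or %p, (1 << %n)' tested against (1 << %n) can become a
    // 'lock bts', but (2 << %n) may shift out to zero, so it is not handled.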
31527     if (I->getOpcode() == Instruction::Shl) {
31528       // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
31529       // -X` and some other provable power of 2 patterns that we can use CTZ on
31530       // may be profitable.
31531       // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
31532       // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
31533       // be provably a non-zero power of 2.
31534       // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
31535       // transformable to bittest.
31536       auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
31537       if (!ShiftVal)
31538         return {nullptr, UndefBit};
31539       if (ShiftVal->equalsInt(1))
31540         BTK = Not ? NotShiftBit : ShiftBit;
31541 
31542       if (BTK == UndefBit)
31543         return {nullptr, UndefBit};
31544 
31545       Value *BitV = I->getOperand(1);
31546 
31547       Value *AndOp;
31548       const APInt *AndC;
31549       if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
31550         // Read past a shift-mask instruction to find the count.
31551         if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
31552           BitV = AndOp;
31553       }
31554       return {BitV, BTK};
31555     }
31556   }
31557   return {nullptr, UndefBit};
31558 }
31559 
31560 TargetLowering::AtomicExpansionKind
31561 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
31562   // If the atomicrmw's result isn't actually used, we can just add a "lock"
31563   // prefix to a normal instruction for these operations.
31564   if (AI->use_empty())
31565     return AtomicExpansionKind::None;
31566 
31567   // If the atomicrmw's result is used by a single bit AND, we may use
31568   // bts/btr/btc instruction for these operations.
31569   // Note: InstCombinePass can cause a de-optimization here. It replaces the
31570   // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
31571   // (depending on CC). This pattern can only use bts/btr/btc but we don't
31572   // detect it.
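  // Illustrative examples of patterns that reach BitTestIntrinsic:
  //   %old = atomicrmw or  ptr %p, i32 8 ; followed by (and %old, 8) -> lock bts
  //   %old = atomicrmw xor ptr %p, i32 8 ; followed by (and %old, 8) -> lock btc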
31573   Instruction *I = AI->user_back();
31574   auto BitChange = FindSingleBitChange(AI->getValOperand());
31575   if (BitChange.second == UndefBit || !AI->hasOneUse() ||
31576       I->getOpcode() != Instruction::And ||
31577       AI->getType()->getPrimitiveSizeInBits() == 8 ||
31578       AI->getParent() != I->getParent())
31579     return AtomicExpansionKind::CmpXChg;
31580 
31581   unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;
31582 
31583   // This is a redundant AND, it should get cleaned up elsewhere.
31584   if (AI == I->getOperand(OtherIdx))
31585     return AtomicExpansionKind::CmpXChg;
31586 
31587   // The following instruction must be an AND with a single bit.
31588   if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
31589     auto *C1 = cast<ConstantInt>(AI->getValOperand());
31590     auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
31591     if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
31592       return AtomicExpansionKind::CmpXChg;
31593     }
31594     if (AI->getOperation() == AtomicRMWInst::And) {
31595       return ~C1->getValue() == C2->getValue()
31596                  ? AtomicExpansionKind::BitTestIntrinsic
31597                  : AtomicExpansionKind::CmpXChg;
31598     }
31599     return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
31600                     : AtomicExpansionKind::CmpXChg;
31601   }
31602 
31603   assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);
31604 
31605   auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
31606   if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
31607     return AtomicExpansionKind::CmpXChg;
31608 
31609   assert(BitChange.first != nullptr && BitTested.first != nullptr);
31610 
31611   // If shift amounts are not the same we can't use BitTestIntrinsic.
31612   if (BitChange.first != BitTested.first)
31613     return AtomicExpansionKind::CmpXChg;
31614 
31615   // For atomic AND, the operand must mask all but one bit, and the user must
31616   // test the one bit that is unset in the mask.
31617   if (AI->getOperation() == AtomicRMWInst::And)
31618     return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
31619                ? AtomicExpansionKind::BitTestIntrinsic
31620                : AtomicExpansionKind::CmpXChg;
31621 
31622   // For atomic XOR/OR, the operand and the tested value must be the same single bit.
31623   return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
31624              ? AtomicExpansionKind::BitTestIntrinsic
31625              : AtomicExpansionKind::CmpXChg;
31626 }
31627 
31628 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
31629   IRBuilder<> Builder(AI);
31630   Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
31631   Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
31632   switch (AI->getOperation()) {
31633   default:
31634     llvm_unreachable("Unknown atomic operation");
31635   case AtomicRMWInst::Or:
31636     IID_C = Intrinsic::x86_atomic_bts;
31637     IID_I = Intrinsic::x86_atomic_bts_rm;
31638     break;
31639   case AtomicRMWInst::Xor:
31640     IID_C = Intrinsic::x86_atomic_btc;
31641     IID_I = Intrinsic::x86_atomic_btc_rm;
31642     break;
31643   case AtomicRMWInst::And:
31644     IID_C = Intrinsic::x86_atomic_btr;
31645     IID_I = Intrinsic::x86_atomic_btr_rm;
31646     break;
31647   }
31648   Instruction *I = AI->user_back();
31649   LLVMContext &Ctx = AI->getContext();
31650   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
31651                                           Type::getInt8PtrTy(Ctx));
31652   Function *BitTest = nullptr;
31653   Value *Result = nullptr;
31654   auto BitTested = FindSingleBitChange(AI->getValOperand());
31655   assert(BitTested.first != nullptr);
31656 
31657   if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
31658     auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
31659 
31660     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
31661 
31662     unsigned Imm = countTrailingZeros(C->getZExtValue());
31663     Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
31664   } else {
31665     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
31666 
31667     assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
31668 
31669     Value *SI = BitTested.first;
31670     assert(SI != nullptr);
31671 
31672     // BT{S|R|C} on a memory operand doesn't modulo the bit position, so we
31673     // need to mask it.
31674     unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
31675     Value *BitPos =
31676         Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
31677     // Todo(1): In many cases it may be provable that SI is less than
31678     // ShiftBits, in which case this mask is unnecessary.
31679     // Todo(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP 1
31680     // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
31681     // favor of just a raw BT{S|R|C}.
31682 
31683     Result = Builder.CreateCall(BitTest, {Addr, BitPos});
31684     Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
31685 
31686     // If the result is only used for zero/non-zero status then we don't need to
31687     // shift the value back. Otherwise do so.
31688     for (auto It = I->user_begin(); It != I->user_end(); ++It) {
31689       if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
31690         if (ICmp->isEquality()) {
31691           auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
31692           auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
31693           if (C0 || C1) {
31694             assert(C0 == nullptr || C1 == nullptr);
31695             if ((C0 ? C0 : C1)->isZero())
31696               continue;
31697           }
31698         }
31699       }
31700       Result = Builder.CreateShl(Result, BitPos);
31701       break;
31702     }
31703   }
31704 
31705   I->replaceAllUsesWith(Result);
31706   I->eraseFromParent();
31707   AI->eraseFromParent();
31708 }
31709 
31710 static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
31711   using namespace llvm::PatternMatch;
31712   if (!AI->hasOneUse())
31713     return false;
31714 
31715   Value *Op = AI->getOperand(1);
31716   ICmpInst::Predicate Pred;
31717   Instruction *I = AI->user_back();
31718   AtomicRMWInst::BinOp Opc = AI->getOperation();
31719   if (Opc == AtomicRMWInst::Add) {
31720     if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
31721       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31722     if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
31723       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31724         return Pred == CmpInst::ICMP_SLT;
31725       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31726         return Pred == CmpInst::ICMP_SGT;
31727     }
31728     return false;
31729   }
31730   if (Opc == AtomicRMWInst::Sub) {
31731     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
31732       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31733     if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
31734       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31735         return Pred == CmpInst::ICMP_SLT;
31736       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31737         return Pred == CmpInst::ICMP_SGT;
31738     }
31739     return false;
31740   }
31741   if ((Opc == AtomicRMWInst::Or &&
31742        match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
31743       (Opc == AtomicRMWInst::And &&
31744        match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
31745     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31746       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
31747              Pred == CmpInst::ICMP_SLT;
31748     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31749       return Pred == CmpInst::ICMP_SGT;
31750     return false;
31751   }
31752   if (Opc == AtomicRMWInst::Xor) {
31753     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
31754       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31755     if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
31756       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31757         return Pred == CmpInst::ICMP_SLT;
31758       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31759         return Pred == CmpInst::ICMP_SGT;
31760     }
31761     return false;
31762   }
31763 
31764   return false;
31765 }
31766 
31767 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
31768     AtomicRMWInst *AI) const {
31769   IRBuilder<> Builder(AI);
31770   Instruction *TempI = nullptr;
31771   LLVMContext &Ctx = AI->getContext();
31772   ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
31773   if (!ICI) {
31774     TempI = AI->user_back();
31775     assert(TempI->hasOneUse() && "Must have one use");
31776     ICI = cast<ICmpInst>(TempI->user_back());
31777   }
31778   X86::CondCode CC = X86::COND_INVALID;
31779   ICmpInst::Predicate Pred = ICI->getPredicate();
31780   switch (Pred) {
31781   default:
31782     llvm_unreachable("Not supported Pred");
31783   case CmpInst::ICMP_EQ:
31784     CC = X86::COND_E;
31785     break;
31786   case CmpInst::ICMP_NE:
31787     CC = X86::COND_NE;
31788     break;
31789   case CmpInst::ICMP_SLT:
31790     CC = X86::COND_S;
31791     break;
31792   case CmpInst::ICMP_SGT:
31793     CC = X86::COND_NS;
31794     break;
31795   }
31796   Intrinsic::ID IID = Intrinsic::not_intrinsic;
31797   switch (AI->getOperation()) {
31798   default:
31799     llvm_unreachable("Unknown atomic operation");
31800   case AtomicRMWInst::Add:
31801     IID = Intrinsic::x86_atomic_add_cc;
31802     break;
31803   case AtomicRMWInst::Sub:
31804     IID = Intrinsic::x86_atomic_sub_cc;
31805     break;
31806   case AtomicRMWInst::Or:
31807     IID = Intrinsic::x86_atomic_or_cc;
31808     break;
31809   case AtomicRMWInst::And:
31810     IID = Intrinsic::x86_atomic_and_cc;
31811     break;
31812   case AtomicRMWInst::Xor:
31813     IID = Intrinsic::x86_atomic_xor_cc;
31814     break;
31815   }
31816   Function *CmpArith =
31817       Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
31818   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
31819                                           Type::getInt8PtrTy(Ctx));
31820   Value *Call = Builder.CreateCall(
31821       CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
31822   Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
31823   ICI->replaceAllUsesWith(Result);
31824   ICI->eraseFromParent();
31825   if (TempI)
31826     TempI->eraseFromParent();
31827   AI->eraseFromParent();
31828 }
31829 
31830 TargetLowering::AtomicExpansionKind
31831 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
31832   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
31833   Type *MemType = AI->getType();
31834 
31835   // If the operand is too big, we must see if cmpxchg8/16b is available
31836   // and default to library calls otherwise.
31837   if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
31838     return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
31839                                    : AtomicExpansionKind::None;
31840   }
31841 
31842   AtomicRMWInst::BinOp Op = AI->getOperation();
31843   switch (Op) {
31844   case AtomicRMWInst::Xchg:
31845     return AtomicExpansionKind::None;
31846   case AtomicRMWInst::Add:
31847   case AtomicRMWInst::Sub:
31848     if (shouldExpandCmpArithRMWInIR(AI))
31849       return AtomicExpansionKind::CmpArithIntrinsic;
31850     // It's better to use xadd, xsub or xchg for these in other cases.
31851     return AtomicExpansionKind::None;
31852   case AtomicRMWInst::Or:
31853   case AtomicRMWInst::And:
31854   case AtomicRMWInst::Xor:
31855     if (shouldExpandCmpArithRMWInIR(AI))
31856       return AtomicExpansionKind::CmpArithIntrinsic;
31857     return shouldExpandLogicAtomicRMWInIR(AI);
31858   case AtomicRMWInst::Nand:
31859   case AtomicRMWInst::Max:
31860   case AtomicRMWInst::Min:
31861   case AtomicRMWInst::UMax:
31862   case AtomicRMWInst::UMin:
31863   case AtomicRMWInst::FAdd:
31864   case AtomicRMWInst::FSub:
31865   case AtomicRMWInst::FMax:
31866   case AtomicRMWInst::FMin:
31867   case AtomicRMWInst::UIncWrap:
31868   case AtomicRMWInst::UDecWrap:
31869   default:
31870     // These always require a non-trivial set of data operations on x86. We must
31871     // use a cmpxchg loop.
31872     return AtomicExpansionKind::CmpXChg;
31873   }
31874 }
31875 
31876 LoadInst *
31877 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
31878   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
31879   Type *MemType = AI->getType();
31880   // Accesses larger than the native width are turned into cmpxchg/libcalls, so
31881   // there is no benefit in turning such RMWs into loads, and it is actually
31882   // harmful as it introduces an mfence.
31883   if (MemType->getPrimitiveSizeInBits() > NativeWidth)
31884     return nullptr;
31885 
31886   // If this is a canonical idempotent atomicrmw w/no uses, we have a better
31887   // lowering available in lowerAtomicArith.
31888   // TODO: push more cases through this path.
31889   if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
31890     if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
31891         AI->use_empty())
31892       return nullptr;
31893 
31894   IRBuilder<> Builder(AI);
31895   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
31896   auto SSID = AI->getSyncScopeID();
31897   // We must restrict the ordering to avoid generating loads with Release or
31898   // ReleaseAcquire orderings.
31899   auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
31900 
31901   // Before the load we need a fence. Here is an example lifted from
31902   // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
31903   // is required:
31904   // Thread 0:
31905   //   x.store(1, relaxed);
31906   //   r1 = y.fetch_add(0, release);
31907   // Thread 1:
31908   //   y.fetch_add(42, acquire);
31909   //   r2 = x.load(relaxed);
31910   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
31911   // lowered to just a load without a fence. An mfence flushes the store
31912   // buffer, making the optimization clearly correct.
31913   // FIXME: the fence is required if isReleaseOrStronger(Order), but it is
31914   // not clear whether it is needed otherwise; we might be able to be more
31915   // aggressive on relaxed idempotent rmw. In practice, they do not look
31916   // useful, so we don't try to be especially clever.
31917   if (SSID == SyncScope::SingleThread)
31918     // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
31919     // the IR level, so we must wrap it in an intrinsic.
31920     return nullptr;
31921 
31922   if (!Subtarget.hasMFence())
31923     // FIXME: it might make sense to use a locked operation here but on a
31924     // different cache-line to prevent cache-line bouncing. In practice it
31925     // is probably a small win, and x86 processors without mfence are rare
31926     // enough that we do not bother.
31927     return nullptr;
31928 
31929   Function *MFence =
31930       llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
31931   Builder.CreateCall(MFence, {});
31932 
31933   // Finally we can emit the atomic load.
31934   LoadInst *Loaded = Builder.CreateAlignedLoad(
31935       AI->getType(), AI->getPointerOperand(), AI->getAlign());
31936   Loaded->setAtomic(Order, SSID);
31937   AI->replaceAllUsesWith(Loaded);
31938   AI->eraseFromParent();
31939   return Loaded;
31940 }
31941 
31942 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
31943   if (!SI.isUnordered())
31944     return false;
31945   return ExperimentalUnorderedISEL;
31946 }
31947 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
31948   if (!LI.isUnordered())
31949     return false;
31950   return ExperimentalUnorderedISEL;
31951 }
31952 
31953 
31954 /// Emit a locked operation on a stack location which does not change any
31955 /// memory location, but does involve a lock prefix.  Location is chosen to be
31956 /// a) very likely accessed only by a single thread to minimize cache traffic,
31957 /// and b) definitely dereferenceable.  Returns the new Chain result.
31958 static SDValue emitLockedStackOp(SelectionDAG &DAG,
31959                                  const X86Subtarget &Subtarget, SDValue Chain,
31960                                  const SDLoc &DL) {
31961   // Implementation notes:
31962   // 1) LOCK prefix creates a full read/write reordering barrier for memory
31963   // operations issued by the current processor.  As such, the location
31964   // referenced is not relevant for the ordering properties of the instruction.
31965   // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
31966   // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
31967   // 2) Using an immediate operand appears to be the best encoding choice
31968   // here since it doesn't require an extra register.
31969   // 3) OR appears to be very slightly faster than ADD. (Though, the difference
31970   // is small enough it might just be measurement noise.)
31971   // 4) When choosing offsets, there are several contributing factors:
31972   //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
31973   //      line aligned stack object to improve this case.)
31974   //   b) To minimize our chances of introducing a false dependence, we prefer
31975   //      to offset the stack usage from TOS slightly.
31976   //   c) To minimize concerns about cross thread stack usage - in particular,
31977   //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
31978   //      captures state in the TOS frame and accesses it from many threads -
31979   //      we want to use an offset such that the offset is in a distinct cache
31980   //      line from the TOS frame.
31981   //
31982   // For a general discussion of the tradeoffs and benchmark results, see:
31983   // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
31984 
31985   auto &MF = DAG.getMachineFunction();
31986   auto &TFL = *Subtarget.getFrameLowering();
31987   const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
31988 
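  // The machine node built below is effectively "lock or dword ptr [rsp + SPOffset], 0"
  // (ESP-relative in 32-bit mode): it stores back the value it loads, so memory is
  // unchanged, but the LOCK prefix still provides the full barrier described above.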
31989   if (Subtarget.is64Bit()) {
31990     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
31991     SDValue Ops[] = {
31992       DAG.getRegister(X86::RSP, MVT::i64),                  // Base
31993       DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
31994       DAG.getRegister(0, MVT::i64),                         // Index
31995       DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
31996       DAG.getRegister(0, MVT::i16),                         // Segment.
31997       Zero,
31998       Chain};
31999     SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
32000                                      MVT::Other, Ops);
32001     return SDValue(Res, 1);
32002   }
32003 
32004   SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
32005   SDValue Ops[] = {
32006     DAG.getRegister(X86::ESP, MVT::i32),            // Base
32007     DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
32008     DAG.getRegister(0, MVT::i32),                   // Index
32009     DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
32010     DAG.getRegister(0, MVT::i16),                   // Segment.
32011     Zero,
32012     Chain
32013   };
32014   SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
32015                                    MVT::Other, Ops);
32016   return SDValue(Res, 1);
32017 }
32018 
32019 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
32020                                  SelectionDAG &DAG) {
32021   SDLoc dl(Op);
32022   AtomicOrdering FenceOrdering =
32023       static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
32024   SyncScope::ID FenceSSID =
32025       static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
32026 
32027   // The only fence that needs an instruction is a sequentially-consistent
32028   // cross-thread fence.
32029   if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
32030       FenceSSID == SyncScope::System) {
32031     if (Subtarget.hasMFence())
32032       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
32033 
32034     SDValue Chain = Op.getOperand(0);
32035     return emitLockedStackOp(DAG, Subtarget, Chain, dl);
32036   }
32037 
32038   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
32039   return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
32040 }
32041 
32042 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
32043                              SelectionDAG &DAG) {
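  // CMPXCHG implicitly takes the expected value in AL/AX/EAX/RAX and reports
  // success in ZF, so copy operand 2 into the accumulator, emit the LCMPXCHG
  // node, and recover the old value and a SETE of EFLAGS afterwards.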
32044   MVT T = Op.getSimpleValueType();
32045   SDLoc DL(Op);
32046   unsigned Reg = 0;
32047   unsigned size = 0;
32048   switch(T.SimpleTy) {
32049   default: llvm_unreachable("Invalid value type!");
32050   case MVT::i8:  Reg = X86::AL;  size = 1; break;
32051   case MVT::i16: Reg = X86::AX;  size = 2; break;
32052   case MVT::i32: Reg = X86::EAX; size = 4; break;
32053   case MVT::i64:
32054     assert(Subtarget.is64Bit() && "Node not type legal!");
32055     Reg = X86::RAX; size = 8;
32056     break;
32057   }
32058   SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
32059                                   Op.getOperand(2), SDValue());
32060   SDValue Ops[] = { cpIn.getValue(0),
32061                     Op.getOperand(1),
32062                     Op.getOperand(3),
32063                     DAG.getTargetConstant(size, DL, MVT::i8),
32064                     cpIn.getValue(1) };
32065   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
32066   MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
32067   SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
32068                                            Ops, T, MMO);
32069 
32070   SDValue cpOut =
32071     DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
32072   SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
32073                                       MVT::i32, cpOut.getValue(2));
32074   SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
32075 
32076   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
32077                      cpOut, Success, EFLAGS.getValue(1));
32078 }
32079 
32080 // Create MOVMSKB, taking into account whether we need to split for AVX1.
32081 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
32082                            const X86Subtarget &Subtarget) {
32083   MVT InVT = V.getSimpleValueType();
32084 
32085   if (InVT == MVT::v64i8) {
32086     SDValue Lo, Hi;
32087     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
32088     Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
32089     Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
32090     Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
32091     Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
32092     Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
32093                      DAG.getConstant(32, DL, MVT::i8));
32094     return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
32095   }
32096   if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
32097     SDValue Lo, Hi;
32098     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
32099     Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
32100     Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
32101     Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
32102                      DAG.getConstant(16, DL, MVT::i8));
32103     return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
32104   }
32105 
32106   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
32107 }
32108 
32109 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
32110                             SelectionDAG &DAG) {
32111   SDValue Src = Op.getOperand(0);
32112   MVT SrcVT = Src.getSimpleValueType();
32113   MVT DstVT = Op.getSimpleValueType();
32114 
32115   // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
32116   // half to v32i1 and concatenating the result.
32117   if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
32118     assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
32119     assert(Subtarget.hasBWI() && "Expected BWI target");
32120     SDLoc dl(Op);
32121     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
32122                              DAG.getIntPtrConstant(0, dl));
32123     Lo = DAG.getBitcast(MVT::v32i1, Lo);
32124     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
32125                              DAG.getIntPtrConstant(1, dl));
32126     Hi = DAG.getBitcast(MVT::v32i1, Hi);
32127     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
32128   }
32129 
32130   // Use MOVMSK for vector to scalar conversion to prevent scalarization.
32131   if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
32132     assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
32133     MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
32134     SDLoc DL(Op);
32135     SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
32136     V = getPMOVMSKB(DL, V, DAG, Subtarget);
32137     return DAG.getZExtOrTrunc(V, DL, DstVT);
32138   }
32139 
32140   assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
32141           SrcVT == MVT::i64) && "Unexpected VT!");
32142 
32143   assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32144   if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
32145       !(DstVT == MVT::x86mmx && SrcVT.isVector()))
32146     // This conversion needs to be expanded.
32147     return SDValue();
32148 
32149   SDLoc dl(Op);
32150   if (SrcVT.isVector()) {
32151     // Widen the input vector in the case of MVT::v2i32.
32152     // Example: from MVT::v2i32 to MVT::v4i32.
32153     MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
32154                                  SrcVT.getVectorNumElements() * 2);
32155     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
32156                       DAG.getUNDEF(SrcVT));
32157   } else {
32158     assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
32159            "Unexpected source type in LowerBITCAST");
32160     Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
32161   }
32162 
32163   MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
32164   Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
32165 
32166   if (DstVT == MVT::x86mmx)
32167     return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
32168 
32169   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
32170                      DAG.getIntPtrConstant(0, dl));
32171 }
32172 
32173 /// Compute the horizontal sum of bytes in V for the elements of VT.
32174 ///
32175 /// Requires V to be a byte vector and VT to be an integer vector type with
32176 /// wider elements than V's type. The width of the elements of VT determines
32177 /// how many bytes of V are summed horizontally to produce each element of the
32178 /// result.
32179 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
32180                                       const X86Subtarget &Subtarget,
32181                                       SelectionDAG &DAG) {
32182   SDLoc DL(V);
32183   MVT ByteVecVT = V.getSimpleValueType();
32184   MVT EltVT = VT.getVectorElementType();
32185   assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
32186          "Expected value to have byte element type.");
32187   assert(EltVT != MVT::i8 &&
32188          "Horizontal byte sum only makes sense for wider elements!");
32189   unsigned VecSize = VT.getSizeInBits();
32190   assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
32191 
32192   // The PSADBW instruction horizontally adds all bytes and leaves the result
32193   // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
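  // For example, for a v16i8 input PSADBW against zero yields a v2i64 whose
  // element 0 is the sum of input bytes 0-7 and element 1 the sum of bytes 8-15.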
32194   if (EltVT == MVT::i64) {
32195     SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
32196     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
32197     V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
32198     return DAG.getBitcast(VT, V);
32199   }
32200 
32201   if (EltVT == MVT::i32) {
32202     // We unpack the low half and high half into i32s interleaved with zeros so
32203     // that we can use PSADBW to horizontally sum them. The most useful part of
32204     // this is that it lines up the results of two PSADBW instructions to be
32205     // two v2i64 vectors which concatenated are the 4 population counts. We can
32206     // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
32207     SDValue Zeros = DAG.getConstant(0, DL, VT);
32208     SDValue V32 = DAG.getBitcast(VT, V);
32209     SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
32210     SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
32211 
32212     // Do the horizontal sums into two v2i64s.
32213     Zeros = DAG.getConstant(0, DL, ByteVecVT);
32214     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
32215     Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
32216                       DAG.getBitcast(ByteVecVT, Low), Zeros);
32217     High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
32218                        DAG.getBitcast(ByteVecVT, High), Zeros);
32219 
32220     // Merge them together.
32221     MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
32222     V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
32223                     DAG.getBitcast(ShortVecVT, Low),
32224                     DAG.getBitcast(ShortVecVT, High));
32225 
32226     return DAG.getBitcast(VT, V);
32227   }
32228 
32229   // The only element type left is i16.
32230   assert(EltVT == MVT::i16 && "Unknown how to handle type");
32231 
32232   // To obtain pop count for each i16 element starting from the pop count for
32233   // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
32234   // right by 8. It is important to shift as i16s as i8 vector shift isn't
32235   // directly supported.
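  // For example, in a v8i16 lane holding the two per-byte counts [hi|lo],
  // (x << 8) + x leaves hi+lo in the upper byte and the final logical >> 8
  // moves it down, producing the full 16-bit pop count in each lane.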
32236   SDValue ShifterV = DAG.getConstant(8, DL, VT);
32237   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
32238   V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
32239                   DAG.getBitcast(ByteVecVT, V));
32240   return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
32241 }
32242 
32243 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
32244                                         const X86Subtarget &Subtarget,
32245                                         SelectionDAG &DAG) {
32246   MVT VT = Op.getSimpleValueType();
32247   MVT EltVT = VT.getVectorElementType();
32248   int NumElts = VT.getVectorNumElements();
32249   (void)EltVT;
32250   assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
32251 
32252   // Implement a lookup table in register by using an algorithm based on:
32253   // http://wm.ite.pl/articles/sse-popcount.html
32254   //
32255   // The general idea is that every nibble of each input byte is an index into
32256   // an in-register pre-computed pop count table. We then split the input
32257   // vector into two new ones: (1) a vector with only the shifted-right higher
32258   // nibbles of each byte and (2) a vector with the lower nibbles (and the
32259   // higher ones masked out) of each byte. PSHUFB is used separately with both
32260   // to index the in-register table. Next, both are added and the result is an
32261   // i8 vector where each element contains the pop count for its input byte.
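  // For example, for the input byte 0xE9 the high nibble 0xE indexes LUT[14] = 3
  // and the low nibble 0x9 indexes LUT[9] = 2, giving popcount(0xE9) = 3 + 2 = 5.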
32262   const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
32263                        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
32264                        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
32265                        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
32266 
32267   SmallVector<SDValue, 64> LUTVec;
32268   for (int i = 0; i < NumElts; ++i)
32269     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
32270   SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
32271   SDValue M0F = DAG.getConstant(0x0F, DL, VT);
32272 
32273   // High nibbles
32274   SDValue FourV = DAG.getConstant(4, DL, VT);
32275   SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
32276 
32277   // Low nibbles
32278   SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
32279 
32280   // The input vector is used as the shuffle mask that index elements into the
32281   // LUT. After counting low and high nibbles, add the vector to obtain the
32282   // final pop count per i8 element.
32283   SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
32284   SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
32285   return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
32286 }
32287 
32288 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
32289 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
32290 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
32291                                 SelectionDAG &DAG) {
32292   MVT VT = Op.getSimpleValueType();
32293   assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
32294          "Unknown CTPOP type to handle");
32295   SDLoc DL(Op.getNode());
32296   SDValue Op0 = Op.getOperand(0);
32297 
32298   // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
32299   if (Subtarget.hasVPOPCNTDQ()) {
32300     unsigned NumElems = VT.getVectorNumElements();
32301     assert((VT.getVectorElementType() == MVT::i8 ||
32302             VT.getVectorElementType() == MVT::i16) && "Unexpected type");
32303     if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
32304       MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
32305       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
32306       Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
32307       return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
32308     }
32309   }
32310 
32311   // Decompose 256-bit ops into smaller 128-bit ops.
32312   if (VT.is256BitVector() && !Subtarget.hasInt256())
32313     return splitVectorIntUnary(Op, DAG);
32314 
32315   // Decompose 512-bit ops into smaller 256-bit ops.
32316   if (VT.is512BitVector() && !Subtarget.hasBWI())
32317     return splitVectorIntUnary(Op, DAG);
32318 
32319   // For element types greater than i8, do vXi8 pop counts and a bytesum.
32320   if (VT.getScalarType() != MVT::i8) {
32321     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
32322     SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
32323     SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
32324     return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
32325   }
32326 
32327   // We can't use the fast LUT approach, so fall back on LegalizeDAG.
32328   if (!Subtarget.hasSSSE3())
32329     return SDValue();
32330 
32331   return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
32332 }
32333 
32334 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
32335                           SelectionDAG &DAG) {
32336   assert(Op.getSimpleValueType().isVector() &&
32337          "We only do custom lowering for vector population count.");
32338   return LowerVectorCTPOP(Op, Subtarget, DAG);
32339 }
32340 
32341 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
32342   MVT VT = Op.getSimpleValueType();
32343   SDValue In = Op.getOperand(0);
32344   SDLoc DL(Op);
32345 
32346   // For scalars, its still beneficial to transfer to/from the SIMD unit to
32347   // perform the BITREVERSE.
32348   if (!VT.isVector()) {
32349     MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
32350     SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
32351     Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
32352     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
32353                        DAG.getIntPtrConstant(0, DL));
32354   }
32355 
32356   int NumElts = VT.getVectorNumElements();
32357   int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
32358 
32359   // Decompose 256-bit ops into smaller 128-bit ops.
32360   if (VT.is256BitVector())
32361     return splitVectorIntUnary(Op, DAG);
32362 
32363   assert(VT.is128BitVector() &&
32364          "Only 128-bit vector bitreverse lowering supported.");
32365 
32366   // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
32367   // perform the BSWAP in the shuffle.
32368   // It's best to shuffle using the second operand as this will implicitly allow
32369   // memory folding for multiple vectors.
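  // Each VPPERM selector byte uses bits [4:0] to pick one of the 32 bytes of the
  // concatenated sources and bits [7:5] as an operation; op 0b010 (2 << 5) emits
  // the selected byte with its bits reversed.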
32370   SmallVector<SDValue, 16> MaskElts;
32371   for (int i = 0; i != NumElts; ++i) {
32372     for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
32373       int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
32374       int PermuteByte = SourceByte | (2 << 5);
32375       MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
32376     }
32377   }
32378 
32379   SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
32380   SDValue Res = DAG.getBitcast(MVT::v16i8, In);
32381   Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
32382                     Res, Mask);
32383   return DAG.getBitcast(VT, Res);
32384 }
32385 
32386 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
32387                                SelectionDAG &DAG) {
32388   MVT VT = Op.getSimpleValueType();
32389 
32390   if (Subtarget.hasXOP() && !VT.is512BitVector())
32391     return LowerBITREVERSE_XOP(Op, DAG);
32392 
32393   assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
32394 
32395   SDValue In = Op.getOperand(0);
32396   SDLoc DL(Op);
32397 
32398   assert(VT.getScalarType() == MVT::i8 &&
32399          "Only byte vector BITREVERSE supported");
32400 
32401   // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
32402   if (VT == MVT::v64i8 && !Subtarget.hasBWI())
32403     return splitVectorIntUnary(Op, DAG);
32404 
32405   // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
32406   if (VT == MVT::v32i8 && !Subtarget.hasInt256())
32407     return splitVectorIntUnary(Op, DAG);
32408 
32409   unsigned NumElts = VT.getVectorNumElements();
32410 
32411   // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
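  // GF2P8AFFINEQB treats each byte as a vector of 8 bits over GF(2), multiplies
  // it by the supplied 8x8 bit matrix, and XORs in the immediate byte (0 here);
  // the broadcast matrix below is chosen so that the product is the input byte
  // with its bits reversed.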
32412   if (Subtarget.hasGFNI()) {
32413     MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
32414     SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
32415     Matrix = DAG.getBitcast(VT, Matrix);
32416     return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
32417                        DAG.getTargetConstant(0, DL, MVT::i8));
32418   }
32419 
32420   // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its two
32421   // nibbles, and a PSHUFB lookup finds the bitreverse of each 0-15 value
32422   // (moved to the other nibble).
32423   SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
32424   SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
32425   SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
32426 
32427   const int LoLUT[16] = {
32428       /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
32429       /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
32430       /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
32431       /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
32432   const int HiLUT[16] = {
32433       /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
32434       /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
32435       /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
32436       /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
32437 
32438   SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
32439   for (unsigned i = 0; i < NumElts; ++i) {
32440     LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
32441     HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
32442   }
32443 
32444   SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
32445   SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
32446   Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
32447   Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
32448   return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
32449 }
32450 
32451 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
32452                            SelectionDAG &DAG) {
32453   SDLoc DL(Op);
32454   SDValue X = Op.getOperand(0);
32455   MVT VT = Op.getSimpleValueType();
32456 
32457   // Special case. If the input fits in 8-bits we can use a single 8-bit TEST.
32458   if (VT == MVT::i8 ||
32459       DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
32460     X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
32461     SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
32462                                 DAG.getConstant(0, DL, MVT::i8));
32463     // Copy the inverse of the parity flag into a register with setcc.
32464     SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
32465     // Extend to the original type.
32466     return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
32467   }
32468 
32469   // If we have POPCNT, use the default expansion.
32470   if (Subtarget.hasPOPCNT())
32471     return SDValue();
32472 
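  // Without POPCNT, fold the value down to a single byte with XORs (xoring the
  // two halves of a value preserves its parity) and then read PF, which x86
  // computes from the low 8 bits of an ALU result.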
32473   if (VT == MVT::i64) {
32474     // Xor the high and low 32-bit halves together using a 32-bit operation.
32475     SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
32476                              DAG.getNode(ISD::SRL, DL, MVT::i64, X,
32477                                          DAG.getConstant(32, DL, MVT::i8)));
32478     SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
32479     X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
32480   }
32481 
32482   if (VT != MVT::i16) {
32483     // Xor the high and low 16-bits together using a 32-bit operation.
32484     SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
32485                                DAG.getConstant(16, DL, MVT::i8));
32486     X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
32487   } else {
32488     // If the input is 16-bits, we need to extend to use an i32 shift below.
32489     X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
32490   }
32491 
32492   // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
32493   // This should allow an h-reg to be used to save a shift.
32494   SDValue Hi = DAG.getNode(
32495       ISD::TRUNCATE, DL, MVT::i8,
32496       DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
32497   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
32498   SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
32499   SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
32500 
32501   // Copy the inverse of the parity flag into a register with setcc.
32502   SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
32503   // Extend to the original type.
32504   return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
32505 }
32506 
32507 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
32508                                         const X86Subtarget &Subtarget) {
32509   unsigned NewOpc = 0;
32510   switch (N->getOpcode()) {
32511   case ISD::ATOMIC_LOAD_ADD:
32512     NewOpc = X86ISD::LADD;
32513     break;
32514   case ISD::ATOMIC_LOAD_SUB:
32515     NewOpc = X86ISD::LSUB;
32516     break;
32517   case ISD::ATOMIC_LOAD_OR:
32518     NewOpc = X86ISD::LOR;
32519     break;
32520   case ISD::ATOMIC_LOAD_XOR:
32521     NewOpc = X86ISD::LXOR;
32522     break;
32523   case ISD::ATOMIC_LOAD_AND:
32524     NewOpc = X86ISD::LAND;
32525     break;
32526   default:
32527     llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
32528   }
32529 
32530   MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
32531 
32532   return DAG.getMemIntrinsicNode(
32533       NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
32534       {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
32535       /*MemVT=*/N->getSimpleValueType(0), MMO);
32536 }
32537 
32538 /// Lower atomic_load_ops into LOCK-prefixed operations.
32539 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
32540                                 const X86Subtarget &Subtarget) {
32541   AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
32542   SDValue Chain = N->getOperand(0);
32543   SDValue LHS = N->getOperand(1);
32544   SDValue RHS = N->getOperand(2);
32545   unsigned Opc = N->getOpcode();
32546   MVT VT = N->getSimpleValueType(0);
32547   SDLoc DL(N);
32548 
32549   // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
32550   // can only be lowered when the result is unused.  They should have already
32551   // been transformed into a cmpxchg loop in AtomicExpand.
32552   if (N->hasAnyUseOfValue(0)) {
32553     // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
32554     // select LXADD if LOCK_SUB can't be selected.
32555     if (Opc == ISD::ATOMIC_LOAD_SUB) {
32556       RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
32557       return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
32558                            RHS, AN->getMemOperand());
32559     }
32560     assert(Opc == ISD::ATOMIC_LOAD_ADD &&
32561            "Used AtomicRMW ops other than Add should have been expanded!");
32562     return N;
32563   }
32564 
32565   // Specialized lowering for the canonical form of an idempotent atomicrmw.
32566   // The core idea here is that since the memory location isn't actually
32567   // changing, all we need is a lowering for the *ordering* impacts of the
32568   // atomicrmw.  As such, we can choose a different operation and memory
32569   // location to minimize impact on other code.
32570   if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
32571     // On X86, the only ordering which actually requires an instruction is
32572     // seq_cst that isn't SingleThread; everything else just needs to be
32573     // preserved during codegen and then dropped. Note that we expect (but
32574     // don't assume) that orderings other than seq_cst and acq_rel have been
32575     // canonicalized to a store or load.
32576     if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
32577         AN->getSyncScopeID() == SyncScope::System) {
32578       // Prefer a locked operation against a stack location to minimize cache
32579       // traffic.  This assumes that stack locations are very likely to be
32580       // accessed only by the owning thread.
32581       SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
32582       assert(!N->hasAnyUseOfValue(0));
32583       // NOTE: The getUNDEF is needed to give something for the unused result 0.
32584       return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32585                          DAG.getUNDEF(VT), NewChain);
32586     }
32587     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
32588     SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
32589     assert(!N->hasAnyUseOfValue(0));
32590     // NOTE: The getUNDEF is needed to give something for the unused result 0.
32591     return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32592                        DAG.getUNDEF(VT), NewChain);
32593   }
32594 
32595   SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
32596   // RAUW the chain, but don't worry about the result, as it's unused.
32597   assert(!N->hasAnyUseOfValue(0));
32598   // NOTE: The getUNDEF is needed to give something for the unused result 0.
32599   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32600                      DAG.getUNDEF(VT), LockOp.getValue(1));
32601 }
32602 
32603 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
32604                                  const X86Subtarget &Subtarget) {
32605   auto *Node = cast<AtomicSDNode>(Op.getNode());
32606   SDLoc dl(Node);
32607   EVT VT = Node->getMemoryVT();
32608 
32609   bool IsSeqCst =
32610       Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
32611   bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
32612 
32613   // If this store is not sequentially consistent and the type is legal
32614   // we can just keep it.
32615   if (!IsSeqCst && IsTypeLegal)
32616     return Op;
32617 
32618   if (VT == MVT::i64 && !IsTypeLegal) {
32619     // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
32620     // is enabled.
32621     bool NoImplicitFloatOps =
32622         DAG.getMachineFunction().getFunction().hasFnAttribute(
32623             Attribute::NoImplicitFloat);
32624     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
32625       SDValue Chain;
32626       if (Subtarget.hasSSE1()) {
32627         SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
32628                                        Node->getOperand(2));
32629         MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
32630         SclToVec = DAG.getBitcast(StVT, SclToVec);
32631         SDVTList Tys = DAG.getVTList(MVT::Other);
32632         SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
32633         Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
32634                                         MVT::i64, Node->getMemOperand());
32635       } else if (Subtarget.hasX87()) {
32636         // First load this into an 80-bit X87 register using a stack temporary.
32637         // This will put the whole integer into the significand.
32638         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
32639         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
32640         MachinePointerInfo MPI =
32641             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
32642         Chain =
32643             DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
32644                          MPI, MaybeAlign(), MachineMemOperand::MOStore);
32645         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
32646         SDValue LdOps[] = {Chain, StackPtr};
32647         SDValue Value = DAG.getMemIntrinsicNode(
32648             X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
32649             /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
32650         Chain = Value.getValue(1);
32651 
32652         // Now use an FIST to do the atomic store.
32653         SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
32654         Chain =
32655             DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
32656                                     StoreOps, MVT::i64, Node->getMemOperand());
32657       }
32658 
32659       if (Chain) {
32660         // If this is a sequentially consistent store, also emit an appropriate
32661         // barrier.
32662         if (IsSeqCst)
32663           Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
32664 
32665         return Chain;
32666       }
32667     }
32668   }
32669 
32670   // Convert seq_cst store -> xchg
32671   // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
32672   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
32673   SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
32674                                Node->getMemoryVT(),
32675                                Node->getOperand(0),
32676                                Node->getOperand(1), Node->getOperand(2),
32677                                Node->getMemOperand());
32678   return Swap.getValue(1);
32679 }
32680 
32681 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
32682   SDNode *N = Op.getNode();
32683   MVT VT = N->getSimpleValueType(0);
32684   unsigned Opc = Op.getOpcode();
32685 
32686   // Let legalize expand this if it isn't a legal type yet.
32687   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
32688     return SDValue();
32689 
32690   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
32691   SDLoc DL(N);
32692 
32693   // Set the carry flag.
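  // Adding all-ones to the incoming carry value produces a carry-out (CF = 1)
  // exactly when that value is non-zero, re-materializing the carry in EFLAGS
  // for the ADC/SBB below.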
32694   SDValue Carry = Op.getOperand(2);
32695   EVT CarryVT = Carry.getValueType();
32696   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
32697                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
32698 
32699   bool IsAdd = Opc == ISD::ADDCARRY || Opc == ISD::SADDO_CARRY;
32700   SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
32701                             Op.getOperand(0), Op.getOperand(1),
32702                             Carry.getValue(1));
32703 
32704   bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
32705   SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
32706                            Sum.getValue(1), DL, DAG);
32707   if (N->getValueType(1) == MVT::i1)
32708     SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
32709 
32710   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
32711 }
32712 
32713 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
32714                             SelectionDAG &DAG) {
32715   assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
32716 
32717   // For MacOSX, we want to call an alternative entry point: __sincos_stret,
32718   // which returns the values as { float, float } (in XMM0) or
32719   // { double, double } (which is returned in XMM0, XMM1).
32720   SDLoc dl(Op);
32721   SDValue Arg = Op.getOperand(0);
32722   EVT ArgVT = Arg.getValueType();
32723   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
32724 
32725   TargetLowering::ArgListTy Args;
32726   TargetLowering::ArgListEntry Entry;
32727 
32728   Entry.Node = Arg;
32729   Entry.Ty = ArgTy;
32730   Entry.IsSExt = false;
32731   Entry.IsZExt = false;
32732   Args.push_back(Entry);
32733 
32734   bool isF64 = ArgVT == MVT::f64;
32735   // Only optimize x86_64 for now. i386 is a bit messy. For f32,
32736   // the small struct {f32, f32} is returned in (eax, edx). For f64,
32737   // the results are returned via SRet in memory.
32738   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32739   RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
32740   const char *LibcallName = TLI.getLibcallName(LC);
32741   SDValue Callee =
32742       DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
32743 
32744   Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
32745                       : (Type *)FixedVectorType::get(ArgTy, 4);
32746 
32747   TargetLowering::CallLoweringInfo CLI(DAG);
32748   CLI.setDebugLoc(dl)
32749       .setChain(DAG.getEntryNode())
32750       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
32751 
32752   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
32753 
32754   if (isF64)
32755     // Returned in xmm0 and xmm1.
32756     return CallResult.first;
32757 
32758   // Returned in bits 0:31 and 32:63 of xmm0.
32759   SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
32760                                CallResult.first, DAG.getIntPtrConstant(0, dl));
32761   SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
32762                                CallResult.first, DAG.getIntPtrConstant(1, dl));
32763   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
32764   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
32765 }
32766 
32767 /// Widen a vector input to a vector of NVT.  The
32768 /// input vector must have the same element type as NVT.
32769 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
32770                             bool FillWithZeroes = false) {
32771   // Check if InOp already has the right width.
32772   MVT InVT = InOp.getSimpleValueType();
32773   if (InVT == NVT)
32774     return InOp;
32775 
32776   if (InOp.isUndef())
32777     return DAG.getUNDEF(NVT);
32778 
32779   assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
32780          "input and widen element type must match");
32781 
32782   unsigned InNumElts = InVT.getVectorNumElements();
32783   unsigned WidenNumElts = NVT.getVectorNumElements();
32784   assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
32785          "Unexpected request for vector widening");
32786 
32787   SDLoc dl(InOp);
32788   if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
32789       InOp.getNumOperands() == 2) {
32790     SDValue N1 = InOp.getOperand(1);
32791     if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
32792         N1.isUndef()) {
32793       InOp = InOp.getOperand(0);
32794       InVT = InOp.getSimpleValueType();
32795       InNumElts = InVT.getVectorNumElements();
32796     }
32797   }
32798   if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
32799       ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
32800     SmallVector<SDValue, 16> Ops;
32801     for (unsigned i = 0; i < InNumElts; ++i)
32802       Ops.push_back(InOp.getOperand(i));
32803 
32804     EVT EltVT = InOp.getOperand(0).getValueType();
32805 
32806     SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
32807       DAG.getUNDEF(EltVT);
32808     for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
32809       Ops.push_back(FillVal);
32810     return DAG.getBuildVector(NVT, dl, Ops);
32811   }
32812   SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
32813     DAG.getUNDEF(NVT);
32814   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
32815                      InOp, DAG.getIntPtrConstant(0, dl));
32816 }
32817 
32818 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
32819                              SelectionDAG &DAG) {
32820   assert(Subtarget.hasAVX512() &&
32821          "MGATHER/MSCATTER are supported on AVX-512 arch only");
32822 
32823   MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
32824   SDValue Src = N->getValue();
32825   MVT VT = Src.getSimpleValueType();
32826   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
32827   SDLoc dl(Op);
32828 
32829   SDValue Scale = N->getScale();
32830   SDValue Index = N->getIndex();
32831   SDValue Mask = N->getMask();
32832   SDValue Chain = N->getChain();
32833   SDValue BasePtr = N->getBasePtr();
32834 
32835   if (VT == MVT::v2f32 || VT == MVT::v2i32) {
32836     assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
32837     // If the index is v2i64 and we have VLX we can use xmm for data and index.
32838     if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
32839       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32840       EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
32841       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
32842       SDVTList VTs = DAG.getVTList(MVT::Other);
32843       SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
32844       return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
32845                                      N->getMemoryVT(), N->getMemOperand());
32846     }
32847     return SDValue();
32848   }
32849 
32850   MVT IndexVT = Index.getSimpleValueType();
32851 
32852   // If the index is v2i32, we're being called by type legalization and we
32853   // should just let the default handling take care of it.
32854   if (IndexVT == MVT::v2i32)
32855     return SDValue();
32856 
32857   // If we don't have VLX and neither the source data nor the index is 512-bits,
32858   // need to widen until one is.
32859   if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
32860       !Index.getSimpleValueType().is512BitVector()) {
32861     // Determine how much we need to widen by to get a 512-bit type.
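    // For example, a v4f32 value with a v4i64 index gives
    // Factor = min(512/128, 512/256) = 2, so both are widened 2x (v8f32/v8i64)
    // and the extra mask lanes are filled with zeros so they stay inactive.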
32862     unsigned Factor = std::min(512/VT.getSizeInBits(),
32863                                512/IndexVT.getSizeInBits());
32864     unsigned NumElts = VT.getVectorNumElements() * Factor;
32865 
32866     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
32867     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
32868     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
32869 
32870     Src = ExtendToType(Src, VT, DAG);
32871     Index = ExtendToType(Index, IndexVT, DAG);
32872     Mask = ExtendToType(Mask, MaskVT, DAG, true);
32873   }
32874 
32875   SDVTList VTs = DAG.getVTList(MVT::Other);
32876   SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
32877   return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
32878                                  N->getMemoryVT(), N->getMemOperand());
32879 }
32880 
32881 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
32882                           SelectionDAG &DAG) {
32883 
32884   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
32885   MVT VT = Op.getSimpleValueType();
32886   MVT ScalarVT = VT.getScalarType();
32887   SDValue Mask = N->getMask();
32888   MVT MaskVT = Mask.getSimpleValueType();
32889   SDValue PassThru = N->getPassThru();
32890   SDLoc dl(Op);
32891 
32892   // Handle AVX masked loads which don't support passthru other than 0.
32893   if (MaskVT.getVectorElementType() != MVT::i1) {
32894     // We also allow undef in the isel pattern.
32895     if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
32896       return Op;
32897 
32898     SDValue NewLoad = DAG.getMaskedLoad(
32899         VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
32900         getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
32901         N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
32902         N->isExpandingLoad());
32903     // Emit a blend.
32904     SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
32905     return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
32906   }
32907 
32908   assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
32909          "Expanding masked load is supported on AVX-512 target only!");
32910 
32911   assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
32912          "Expanding masked load is supported for 32 and 64-bit types only!");
32913 
32914   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32915          "Cannot lower masked load op.");
32916 
32917   assert((ScalarVT.getSizeInBits() >= 32 ||
32918           (Subtarget.hasBWI() &&
32919               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32920          "Unsupported masked load op.");
32921 
32922   // This operation is legal for targets with VLX, but without
32923   // VLX the vector should be widened to 512 bits.
32924   unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
32925   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32926   PassThru = ExtendToType(PassThru, WideDataVT, DAG);
32927 
32928   // Mask element has to be i1.
32929   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32930          "Unexpected mask type");
32931 
32932   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32933 
32934   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32935   SDValue NewLoad = DAG.getMaskedLoad(
32936       WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
32937       PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
32938       N->getExtensionType(), N->isExpandingLoad());
32939 
32940   SDValue Extract =
32941       DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
32942                   DAG.getIntPtrConstant(0, dl));
32943   SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
32944   return DAG.getMergeValues(RetOps, dl);
32945 }
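// NOTE (editorial, not in the original source): as an illustrative sketch of
// the widening path above, a masked load of v8i32 on an AVX-512 target
// without VLX has its data/passthru widened to v16i32 and its mask to v16i1,
// is emitted as a single 512-bit masked load, and the original v8i32 result
// is then recovered with EXTRACT_SUBVECTOR at index 0. In LLVM IR terms
// (hypothetical example):
//   %v = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %p, i32 4,
//                                                  <8 x i1> %m, <8 x i32> %pt)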
32946 
32947 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
32948                            SelectionDAG &DAG) {
32949   MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
32950   SDValue DataToStore = N->getValue();
32951   MVT VT = DataToStore.getSimpleValueType();
32952   MVT ScalarVT = VT.getScalarType();
32953   SDValue Mask = N->getMask();
32954   SDLoc dl(Op);
32955 
32956   assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
32957          "Compressing masked store is supported on AVX-512 target only!");
32958 
32959   assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
32960          "Compressing masked store is supported for 32 and 64-bit types only!");
32961 
32962   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32963          "Cannot lower masked store op.");
32964 
32965   assert((ScalarVT.getSizeInBits() >= 32 ||
32966           (Subtarget.hasBWI() &&
32967               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32968           "Unsupported masked store op.");
32969 
32970   // This operation is legal for targets with VLX, but without
32971   // VLX the vector should be widened to 512 bits.
32972   unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
32973   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32974 
32975   // Mask element has to be i1.
32976   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32977          "Unexpected mask type");
32978 
32979   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32980 
32981   DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
32982   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32983   return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
32984                             N->getOffset(), Mask, N->getMemoryVT(),
32985                             N->getMemOperand(), N->getAddressingMode(),
32986                             N->isTruncatingStore(), N->isCompressingStore());
32987 }
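// NOTE (editorial, not in the original source): the store path mirrors the
// load path above. For example, a masked store of v8i32 without VLX widens
// the data to v16i32 and the mask to v16i1; the 'true' passed to
// ExtendToType appears to request zero-filling of the new mask lanes, so the
// widened 512-bit masked store only touches the lanes the original v8i32
// store would have written.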
32988 
32989 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
32990                             SelectionDAG &DAG) {
32991   assert(Subtarget.hasAVX2() &&
32992          "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
32993 
32994   MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
32995   SDLoc dl(Op);
32996   MVT VT = Op.getSimpleValueType();
32997   SDValue Index = N->getIndex();
32998   SDValue Mask = N->getMask();
32999   SDValue PassThru = N->getPassThru();
33000   MVT IndexVT = Index.getSimpleValueType();
33001 
33002   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
33003 
33004   // If the index is v2i32, we're being called by type legalization.
33005   if (IndexVT == MVT::v2i32)
33006     return SDValue();
33007 
33008   // If we don't have VLX and neither the passthru nor the index is
33009   // 512 bits, we need to widen until one is.
33010   MVT OrigVT = VT;
33011   if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
33012       !IndexVT.is512BitVector()) {
33013     // Determine how much we need to widen by to get a 512-bit type.
33014     unsigned Factor = std::min(512/VT.getSizeInBits(),
33015                                512/IndexVT.getSizeInBits());
33016 
33017     unsigned NumElts = VT.getVectorNumElements() * Factor;
33018 
33019     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
33020     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
33021     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
33022 
33023     PassThru = ExtendToType(PassThru, VT, DAG);
33024     Index = ExtendToType(Index, IndexVT, DAG);
33025     Mask = ExtendToType(Mask, MaskVT, DAG, true);
33026   }
33027 
33028   // Break dependency on the data register.
33029   if (PassThru.isUndef())
33030     PassThru = getZeroVector(VT, Subtarget, DAG, dl);
33031 
33032   SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
33033                     N->getScale() };
33034   SDValue NewGather = DAG.getMemIntrinsicNode(
33035       X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
33036       N->getMemOperand());
33037   SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
33038                                 NewGather, DAG.getIntPtrConstant(0, dl));
33039   return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
33040 }
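// NOTE (editorial, not in the original source): two details of the gather
// lowering above. When neither the data nor the index vector is 512 bits and
// VLX is unavailable, both are widened by the same factor so a single
// 512-bit gather can be issued and the original width recovered with
// EXTRACT_SUBVECTOR. Separately, an undef passthru is replaced with a zero
// vector only to break a false dependency on the destination register; it
// does not change the gathered lanes.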
33041 
33042 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
33043   SDLoc dl(Op);
33044   SDValue Src = Op.getOperand(0);
33045   MVT DstVT = Op.getSimpleValueType();
33046 
33047   AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
33048   unsigned SrcAS = N->getSrcAddressSpace();
33049 
33050   assert(SrcAS != N->getDestAddressSpace() &&
33051          "addrspacecast must be between different address spaces");
33052 
33053   if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
33054     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
33055   } else if (DstVT == MVT::i64) {
33056     Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
33057   } else if (DstVT == MVT::i32) {
33058     Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
33059   } else {
33060     report_fatal_error("Bad address space in addrspacecast");
33061   }
33062   return Op;
33063 }
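// NOTE (editorial, not in the original source): a short behavior sketch of
// the cases above. A 32-bit unsigned pointer (X86AS::PTR32_UPTR) cast to a
// 64-bit destination is zero-extended, any other source cast to a 64-bit
// destination is sign-extended (the signed ptr32 flavor), a cast to a 32-bit
// destination is truncated, and every remaining combination aborts via
// report_fatal_error.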
33064 
33065 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
33066                                               SelectionDAG &DAG) const {
33067   // TODO: Eventually, the lowering of these nodes should be informed by or
33068   // deferred to the GC strategy for the function in which they appear. For
33069   // now, however, they must be lowered to something. Since they are logically
33070   // no-ops in the case of a null GC strategy (or a GC strategy which does not
33071   // require special handling for these nodes), lower them as literal NOOPs for
33072   // the time being.
33073   SmallVector<SDValue, 2> Ops;
33074   Ops.push_back(Op.getOperand(0));
33075   if (Op->getGluedNode())
33076     Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
33077 
33078   SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
33079   return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
33080 }
33081 
33082 // Custom split CVTPS2PH with wide types.
33083 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
33084   SDLoc dl(Op);
33085   EVT VT = Op.getValueType();
33086   SDValue Lo, Hi;
33087   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
33088   EVT LoVT, HiVT;
33089   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33090   SDValue RC = Op.getOperand(1);
33091   Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
33092   Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
33093   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33094 }
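// NOTE (editorial, not in the original source): a concrete instance of the
// split above, assuming a wide input such as v16f32: the source is split
// into two v8f32 halves, each half is converted with its own
// X86ISD::CVTPS2PH using the same rounding-control operand RC, and the two
// half-width results are concatenated back to the original result type.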
33095 
33096 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
33097                                      unsigned OpNo) {
33098   const APInt Operand(32, OpNo);
33099   std::string OpNoStr = llvm::toString(Operand, 10, false);
33100   std::string Str(" $");
33101 
33102   std::string OpNoStr1(Str + OpNoStr);             // e.g. " $1" (OpNo=1)
33103   std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
33104 
33105   auto I = StringRef::npos;
33106   for (auto &AsmStr : AsmStrs) {
33107     // Match the OpNo string. We must match exactly here to avoid matching
33108     // a sub-string, e.g. "$12" contains "$1".
33109     if (AsmStr.endswith(OpNoStr1))
33110       I = AsmStr.size() - OpNoStr1.size();
33111 
33112     // Get the index of operand in AsmStr.
33113     if (I == StringRef::npos)
33114       I = AsmStr.find(OpNoStr1 + ",");
33115     if (I == StringRef::npos)
33116       I = AsmStr.find(OpNoStr2);
33117 
33118     if (I == StringRef::npos)
33119       continue;
33120 
33121     assert(I > 0 && "Unexpected inline asm string!");
33122     // Remove the operand string and the label (if it exists).
33123     // For example:
33124     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
33125     // ==>
33126     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
33127     // ==>
33128     // "call dword ptr "
33129     auto TmpStr = AsmStr.substr(0, I);
33130     I = TmpStr.rfind(':');
33131     if (I == StringRef::npos)
33132       return TmpStr;
33133 
33134     assert(I < TmpStr.size() && "Unexpected inline asm string!");
33135     auto Asm = TmpStr.drop_front(I + 1);
33136     return Asm;
33137   }
33138 
33139   return StringRef();
33140 }
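// NOTE (editorial, not in the original source): walking the example from the
// comments above through this function, with OpNo == 0 and the asm string
//   ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
// the " ${0:" form matches, the operand text is cut off, the MS-asm label is
// stripped at the last ':', and the remaining instruction text
// ("call dword ptr") is returned. The caller below only checks it for the
// substring "call".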
33141 
33142 bool X86TargetLowering::isInlineAsmTargetBranch(
33143     const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
33144   StringRef InstrStr = getInstrStrFromOpNo(AsmStrs, OpNo);
33145 
33146   if (InstrStr.contains("call"))
33147     return true;
33148 
33149   return false;
33150 }
33151 
33152 /// Provide custom lowering hooks for some operations.
33153 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
33154   switch (Op.getOpcode()) {
33155   default: llvm_unreachable("Should not custom lower this!");
33156   case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
33157   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
33158     return LowerCMP_SWAP(Op, Subtarget, DAG);
33159   case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
33160   case ISD::ATOMIC_LOAD_ADD:
33161   case ISD::ATOMIC_LOAD_SUB:
33162   case ISD::ATOMIC_LOAD_OR:
33163   case ISD::ATOMIC_LOAD_XOR:
33164   case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
33165   case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
33166   case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
33167   case ISD::PARITY:             return LowerPARITY(Op, Subtarget, DAG);
33168   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
33169   case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
33170   case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
33171   case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
33172   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
33173   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
33174   case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
33175   case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
33176   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
33177   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
33178   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
33179   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
33180   case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
33181   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
33182   case ISD::SHL_PARTS:
33183   case ISD::SRA_PARTS:
33184   case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
33185   case ISD::FSHL:
33186   case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
33187   case ISD::STRICT_SINT_TO_FP:
33188   case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
33189   case ISD::STRICT_UINT_TO_FP:
33190   case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
33191   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
33192   case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
33193   case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
33194   case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
33195   case ISD::ZERO_EXTEND_VECTOR_INREG:
33196   case ISD::SIGN_EXTEND_VECTOR_INREG:
33197     return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
33198   case ISD::FP_TO_SINT:
33199   case ISD::STRICT_FP_TO_SINT:
33200   case ISD::FP_TO_UINT:
33201   case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
33202   case ISD::FP_TO_SINT_SAT:
33203   case ISD::FP_TO_UINT_SAT:     return LowerFP_TO_INT_SAT(Op, DAG);
33204   case ISD::FP_EXTEND:
33205   case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
33206   case ISD::FP_ROUND:
33207   case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
33208   case ISD::FP16_TO_FP:
33209   case ISD::STRICT_FP16_TO_FP:  return LowerFP16_TO_FP(Op, DAG);
33210   case ISD::FP_TO_FP16:
33211   case ISD::STRICT_FP_TO_FP16:  return LowerFP_TO_FP16(Op, DAG);
33212   case ISD::FP_TO_BF16:         return LowerFP_TO_BF16(Op, DAG);
33213   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
33214   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
33215   case ISD::FADD:
33216   case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
33217   case ISD::FROUND:             return LowerFROUND(Op, DAG);
33218   case ISD::FABS:
33219   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
33220   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
33221   case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
33222   case ISD::LRINT:
33223   case ISD::LLRINT:             return LowerLRINT_LLRINT(Op, DAG);
33224   case ISD::SETCC:
33225   case ISD::STRICT_FSETCC:
33226   case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
33227   case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
33228   case ISD::SELECT:             return LowerSELECT(Op, DAG);
33229   case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
33230   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
33231   case ISD::VASTART:            return LowerVASTART(Op, DAG);
33232   case ISD::VAARG:              return LowerVAARG(Op, DAG);
33233   case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
33234   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
33235   case ISD::INTRINSIC_VOID:
33236   case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
33237   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
33238   case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
33239   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
33240   case ISD::FRAME_TO_ARGS_OFFSET:
33241                                 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
33242   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
33243   case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
33244   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
33245   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
33246   case ISD::EH_SJLJ_SETUP_DISPATCH:
33247     return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
33248   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
33249   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
33250   case ISD::GET_ROUNDING:       return LowerGET_ROUNDING(Op, DAG);
33251   case ISD::SET_ROUNDING:       return LowerSET_ROUNDING(Op, DAG);
33252   case ISD::CTLZ:
33253   case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
33254   case ISD::CTTZ:
33255   case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
33256   case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
33257   case ISD::MULHS:
33258   case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
33259   case ISD::ROTL:
33260   case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
33261   case ISD::SRA:
33262   case ISD::SRL:
33263   case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
33264   case ISD::SADDO:
33265   case ISD::UADDO:
33266   case ISD::SSUBO:
33267   case ISD::USUBO:              return LowerXALUO(Op, DAG);
33268   case ISD::SMULO:
33269   case ISD::UMULO:              return LowerMULO(Op, Subtarget, DAG);
33270   case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
33271   case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
33272   case ISD::SADDO_CARRY:
33273   case ISD::SSUBO_CARRY:
33274   case ISD::ADDCARRY:
33275   case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
33276   case ISD::ADD:
33277   case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
33278   case ISD::UADDSAT:
33279   case ISD::SADDSAT:
33280   case ISD::USUBSAT:
33281   case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
33282   case ISD::SMAX:
33283   case ISD::SMIN:
33284   case ISD::UMAX:
33285   case ISD::UMIN:               return LowerMINMAX(Op, Subtarget, DAG);
33286   case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
33287   case ISD::AVGCEILU:           return LowerAVG(Op, Subtarget, DAG);
33288   case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
33289   case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
33290   case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
33291   case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
33292   case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
33293   case ISD::GC_TRANSITION_START:
33294   case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
33295   case ISD::ADDRSPACECAST:      return LowerADDRSPACECAST(Op, DAG);
33296   case X86ISD::CVTPS2PH:        return LowerCVTPS2PH(Op, DAG);
33297   }
33298 }
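// NOTE (editorial, not in the original source): nodes only reach
// LowerOperation because earlier initialization code marked them Custom for
// the relevant type, along the lines of (hypothetical registration sketch,
// not a line from this section):
//   setOperationAction(ISD::MLOAD, MVT::v8i32, Custom);
// The legalizer then calls back into the switch above, which dispatches to
// the matching Lower* helper.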
33299 
33300 /// Replace a node with an illegal result type with a new node built out of
33301 /// custom code.
33302 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
33303                                            SmallVectorImpl<SDValue>&Results,
33304                                            SelectionDAG &DAG) const {
33305   SDLoc dl(N);
33306   switch (N->getOpcode()) {
33307   default:
33308 #ifndef NDEBUG
33309     dbgs() << "ReplaceNodeResults: ";
33310     N->dump(&DAG);
33311 #endif
33312     llvm_unreachable("Do not know how to custom type legalize this operation!");
33313   case X86ISD::CVTPH2PS: {
33314     EVT VT = N->getValueType(0);
33315     SDValue Lo, Hi;
33316     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
33317     EVT LoVT, HiVT;
33318     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33319     Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
33320     Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
33321     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33322     Results.push_back(Res);
33323     return;
33324   }
33325   case X86ISD::STRICT_CVTPH2PS: {
33326     EVT VT = N->getValueType(0);
33327     SDValue Lo, Hi;
33328     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
33329     EVT LoVT, HiVT;
33330     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33331     Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
33332                      {N->getOperand(0), Lo});
33333     Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
33334                      {N->getOperand(0), Hi});
33335     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
33336                                 Lo.getValue(1), Hi.getValue(1));
33337     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33338     Results.push_back(Res);
33339     Results.push_back(Chain);
33340     return;
33341   }
33342   case X86ISD::CVTPS2PH:
33343     Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
33344     return;
33345   case ISD::CTPOP: {
33346     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33347     // Use a v2i64 if possible.
33348     bool NoImplicitFloatOps =
33349         DAG.getMachineFunction().getFunction().hasFnAttribute(
33350             Attribute::NoImplicitFloat);
33351     if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
33352       SDValue Wide =
33353           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
33354       Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
33355       // The bit count fits in 32 bits, so extract it as an i32 and then zero
33356       // extend to i64. Otherwise we end up extracting bits 63:32 separately.
33357       Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
33358       Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
33359                          DAG.getIntPtrConstant(0, dl));
33360       Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
33361       Results.push_back(Wide);
33362     }
33363     return;
33364   }
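  // NOTE (editorial, not in the original source): the CTPOP case above is a
  // scalar-via-vector trick: the i64 value is placed in lane 0 of a v2i64,
  // counted with the vector CTPOP lowering, read back as the low i32 (a
  // 64-bit popcount is at most 64, so it fits), and zero-extended to i64.
  // If v2i64 is not legal or NoImplicitFloat is set, no result is pushed and
  // the default expansion is used instead.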
33365   case ISD::MUL: {
33366     EVT VT = N->getValueType(0);
33367     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33368            VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
33369     // Pre-promote these to vXi16 to avoid op legalization thinking all 16
33370     // elements are needed.
33371     MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
33372     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
33373     SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
33374     SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
33375     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
33376     unsigned NumConcats = 16 / VT.getVectorNumElements();
33377     SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
33378     ConcatOps[0] = Res;
33379     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
33380     Results.push_back(Res);
33381     return;
33382   }
33383   case ISD::SMULO:
33384   case ISD::UMULO: {
33385     EVT VT = N->getValueType(0);
33386     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33387            VT == MVT::v2i32 && "Unexpected VT!");
33388     bool IsSigned = N->getOpcode() == ISD::SMULO;
33389     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
33390     SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
33391     SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
33392     SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
33393     // Extract the high 32 bits from each result using PSHUFD.
33394     // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
33395     SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
33396     Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
33397     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
33398                      DAG.getIntPtrConstant(0, dl));
33399 
33400     // Truncate the low bits of the result. This will become PSHUFD.
33401     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
33402 
33403     SDValue HiCmp;
33404     if (IsSigned) {
33405       // SMULO overflows if the high bits don't match the sign of the low.
33406       HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
33407     } else {
33408       // UMULO overflows if the high bits are non-zero.
33409       HiCmp = DAG.getConstant(0, dl, VT);
33410     }
33411     SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
33412 
33413     // Widen the result by padding with undef.
33414     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
33415                       DAG.getUNDEF(VT));
33416     Results.push_back(Res);
33417     Results.push_back(Ovf);
33418     return;
33419   }
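  // NOTE (editorial, not in the original source): the overflow test above is
  // the usual one for a widening multiply. For UMULO the high 32 bits of
  // each 64-bit product must be zero; for SMULO they must equal the sign of
  // the low half, hence the compare against (Res >> 31). For example,
  // 3 * -2 = -6 has high bits of all ones, which matches -6 >> 31, so no
  // overflow is reported.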
33420   case X86ISD::VPMADDWD: {
33421     // Legalize types for X86ISD::VPMADDWD by widening.
33422     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33423 
33424     EVT VT = N->getValueType(0);
33425     EVT InVT = N->getOperand(0).getValueType();
33426     assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
33427            "Expected a VT that divides into 128 bits.");
33428     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33429            "Unexpected type action!");
33430     unsigned NumConcat = 128 / InVT.getSizeInBits();
33431 
33432     EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
33433                                     InVT.getVectorElementType(),
33434                                     NumConcat * InVT.getVectorNumElements());
33435     EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
33436                                   VT.getVectorElementType(),
33437                                   NumConcat * VT.getVectorNumElements());
33438 
33439     SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
33440     Ops[0] = N->getOperand(0);
33441     SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
33442     Ops[0] = N->getOperand(1);
33443     SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
33444 
33445     SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
33446     Results.push_back(Res);
33447     return;
33448   }
33449   // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
33450   case X86ISD::FMINC:
33451   case X86ISD::FMIN:
33452   case X86ISD::FMAXC:
33453   case X86ISD::FMAX: {
33454     EVT VT = N->getValueType(0);
33455     assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
33456     SDValue UNDEF = DAG.getUNDEF(VT);
33457     SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
33458                               N->getOperand(0), UNDEF);
33459     SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
33460                               N->getOperand(1), UNDEF);
33461     Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
33462     return;
33463   }
33464   case ISD::SDIV:
33465   case ISD::UDIV:
33466   case ISD::SREM:
33467   case ISD::UREM: {
33468     EVT VT = N->getValueType(0);
33469     if (VT.isVector()) {
33470       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33471              "Unexpected type action!");
33472       // If the RHS is a constant splat vector we can widen this and let the
33473       // division/remainder-by-constant optimization handle it.
33474       // TODO: Can we do something for non-splat?
33475       APInt SplatVal;
33476       if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
33477         unsigned NumConcats = 128 / VT.getSizeInBits();
33478         SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
33479         Ops0[0] = N->getOperand(0);
33480         EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
33481         SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
33482         SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
33483         SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
33484         Results.push_back(Res);
33485       }
33486       return;
33487     }
33488 
33489     SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
33490     Results.push_back(V);
33491     return;
33492   }
33493   case ISD::TRUNCATE: {
33494     MVT VT = N->getSimpleValueType(0);
33495     if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
33496       return;
33497 
33498     // The generic legalizer will try to widen the input type to the same
33499     // number of elements as the widened result type. But this isn't always
33500     // the best choice, so do some custom legalization to avoid certain cases.
33501     MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
33502     SDValue In = N->getOperand(0);
33503     EVT InVT = In.getValueType();
33504 
33505     unsigned InBits = InVT.getSizeInBits();
33506     if (128 % InBits == 0) {
33507       // Inputs of 128 bits or smaller should avoid truncate altogether and
33508       // just use a build_vector that will become a shuffle.
33509       // TODO: Widen and use a shuffle directly?
33510       MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
33511       EVT EltVT = VT.getVectorElementType();
33512       unsigned WidenNumElts = WidenVT.getVectorNumElements();
33513       SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
33514       // Use the original element count so we don't do more scalar opts than
33515       // necessary.
33516       unsigned MinElts = VT.getVectorNumElements();
33517       for (unsigned i=0; i < MinElts; ++i) {
33518         SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
33519                                   DAG.getIntPtrConstant(i, dl));
33520         Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
33521       }
33522       Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
33523       return;
33524     }
33525     // With AVX512 there are some cases that can use a target specific
33526     // truncate node to go from 256/512 to less than 128 with zeros in the
33527     // upper elements of the 128 bit result.
33528     if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
33529       // We can use VTRUNC directly for 256-bit inputs with VLX or any 512-bit input.
33530       if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
33531         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
33532         return;
33533       }
33534       // There's one case we can widen to 512 bits and use VTRUNC.
33535       if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
33536         In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
33537                          DAG.getUNDEF(MVT::v4i64));
33538         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
33539         return;
33540       }
33541     }
33542     if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
33543         getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
33544         isTypeLegal(MVT::v4i64)) {
33545       // Input needs to be split and output needs to be widened. Let's use two
33546       // VTRUNCs, and shuffle their results together into the wider type.
33547       SDValue Lo, Hi;
33548       std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
33549 
33550       Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
33551       Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
33552       SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
33553                                          { 0,  1,  2,  3, 16, 17, 18, 19,
33554                                           -1, -1, -1, -1, -1, -1, -1, -1 });
33555       Results.push_back(Res);
33556       return;
33557     }
33558 
33559     return;
33560   }
33561   case ISD::ANY_EXTEND:
33562     // Right now, only MVT::v8i8 has Custom action for an illegal type.
33563     // It's intended to custom handle the input type.
33564     assert(N->getValueType(0) == MVT::v8i8 &&
33565            "Do not know how to legalize this Node");
33566     return;
33567   case ISD::SIGN_EXTEND:
33568   case ISD::ZERO_EXTEND: {
33569     EVT VT = N->getValueType(0);
33570     SDValue In = N->getOperand(0);
33571     EVT InVT = In.getValueType();
33572     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
33573         (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
33574       assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
33575              "Unexpected type action!");
33576       assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
33577       // Custom split this so we can extend i8/i16->i32 invec. This is better
33578       // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
33579       // sra, then an extend from i32 to i64 using pcmpgt. By custom splitting
33580       // we allow the sra from the extend to i32 to be shared by the split.
33581       In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
33582 
33583       // Fill a vector with sign bits for each element.
33584       SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
33585       SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
33586 
33587       // Create an unpackl and unpackh to interleave the sign bits then bitcast
33588       // to v2i64.
33589       SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
33590                                         {0, 4, 1, 5});
33591       Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
33592       SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
33593                                         {2, 6, 3, 7});
33594       Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
33595 
33596       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33597       Results.push_back(Res);
33598       return;
33599     }
33600 
33601     if (VT == MVT::v16i32 || VT == MVT::v8i64) {
33602       if (!InVT.is128BitVector()) {
33603         // Not a 128 bit vector, but maybe type legalization will promote
33604         // it to 128 bits.
33605         if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
33606           return;
33607         InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
33608         if (!InVT.is128BitVector())
33609           return;
33610 
33611         // Promote the input to 128 bits. Type legalization will turn this into
33612         // zext_inreg/sext_inreg.
33613         In = DAG.getNode(N->getOpcode(), dl, InVT, In);
33614       }
33615 
33616       // Perform custom splitting instead of the two stage extend we would get
33617       // by default.
33618       EVT LoVT, HiVT;
33619       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
33620       assert(isTypeLegal(LoVT) && "Split VT not legal?");
33621 
33622       SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
33623 
33624       // We need to shift the input over by half the number of elements.
33625       unsigned NumElts = InVT.getVectorNumElements();
33626       unsigned HalfNumElts = NumElts / 2;
33627       SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
33628       for (unsigned i = 0; i != HalfNumElts; ++i)
33629         ShufMask[i] = i + HalfNumElts;
33630 
33631       SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
33632       Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
33633 
33634       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33635       Results.push_back(Res);
33636     }
33637     return;
33638   }
33639   case ISD::FP_TO_SINT:
33640   case ISD::STRICT_FP_TO_SINT:
33641   case ISD::FP_TO_UINT:
33642   case ISD::STRICT_FP_TO_UINT: {
33643     bool IsStrict = N->isStrictFPOpcode();
33644     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
33645                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
33646     EVT VT = N->getValueType(0);
33647     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33648     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
33649     EVT SrcVT = Src.getValueType();
33650 
33651     SDValue Res;
33652     if (isSoftFP16(SrcVT)) {
33653       EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
33654       if (IsStrict) {
33655         Res =
33656             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
33657                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
33658                                             {NVT, MVT::Other}, {Chain, Src})});
33659         Chain = Res.getValue(1);
33660       } else {
33661         Res = DAG.getNode(N->getOpcode(), dl, VT,
33662                           DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
33663       }
33664       Results.push_back(Res);
33665       if (IsStrict)
33666         Results.push_back(Chain);
33667 
33668       return;
33669     }
33670 
33671     if (VT.isVector() && Subtarget.hasFP16() &&
33672         SrcVT.getVectorElementType() == MVT::f16) {
33673       EVT EleVT = VT.getVectorElementType();
33674       EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
33675 
33676       if (SrcVT != MVT::v8f16) {
33677         SDValue Tmp =
33678             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
33679         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
33680         Ops[0] = Src;
33681         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
33682       }
33683 
33684       if (IsStrict) {
33685         unsigned Opc =
33686             IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33687         Res =
33688             DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
33689         Chain = Res.getValue(1);
33690       } else {
33691         unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33692         Res = DAG.getNode(Opc, dl, ResVT, Src);
33693       }
33694 
33695       // TODO: Need to add exception check code for strict FP.
33696       if (EleVT.getSizeInBits() < 16) {
33697         MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
33698         Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
33699 
33700         // Now widen to 128 bits.
33701         unsigned NumConcats = 128 / TmpVT.getSizeInBits();
33702         MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
33703         SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
33704         ConcatOps[0] = Res;
33705         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
33706       }
33707 
33708       Results.push_back(Res);
33709       if (IsStrict)
33710         Results.push_back(Chain);
33711 
33712       return;
33713     }
33714 
33715     if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
33716       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33717              "Unexpected type action!");
33718 
33719       // Try to create a 128 bit vector, but don't exceed a 32 bit element.
33720       unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
33721       MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
33722                                        VT.getVectorNumElements());
33723       SDValue Res;
33724       SDValue Chain;
33725       if (IsStrict) {
33726         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
33727                           {N->getOperand(0), Src});
33728         Chain = Res.getValue(1);
33729       } else
33730         Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
33731 
33732       // Preserve what we know about the size of the original result. If the
33733       // result is v2i32, we have to manually widen the assert.
33734       if (PromoteVT == MVT::v2i32)
33735         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
33736                           DAG.getUNDEF(MVT::v2i32));
33737 
33738       Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
33739                         Res.getValueType(), Res,
33740                         DAG.getValueType(VT.getVectorElementType()));
33741 
33742       if (PromoteVT == MVT::v2i32)
33743         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
33744                           DAG.getIntPtrConstant(0, dl));
33745 
33746       // Truncate back to the original width.
33747       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
33748 
33749       // Now widen to 128 bits.
33750       unsigned NumConcats = 128 / VT.getSizeInBits();
33751       MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
33752                                       VT.getVectorNumElements() * NumConcats);
33753       SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
33754       ConcatOps[0] = Res;
33755       Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
33756       Results.push_back(Res);
33757       if (IsStrict)
33758         Results.push_back(Chain);
33759       return;
33760     }
33761 
33762 
33763     if (VT == MVT::v2i32) {
33764       assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
33765              "Strict unsigned conversion requires AVX512");
33766       assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33767       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33768              "Unexpected type action!");
33769       if (Src.getValueType() == MVT::v2f64) {
33770         if (!IsSigned && !Subtarget.hasAVX512()) {
33771           SDValue Res =
33772               expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
33773           Results.push_back(Res);
33774           return;
33775         }
33776 
33777         unsigned Opc;
33778         if (IsStrict)
33779           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33780         else
33781           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33782 
33783         // If we have VLX we can emit a target specific FP_TO_UINT node.
33784         if (!IsSigned && !Subtarget.hasVLX()) {
33785           // Otherwise we can defer to the generic legalizer which will widen
33786           // the input as well. This will be further widened during op
33787           // legalization to v8i32<-v8f64.
33788           // For strict nodes we'll need to widen ourselves.
33789           // FIXME: Fix the type legalizer to safely widen strict nodes?
33790           if (!IsStrict)
33791             return;
33792           Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
33793                             DAG.getConstantFP(0.0, dl, MVT::v2f64));
33794           Opc = N->getOpcode();
33795         }
33796         SDValue Res;
33797         SDValue Chain;
33798         if (IsStrict) {
33799           Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
33800                             {N->getOperand(0), Src});
33801           Chain = Res.getValue(1);
33802         } else {
33803           Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
33804         }
33805         Results.push_back(Res);
33806         if (IsStrict)
33807           Results.push_back(Chain);
33808         return;
33809       }
33810 
33811       // Custom widen strict v2f32->v2i32 by padding with zeros.
33812       // FIXME: Should generic type legalizer do this?
33813       if (Src.getValueType() == MVT::v2f32 && IsStrict) {
33814         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
33815                           DAG.getConstantFP(0.0, dl, MVT::v2f32));
33816         SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
33817                                   {N->getOperand(0), Src});
33818         Results.push_back(Res);
33819         Results.push_back(Res.getValue(1));
33820         return;
33821       }
33822 
33823       // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
33824       // so early out here.
33825       return;
33826     }
33827 
33828     assert(!VT.isVector() && "Vectors should have been handled above!");
33829 
33830     if ((Subtarget.hasDQI() && VT == MVT::i64 &&
33831          (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
33832         (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
33833       assert(!Subtarget.is64Bit() && "i64 should be legal");
33834       unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
33835       // If we use a 128-bit result we might need to use a target specific node.
33836       unsigned SrcElts =
33837           std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
33838       MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
33839       MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
33840       unsigned Opc = N->getOpcode();
33841       if (NumElts != SrcElts) {
33842         if (IsStrict)
33843           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33844         else
33845           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33846       }
33847 
33848       SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
33849       SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
33850                                 DAG.getConstantFP(0.0, dl, VecInVT), Src,
33851                                 ZeroIdx);
33852       SDValue Chain;
33853       if (IsStrict) {
33854         SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
33855         Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
33856         Chain = Res.getValue(1);
33857       } else
33858         Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
33859       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
33860       Results.push_back(Res);
33861       if (IsStrict)
33862         Results.push_back(Chain);
33863       return;
33864     }
33865 
33866     if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
33867       SDValue Chain;
33868       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
33869       Results.push_back(V);
33870       if (IsStrict)
33871         Results.push_back(Chain);
33872       return;
33873     }
33874 
33875     if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
33876       Results.push_back(V);
33877       if (IsStrict)
33878         Results.push_back(Chain);
33879     }
33880     return;
33881   }
33882   case ISD::LRINT:
33883   case ISD::LLRINT: {
33884     if (SDValue V = LRINT_LLRINTHelper(N, DAG))
33885       Results.push_back(V);
33886     return;
33887   }
33888 
33889   case ISD::SINT_TO_FP:
33890   case ISD::STRICT_SINT_TO_FP:
33891   case ISD::UINT_TO_FP:
33892   case ISD::STRICT_UINT_TO_FP: {
33893     bool IsStrict = N->isStrictFPOpcode();
33894     bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
33895                     N->getOpcode() == ISD::STRICT_SINT_TO_FP;
33896     EVT VT = N->getValueType(0);
33897     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33898     if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
33899         Subtarget.hasVLX()) {
33900       if (Src.getValueType().getVectorElementType() == MVT::i16)
33901         return;
33902 
33903       if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
33904         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
33905                           IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
33906                                    : DAG.getUNDEF(MVT::v2i32));
33907       if (IsStrict) {
33908         unsigned Opc =
33909             IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
33910         SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
33911                                   {N->getOperand(0), Src});
33912         Results.push_back(Res);
33913         Results.push_back(Res.getValue(1));
33914       } else {
33915         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
33916         Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
33917       }
33918       return;
33919     }
33920     if (VT != MVT::v2f32)
33921       return;
33922     EVT SrcVT = Src.getValueType();
33923     if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
33924       if (IsStrict) {
33925         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
33926                                 : X86ISD::STRICT_CVTUI2P;
33927         SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
33928                                   {N->getOperand(0), Src});
33929         Results.push_back(Res);
33930         Results.push_back(Res.getValue(1));
33931       } else {
33932         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
33933         Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
33934       }
33935       return;
33936     }
33937     if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
33938         Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
33939       SDValue Zero = DAG.getConstant(0, dl, SrcVT);
33940       SDValue One  = DAG.getConstant(1, dl, SrcVT);
33941       SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
33942                                  DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
33943                                  DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
33944       SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
33945       SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
33946       SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
33947       for (int i = 0; i != 2; ++i) {
33948         SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
33949                                   SignSrc, DAG.getIntPtrConstant(i, dl));
33950         if (IsStrict)
33951           SignCvts[i] =
33952               DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
33953                           {N->getOperand(0), Elt});
33954         else
33955           SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
33956       };
33957       SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
33958       SDValue Slow, Chain;
33959       if (IsStrict) {
33960         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
33961                             SignCvts[0].getValue(1), SignCvts[1].getValue(1));
33962         Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
33963                            {Chain, SignCvt, SignCvt});
33964         Chain = Slow.getValue(1);
33965       } else {
33966         Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
33967       }
33968       IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
33969       IsNeg =
33970           DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
33971       SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
33972       Results.push_back(Cvt);
33973       if (IsStrict)
33974         Results.push_back(Chain);
33975       return;
33976     }
33977 
33978     if (SrcVT != MVT::v2i32)
33979       return;
33980 
33981     if (IsSigned || Subtarget.hasAVX512()) {
33982       if (!IsStrict)
33983         return;
33984 
33985       // Custom widen strict v2i32->v2f32 to avoid scalarization.
33986       // FIXME: Should generic type legalizer do this?
33987       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
33988                         DAG.getConstant(0, dl, MVT::v2i32));
33989       SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
33990                                 {N->getOperand(0), Src});
33991       Results.push_back(Res);
33992       Results.push_back(Res.getValue(1));
33993       return;
33994     }
33995 
33996     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
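    // NOTE (editorial, not in the original source): the code below is the
    // classic exponent-bias trick for unsigned i32 -> double conversion.
    // 0x4330000000000000 is the IEEE-754 bit pattern of 2^52, so OR-ing a
    // zero-extended 32-bit value x into the low mantissa bits produces the
    // double 2^52 + x exactly; subtracting VBias (2^52) then yields x as a
    // double, and VFPROUND narrows the v2f64 result to f32 lanes.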
33997     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
33998     SDValue VBias =
33999         DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
34000     SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
34001                              DAG.getBitcast(MVT::v2i64, VBias));
34002     Or = DAG.getBitcast(MVT::v2f64, Or);
34003     if (IsStrict) {
34004       SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
34005                                 {N->getOperand(0), Or, VBias});
34006       SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
34007                                 {MVT::v4f32, MVT::Other},
34008                                 {Sub.getValue(1), Sub});
34009       Results.push_back(Res);
34010       Results.push_back(Res.getValue(1));
34011     } else {
34012       // TODO: Are there any fast-math-flags to propagate here?
34013       SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
34014       Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
34015     }
34016     return;
34017   }
34018   case ISD::STRICT_FP_ROUND:
34019   case ISD::FP_ROUND: {
34020     bool IsStrict = N->isStrictFPOpcode();
34021     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
34022     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
34023     SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
34024     EVT SrcVT = Src.getValueType();
34025     EVT VT = N->getValueType(0);
34026     SDValue V;
34027     if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
34028       SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
34029                              : DAG.getUNDEF(MVT::v2f32);
34030       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
34031     }
34032     if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
34033       assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
34034       if (SrcVT.getVectorElementType() != MVT::f32)
34035         return;
34036 
34037       if (IsStrict)
34038         V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
34039                         {Chain, Src, Rnd});
34040       else
34041         V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
34042 
34043       Results.push_back(DAG.getBitcast(MVT::v8f16, V));
34044       if (IsStrict)
34045         Results.push_back(V.getValue(1));
34046       return;
34047     }
34048     if (!isTypeLegal(Src.getValueType()))
34049       return;
34050     EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
34051     if (IsStrict)
34052       V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
34053                       {Chain, Src});
34054     else
34055       V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
34056     Results.push_back(V);
34057     if (IsStrict)
34058       Results.push_back(V.getValue(1));
34059     return;
34060   }
34061   case ISD::FP_EXTEND:
34062   case ISD::STRICT_FP_EXTEND: {
34063     // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
34064     // No other ValueType for FP_EXTEND should reach this point.
34065     assert(N->getValueType(0) == MVT::v2f32 &&
34066            "Do not know how to legalize this Node");
34067     if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
34068       return;
34069     bool IsStrict = N->isStrictFPOpcode();
34070     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
34071     SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
34072                            : DAG.getUNDEF(MVT::v2f16);
34073     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
34074     if (IsStrict)
34075       V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
34076                       {N->getOperand(0), V});
34077     else
34078       V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
34079     Results.push_back(V);
34080     if (IsStrict)
34081       Results.push_back(V.getValue(1));
34082     return;
34083   }
34084   case ISD::INTRINSIC_W_CHAIN: {
34085     unsigned IntNo = N->getConstantOperandVal(1);
34086     switch (IntNo) {
34087     default : llvm_unreachable("Do not know how to custom type "
34088                                "legalize this intrinsic operation!");
34089     case Intrinsic::x86_rdtsc:
34090       return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
34091                                      Results);
34092     case Intrinsic::x86_rdtscp:
34093       return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
34094                                      Results);
34095     case Intrinsic::x86_rdpmc:
34096       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
34097                                   Results);
34098       return;
34099     case Intrinsic::x86_rdpru:
34100       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
34101         Results);
34102       return;
34103     case Intrinsic::x86_xgetbv:
34104       expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
34105                                   Results);
34106       return;
34107     }
34108   }
34109   case ISD::READCYCLECOUNTER: {
34110     return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
34111   }
34112   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
34113     EVT T = N->getValueType(0);
34114     assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
34115     bool Regs64bit = T == MVT::i128;
34116     assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
34117            "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
34118     MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
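          // CMPXCHG8B/16B expects the compare value in EDX:EAX (RDX:RAX for the
          // 128-bit form), the replacement value in ECX:EBX (RCX:RBX), and
          // reports success by setting ZF.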
34119     SDValue cpInL, cpInH;
34120     cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
34121                         DAG.getConstant(0, dl, HalfT));
34122     cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
34123                         DAG.getConstant(1, dl, HalfT));
34124     cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
34125                              Regs64bit ? X86::RAX : X86::EAX,
34126                              cpInL, SDValue());
34127     cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
34128                              Regs64bit ? X86::RDX : X86::EDX,
34129                              cpInH, cpInL.getValue(1));
34130     SDValue swapInL, swapInH;
34131     swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
34132                           DAG.getConstant(0, dl, HalfT));
34133     swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
34134                           DAG.getConstant(1, dl, HalfT));
34135     swapInH =
34136         DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
34137                          swapInH, cpInH.getValue(1));
34138 
34139     // In 64-bit mode we might need the base pointer in RBX, but we can't know
34140     // until later. So we keep the RBX input in a vreg and use a custom
34141     // inserter.
34142     // Since RBX will be a reserved register, the register allocator will not
34143     // make sure its value is properly saved and restored around this
34144     // live range.
34145     SDValue Result;
34146     SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
34147     MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
34148     if (Regs64bit) {
34149       SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
34150                        swapInH.getValue(1)};
34151       Result =
34152           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
34153     } else {
34154       swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
34155                                  swapInH.getValue(1));
34156       SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
34157                        swapInL.getValue(1)};
34158       Result =
34159           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
34160     }
34161 
34162     SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
34163                                         Regs64bit ? X86::RAX : X86::EAX,
34164                                         HalfT, Result.getValue(1));
34165     SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
34166                                         Regs64bit ? X86::RDX : X86::EDX,
34167                                         HalfT, cpOutL.getValue(2));
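          // The previous memory value comes back in EAX/EDX (RAX/RDX);
          // reassemble it into the wide result below.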
34168     SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
34169 
34170     SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
34171                                         MVT::i32, cpOutH.getValue(2));
34172     SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
34173     Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
34174 
34175     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
34176     Results.push_back(Success);
34177     Results.push_back(EFLAGS.getValue(1));
34178     return;
34179   }
34180   case ISD::ATOMIC_LOAD: {
34181     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
34182     bool NoImplicitFloatOps =
34183         DAG.getMachineFunction().getFunction().hasFnAttribute(
34184             Attribute::NoImplicitFloat);
34185     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
34186       auto *Node = cast<AtomicSDNode>(N);
34187       if (Subtarget.hasSSE1()) {
34188         // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
34189         // Then extract the lower 64-bits.
34190         MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
34191         SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
34192         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
34193         SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
34194                                              MVT::i64, Node->getMemOperand());
34195         if (Subtarget.hasSSE2()) {
34196           SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
34197                                     DAG.getIntPtrConstant(0, dl));
34198           Results.push_back(Res);
34199           Results.push_back(Ld.getValue(1));
34200           return;
34201         }
34202         // We use an alternative sequence for SSE1 that extracts as v2f32 and
34203         // then casts to i64. This avoids a 128-bit stack temporary being
34204         // created by type legalization if we were to cast v4f32->v2i64.
34205         SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
34206                                   DAG.getIntPtrConstant(0, dl));
34207         Res = DAG.getBitcast(MVT::i64, Res);
34208         Results.push_back(Res);
34209         Results.push_back(Ld.getValue(1));
34210         return;
34211       }
34212       if (Subtarget.hasX87()) {
34213         // First load this into an 80-bit X87 register. This will put the whole
34214         // integer into the significand.
34215         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
34216         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
34217         SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
34218                                                  dl, Tys, Ops, MVT::i64,
34219                                                  Node->getMemOperand());
34220         SDValue Chain = Result.getValue(1);
34221 
34222         // Now store the X87 register to a stack temporary and convert to i64.
34223         // This store is not atomic and doesn't need to be.
34224         // FIXME: We don't need a stack temporary if the result of the load
34225         // is already being stored. We could just directly store there.
34226         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
34227         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
34228         MachinePointerInfo MPI =
34229             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
34230         SDValue StoreOps[] = { Chain, Result, StackPtr };
34231         Chain = DAG.getMemIntrinsicNode(
34232             X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
34233             MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
34234 
34235         // Finally load the value back from the stack temporary and return it.
34236         // This load is not atomic and doesn't need to be.
34237         // This load will be further type legalized.
34238         Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
34239         Results.push_back(Result);
34240         Results.push_back(Result.getValue(1));
34241         return;
34242       }
34243     }
34244     // TODO: Use MOVLPS when SSE1 is available?
34245     // Delegate to generic TypeLegalization. Situations we can really handle
34246     // should have already been dealt with by AtomicExpandPass.cpp.
34247     break;
34248   }
34249   case ISD::ATOMIC_SWAP:
34250   case ISD::ATOMIC_LOAD_ADD:
34251   case ISD::ATOMIC_LOAD_SUB:
34252   case ISD::ATOMIC_LOAD_AND:
34253   case ISD::ATOMIC_LOAD_OR:
34254   case ISD::ATOMIC_LOAD_XOR:
34255   case ISD::ATOMIC_LOAD_NAND:
34256   case ISD::ATOMIC_LOAD_MIN:
34257   case ISD::ATOMIC_LOAD_MAX:
34258   case ISD::ATOMIC_LOAD_UMIN:
34259   case ISD::ATOMIC_LOAD_UMAX:
34260     // Delegate to generic TypeLegalization. Situations we can really handle
34261     // should have already been dealt with by AtomicExpandPass.cpp.
34262     break;
34263 
34264   case ISD::BITCAST: {
34265     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
34266     EVT DstVT = N->getValueType(0);
34267     EVT SrcVT = N->getOperand(0).getValueType();
34268 
34269     // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit
34270     // target, we can split using the k-register rather than memory.
34271     if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
34272       assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
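            // Split the v64i1 value into two v32i1 halves, move each half
            // through a 32-bit GPR, and reassemble them as an i64 pair.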
34273       SDValue Lo, Hi;
34274       std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
34275       Lo = DAG.getBitcast(MVT::i32, Lo);
34276       Hi = DAG.getBitcast(MVT::i32, Hi);
34277       SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
34278       Results.push_back(Res);
34279       return;
34280     }
34281 
34282     if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
34283       // FIXME: Use v4f32 for SSE1?
34284       assert(Subtarget.hasSSE2() && "Requires SSE2");
34285       assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
34286              "Unexpected type action!");
34287       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
34288       SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
34289                                 N->getOperand(0));
34290       Res = DAG.getBitcast(WideVT, Res);
34291       Results.push_back(Res);
34292       return;
34293     }
34294 
34295     return;
34296   }
34297   case ISD::MGATHER: {
34298     EVT VT = N->getValueType(0);
34299     if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
34300         (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
34301       auto *Gather = cast<MaskedGatherSDNode>(N);
34302       SDValue Index = Gather->getIndex();
34303       if (Index.getValueType() != MVT::v2i64)
34304         return;
34305       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34306              "Unexpected type action!");
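            // Widen the v2 gather to the v4 type the instruction produces; only
            // the low two elements are actually used.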
34307       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
34308       SDValue Mask = Gather->getMask();
34309       assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
34310       SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
34311                                      Gather->getPassThru(),
34312                                      DAG.getUNDEF(VT));
34313       if (!Subtarget.hasVLX()) {
34314         // We need to widen the mask, but the instruction will only use 2
34315         // of its elements. So we can use undef.
34316         Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
34317                            DAG.getUNDEF(MVT::v2i1));
34318         Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
34319       }
34320       SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
34321                         Gather->getBasePtr(), Index, Gather->getScale() };
34322       SDValue Res = DAG.getMemIntrinsicNode(
34323           X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
34324           Gather->getMemoryVT(), Gather->getMemOperand());
34325       Results.push_back(Res);
34326       Results.push_back(Res.getValue(1));
34327       return;
34328     }
34329     return;
34330   }
34331   case ISD::LOAD: {
34332     // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
34333     // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids a int->fp
34334     // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
34335     MVT VT = N->getSimpleValueType(0);
34336     assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
34337     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34338            "Unexpected type action!");
34339     if (!ISD::isNON_EXTLoad(N))
34340       return;
34341     auto *Ld = cast<LoadSDNode>(N);
34342     if (Subtarget.hasSSE2()) {
34343       MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
34344       SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
34345                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
34346                                 Ld->getMemOperand()->getFlags());
34347       SDValue Chain = Res.getValue(1);
34348       MVT VecVT = MVT::getVectorVT(LdVT, 2);
34349       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
34350       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
34351       Res = DAG.getBitcast(WideVT, Res);
34352       Results.push_back(Res);
34353       Results.push_back(Chain);
34354       return;
34355     }
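          // SSE1-only fallback: VZEXT_LOAD the low 64 bits into a v4f32, which
          // is typically selected as XORPS+MOVLPS.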
34356     assert(Subtarget.hasSSE1() && "Expected SSE");
34357     SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
34358     SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
34359     SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
34360                                           MVT::i64, Ld->getMemOperand());
34361     Results.push_back(Res);
34362     Results.push_back(Res.getValue(1));
34363     return;
34364   }
34365   case ISD::ADDRSPACECAST: {
34366     SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
34367     Results.push_back(V);
34368     return;
34369   }
34370   case ISD::BITREVERSE: {
34371     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
34372     assert(Subtarget.hasXOP() && "Expected XOP");
34373     // We can use VPPERM by copying to a vector register and back. We'll need
34374     // to move the scalar in two i32 pieces.
34375     Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
34376     return;
34377   }
34378   case ISD::EXTRACT_VECTOR_ELT: {
34379     // f16 = extract vXf16 %vec, i64 %idx
34380     assert(N->getSimpleValueType(0) == MVT::f16 &&
34381            "Unexpected Value type of EXTRACT_VECTOR_ELT!");
34382     assert(Subtarget.hasFP16() && "Expected FP16");
34383     SDValue VecOp = N->getOperand(0);
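          // Extract through the integer domain: bitcast the vector to vXi16,
          // extract an i16 element, then bitcast the scalar back to f16.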
34384     EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
34385     SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
34386     Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
34387                         N->getOperand(1));
34388     Split = DAG.getBitcast(MVT::f16, Split);
34389     Results.push_back(Split);
34390     return;
34391   }
34392   }
34393 }
34394 
34395 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
34396   switch ((X86ISD::NodeType)Opcode) {
34397   case X86ISD::FIRST_NUMBER:       break;
34398 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
34399   NODE_NAME_CASE(BSF)
34400   NODE_NAME_CASE(BSR)
34401   NODE_NAME_CASE(FSHL)
34402   NODE_NAME_CASE(FSHR)
34403   NODE_NAME_CASE(FAND)
34404   NODE_NAME_CASE(FANDN)
34405   NODE_NAME_CASE(FOR)
34406   NODE_NAME_CASE(FXOR)
34407   NODE_NAME_CASE(FILD)
34408   NODE_NAME_CASE(FIST)
34409   NODE_NAME_CASE(FP_TO_INT_IN_MEM)
34410   NODE_NAME_CASE(FLD)
34411   NODE_NAME_CASE(FST)
34412   NODE_NAME_CASE(CALL)
34413   NODE_NAME_CASE(CALL_RVMARKER)
34414   NODE_NAME_CASE(BT)
34415   NODE_NAME_CASE(CMP)
34416   NODE_NAME_CASE(FCMP)
34417   NODE_NAME_CASE(STRICT_FCMP)
34418   NODE_NAME_CASE(STRICT_FCMPS)
34419   NODE_NAME_CASE(COMI)
34420   NODE_NAME_CASE(UCOMI)
34421   NODE_NAME_CASE(CMPM)
34422   NODE_NAME_CASE(CMPMM)
34423   NODE_NAME_CASE(STRICT_CMPM)
34424   NODE_NAME_CASE(CMPMM_SAE)
34425   NODE_NAME_CASE(SETCC)
34426   NODE_NAME_CASE(SETCC_CARRY)
34427   NODE_NAME_CASE(FSETCC)
34428   NODE_NAME_CASE(FSETCCM)
34429   NODE_NAME_CASE(FSETCCM_SAE)
34430   NODE_NAME_CASE(CMOV)
34431   NODE_NAME_CASE(BRCOND)
34432   NODE_NAME_CASE(RET_FLAG)
34433   NODE_NAME_CASE(IRET)
34434   NODE_NAME_CASE(REP_STOS)
34435   NODE_NAME_CASE(REP_MOVS)
34436   NODE_NAME_CASE(GlobalBaseReg)
34437   NODE_NAME_CASE(Wrapper)
34438   NODE_NAME_CASE(WrapperRIP)
34439   NODE_NAME_CASE(MOVQ2DQ)
34440   NODE_NAME_CASE(MOVDQ2Q)
34441   NODE_NAME_CASE(MMX_MOVD2W)
34442   NODE_NAME_CASE(MMX_MOVW2D)
34443   NODE_NAME_CASE(PEXTRB)
34444   NODE_NAME_CASE(PEXTRW)
34445   NODE_NAME_CASE(INSERTPS)
34446   NODE_NAME_CASE(PINSRB)
34447   NODE_NAME_CASE(PINSRW)
34448   NODE_NAME_CASE(PSHUFB)
34449   NODE_NAME_CASE(ANDNP)
34450   NODE_NAME_CASE(BLENDI)
34451   NODE_NAME_CASE(BLENDV)
34452   NODE_NAME_CASE(HADD)
34453   NODE_NAME_CASE(HSUB)
34454   NODE_NAME_CASE(FHADD)
34455   NODE_NAME_CASE(FHSUB)
34456   NODE_NAME_CASE(CONFLICT)
34457   NODE_NAME_CASE(FMAX)
34458   NODE_NAME_CASE(FMAXS)
34459   NODE_NAME_CASE(FMAX_SAE)
34460   NODE_NAME_CASE(FMAXS_SAE)
34461   NODE_NAME_CASE(FMIN)
34462   NODE_NAME_CASE(FMINS)
34463   NODE_NAME_CASE(FMIN_SAE)
34464   NODE_NAME_CASE(FMINS_SAE)
34465   NODE_NAME_CASE(FMAXC)
34466   NODE_NAME_CASE(FMINC)
34467   NODE_NAME_CASE(FRSQRT)
34468   NODE_NAME_CASE(FRCP)
34469   NODE_NAME_CASE(EXTRQI)
34470   NODE_NAME_CASE(INSERTQI)
34471   NODE_NAME_CASE(TLSADDR)
34472   NODE_NAME_CASE(TLSBASEADDR)
34473   NODE_NAME_CASE(TLSCALL)
34474   NODE_NAME_CASE(EH_SJLJ_SETJMP)
34475   NODE_NAME_CASE(EH_SJLJ_LONGJMP)
34476   NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
34477   NODE_NAME_CASE(EH_RETURN)
34478   NODE_NAME_CASE(TC_RETURN)
34479   NODE_NAME_CASE(FNSTCW16m)
34480   NODE_NAME_CASE(FLDCW16m)
34481   NODE_NAME_CASE(LCMPXCHG_DAG)
34482   NODE_NAME_CASE(LCMPXCHG8_DAG)
34483   NODE_NAME_CASE(LCMPXCHG16_DAG)
34484   NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
34485   NODE_NAME_CASE(LADD)
34486   NODE_NAME_CASE(LSUB)
34487   NODE_NAME_CASE(LOR)
34488   NODE_NAME_CASE(LXOR)
34489   NODE_NAME_CASE(LAND)
34490   NODE_NAME_CASE(LBTS)
34491   NODE_NAME_CASE(LBTC)
34492   NODE_NAME_CASE(LBTR)
34493   NODE_NAME_CASE(LBTS_RM)
34494   NODE_NAME_CASE(LBTC_RM)
34495   NODE_NAME_CASE(LBTR_RM)
34496   NODE_NAME_CASE(AADD)
34497   NODE_NAME_CASE(AOR)
34498   NODE_NAME_CASE(AXOR)
34499   NODE_NAME_CASE(AAND)
34500   NODE_NAME_CASE(VZEXT_MOVL)
34501   NODE_NAME_CASE(VZEXT_LOAD)
34502   NODE_NAME_CASE(VEXTRACT_STORE)
34503   NODE_NAME_CASE(VTRUNC)
34504   NODE_NAME_CASE(VTRUNCS)
34505   NODE_NAME_CASE(VTRUNCUS)
34506   NODE_NAME_CASE(VMTRUNC)
34507   NODE_NAME_CASE(VMTRUNCS)
34508   NODE_NAME_CASE(VMTRUNCUS)
34509   NODE_NAME_CASE(VTRUNCSTORES)
34510   NODE_NAME_CASE(VTRUNCSTOREUS)
34511   NODE_NAME_CASE(VMTRUNCSTORES)
34512   NODE_NAME_CASE(VMTRUNCSTOREUS)
34513   NODE_NAME_CASE(VFPEXT)
34514   NODE_NAME_CASE(STRICT_VFPEXT)
34515   NODE_NAME_CASE(VFPEXT_SAE)
34516   NODE_NAME_CASE(VFPEXTS)
34517   NODE_NAME_CASE(VFPEXTS_SAE)
34518   NODE_NAME_CASE(VFPROUND)
34519   NODE_NAME_CASE(STRICT_VFPROUND)
34520   NODE_NAME_CASE(VMFPROUND)
34521   NODE_NAME_CASE(VFPROUND_RND)
34522   NODE_NAME_CASE(VFPROUNDS)
34523   NODE_NAME_CASE(VFPROUNDS_RND)
34524   NODE_NAME_CASE(VSHLDQ)
34525   NODE_NAME_CASE(VSRLDQ)
34526   NODE_NAME_CASE(VSHL)
34527   NODE_NAME_CASE(VSRL)
34528   NODE_NAME_CASE(VSRA)
34529   NODE_NAME_CASE(VSHLI)
34530   NODE_NAME_CASE(VSRLI)
34531   NODE_NAME_CASE(VSRAI)
34532   NODE_NAME_CASE(VSHLV)
34533   NODE_NAME_CASE(VSRLV)
34534   NODE_NAME_CASE(VSRAV)
34535   NODE_NAME_CASE(VROTLI)
34536   NODE_NAME_CASE(VROTRI)
34537   NODE_NAME_CASE(VPPERM)
34538   NODE_NAME_CASE(CMPP)
34539   NODE_NAME_CASE(STRICT_CMPP)
34540   NODE_NAME_CASE(PCMPEQ)
34541   NODE_NAME_CASE(PCMPGT)
34542   NODE_NAME_CASE(PHMINPOS)
34543   NODE_NAME_CASE(ADD)
34544   NODE_NAME_CASE(SUB)
34545   NODE_NAME_CASE(ADC)
34546   NODE_NAME_CASE(SBB)
34547   NODE_NAME_CASE(SMUL)
34548   NODE_NAME_CASE(UMUL)
34549   NODE_NAME_CASE(OR)
34550   NODE_NAME_CASE(XOR)
34551   NODE_NAME_CASE(AND)
34552   NODE_NAME_CASE(BEXTR)
34553   NODE_NAME_CASE(BEXTRI)
34554   NODE_NAME_CASE(BZHI)
34555   NODE_NAME_CASE(PDEP)
34556   NODE_NAME_CASE(PEXT)
34557   NODE_NAME_CASE(MUL_IMM)
34558   NODE_NAME_CASE(MOVMSK)
34559   NODE_NAME_CASE(PTEST)
34560   NODE_NAME_CASE(TESTP)
34561   NODE_NAME_CASE(KORTEST)
34562   NODE_NAME_CASE(KTEST)
34563   NODE_NAME_CASE(KADD)
34564   NODE_NAME_CASE(KSHIFTL)
34565   NODE_NAME_CASE(KSHIFTR)
34566   NODE_NAME_CASE(PACKSS)
34567   NODE_NAME_CASE(PACKUS)
34568   NODE_NAME_CASE(PALIGNR)
34569   NODE_NAME_CASE(VALIGN)
34570   NODE_NAME_CASE(VSHLD)
34571   NODE_NAME_CASE(VSHRD)
34572   NODE_NAME_CASE(VSHLDV)
34573   NODE_NAME_CASE(VSHRDV)
34574   NODE_NAME_CASE(PSHUFD)
34575   NODE_NAME_CASE(PSHUFHW)
34576   NODE_NAME_CASE(PSHUFLW)
34577   NODE_NAME_CASE(SHUFP)
34578   NODE_NAME_CASE(SHUF128)
34579   NODE_NAME_CASE(MOVLHPS)
34580   NODE_NAME_CASE(MOVHLPS)
34581   NODE_NAME_CASE(MOVDDUP)
34582   NODE_NAME_CASE(MOVSHDUP)
34583   NODE_NAME_CASE(MOVSLDUP)
34584   NODE_NAME_CASE(MOVSD)
34585   NODE_NAME_CASE(MOVSS)
34586   NODE_NAME_CASE(MOVSH)
34587   NODE_NAME_CASE(UNPCKL)
34588   NODE_NAME_CASE(UNPCKH)
34589   NODE_NAME_CASE(VBROADCAST)
34590   NODE_NAME_CASE(VBROADCAST_LOAD)
34591   NODE_NAME_CASE(VBROADCASTM)
34592   NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
34593   NODE_NAME_CASE(VPERMILPV)
34594   NODE_NAME_CASE(VPERMILPI)
34595   NODE_NAME_CASE(VPERM2X128)
34596   NODE_NAME_CASE(VPERMV)
34597   NODE_NAME_CASE(VPERMV3)
34598   NODE_NAME_CASE(VPERMI)
34599   NODE_NAME_CASE(VPTERNLOG)
34600   NODE_NAME_CASE(VFIXUPIMM)
34601   NODE_NAME_CASE(VFIXUPIMM_SAE)
34602   NODE_NAME_CASE(VFIXUPIMMS)
34603   NODE_NAME_CASE(VFIXUPIMMS_SAE)
34604   NODE_NAME_CASE(VRANGE)
34605   NODE_NAME_CASE(VRANGE_SAE)
34606   NODE_NAME_CASE(VRANGES)
34607   NODE_NAME_CASE(VRANGES_SAE)
34608   NODE_NAME_CASE(PMULUDQ)
34609   NODE_NAME_CASE(PMULDQ)
34610   NODE_NAME_CASE(PSADBW)
34611   NODE_NAME_CASE(DBPSADBW)
34612   NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
34613   NODE_NAME_CASE(VAARG_64)
34614   NODE_NAME_CASE(VAARG_X32)
34615   NODE_NAME_CASE(DYN_ALLOCA)
34616   NODE_NAME_CASE(MFENCE)
34617   NODE_NAME_CASE(SEG_ALLOCA)
34618   NODE_NAME_CASE(PROBED_ALLOCA)
34619   NODE_NAME_CASE(RDRAND)
34620   NODE_NAME_CASE(RDSEED)
34621   NODE_NAME_CASE(RDPKRU)
34622   NODE_NAME_CASE(WRPKRU)
34623   NODE_NAME_CASE(VPMADDUBSW)
34624   NODE_NAME_CASE(VPMADDWD)
34625   NODE_NAME_CASE(VPSHA)
34626   NODE_NAME_CASE(VPSHL)
34627   NODE_NAME_CASE(VPCOM)
34628   NODE_NAME_CASE(VPCOMU)
34629   NODE_NAME_CASE(VPERMIL2)
34630   NODE_NAME_CASE(FMSUB)
34631   NODE_NAME_CASE(STRICT_FMSUB)
34632   NODE_NAME_CASE(FNMADD)
34633   NODE_NAME_CASE(STRICT_FNMADD)
34634   NODE_NAME_CASE(FNMSUB)
34635   NODE_NAME_CASE(STRICT_FNMSUB)
34636   NODE_NAME_CASE(FMADDSUB)
34637   NODE_NAME_CASE(FMSUBADD)
34638   NODE_NAME_CASE(FMADD_RND)
34639   NODE_NAME_CASE(FNMADD_RND)
34640   NODE_NAME_CASE(FMSUB_RND)
34641   NODE_NAME_CASE(FNMSUB_RND)
34642   NODE_NAME_CASE(FMADDSUB_RND)
34643   NODE_NAME_CASE(FMSUBADD_RND)
34644   NODE_NAME_CASE(VFMADDC)
34645   NODE_NAME_CASE(VFMADDC_RND)
34646   NODE_NAME_CASE(VFCMADDC)
34647   NODE_NAME_CASE(VFCMADDC_RND)
34648   NODE_NAME_CASE(VFMULC)
34649   NODE_NAME_CASE(VFMULC_RND)
34650   NODE_NAME_CASE(VFCMULC)
34651   NODE_NAME_CASE(VFCMULC_RND)
34652   NODE_NAME_CASE(VFMULCSH)
34653   NODE_NAME_CASE(VFMULCSH_RND)
34654   NODE_NAME_CASE(VFCMULCSH)
34655   NODE_NAME_CASE(VFCMULCSH_RND)
34656   NODE_NAME_CASE(VFMADDCSH)
34657   NODE_NAME_CASE(VFMADDCSH_RND)
34658   NODE_NAME_CASE(VFCMADDCSH)
34659   NODE_NAME_CASE(VFCMADDCSH_RND)
34660   NODE_NAME_CASE(VPMADD52H)
34661   NODE_NAME_CASE(VPMADD52L)
34662   NODE_NAME_CASE(VRNDSCALE)
34663   NODE_NAME_CASE(STRICT_VRNDSCALE)
34664   NODE_NAME_CASE(VRNDSCALE_SAE)
34665   NODE_NAME_CASE(VRNDSCALES)
34666   NODE_NAME_CASE(VRNDSCALES_SAE)
34667   NODE_NAME_CASE(VREDUCE)
34668   NODE_NAME_CASE(VREDUCE_SAE)
34669   NODE_NAME_CASE(VREDUCES)
34670   NODE_NAME_CASE(VREDUCES_SAE)
34671   NODE_NAME_CASE(VGETMANT)
34672   NODE_NAME_CASE(VGETMANT_SAE)
34673   NODE_NAME_CASE(VGETMANTS)
34674   NODE_NAME_CASE(VGETMANTS_SAE)
34675   NODE_NAME_CASE(PCMPESTR)
34676   NODE_NAME_CASE(PCMPISTR)
34677   NODE_NAME_CASE(XTEST)
34678   NODE_NAME_CASE(COMPRESS)
34679   NODE_NAME_CASE(EXPAND)
34680   NODE_NAME_CASE(SELECTS)
34681   NODE_NAME_CASE(ADDSUB)
34682   NODE_NAME_CASE(RCP14)
34683   NODE_NAME_CASE(RCP14S)
34684   NODE_NAME_CASE(RCP28)
34685   NODE_NAME_CASE(RCP28_SAE)
34686   NODE_NAME_CASE(RCP28S)
34687   NODE_NAME_CASE(RCP28S_SAE)
34688   NODE_NAME_CASE(EXP2)
34689   NODE_NAME_CASE(EXP2_SAE)
34690   NODE_NAME_CASE(RSQRT14)
34691   NODE_NAME_CASE(RSQRT14S)
34692   NODE_NAME_CASE(RSQRT28)
34693   NODE_NAME_CASE(RSQRT28_SAE)
34694   NODE_NAME_CASE(RSQRT28S)
34695   NODE_NAME_CASE(RSQRT28S_SAE)
34696   NODE_NAME_CASE(FADD_RND)
34697   NODE_NAME_CASE(FADDS)
34698   NODE_NAME_CASE(FADDS_RND)
34699   NODE_NAME_CASE(FSUB_RND)
34700   NODE_NAME_CASE(FSUBS)
34701   NODE_NAME_CASE(FSUBS_RND)
34702   NODE_NAME_CASE(FMUL_RND)
34703   NODE_NAME_CASE(FMULS)
34704   NODE_NAME_CASE(FMULS_RND)
34705   NODE_NAME_CASE(FDIV_RND)
34706   NODE_NAME_CASE(FDIVS)
34707   NODE_NAME_CASE(FDIVS_RND)
34708   NODE_NAME_CASE(FSQRT_RND)
34709   NODE_NAME_CASE(FSQRTS)
34710   NODE_NAME_CASE(FSQRTS_RND)
34711   NODE_NAME_CASE(FGETEXP)
34712   NODE_NAME_CASE(FGETEXP_SAE)
34713   NODE_NAME_CASE(FGETEXPS)
34714   NODE_NAME_CASE(FGETEXPS_SAE)
34715   NODE_NAME_CASE(SCALEF)
34716   NODE_NAME_CASE(SCALEF_RND)
34717   NODE_NAME_CASE(SCALEFS)
34718   NODE_NAME_CASE(SCALEFS_RND)
34719   NODE_NAME_CASE(MULHRS)
34720   NODE_NAME_CASE(SINT_TO_FP_RND)
34721   NODE_NAME_CASE(UINT_TO_FP_RND)
34722   NODE_NAME_CASE(CVTTP2SI)
34723   NODE_NAME_CASE(CVTTP2UI)
34724   NODE_NAME_CASE(STRICT_CVTTP2SI)
34725   NODE_NAME_CASE(STRICT_CVTTP2UI)
34726   NODE_NAME_CASE(MCVTTP2SI)
34727   NODE_NAME_CASE(MCVTTP2UI)
34728   NODE_NAME_CASE(CVTTP2SI_SAE)
34729   NODE_NAME_CASE(CVTTP2UI_SAE)
34730   NODE_NAME_CASE(CVTTS2SI)
34731   NODE_NAME_CASE(CVTTS2UI)
34732   NODE_NAME_CASE(CVTTS2SI_SAE)
34733   NODE_NAME_CASE(CVTTS2UI_SAE)
34734   NODE_NAME_CASE(CVTSI2P)
34735   NODE_NAME_CASE(CVTUI2P)
34736   NODE_NAME_CASE(STRICT_CVTSI2P)
34737   NODE_NAME_CASE(STRICT_CVTUI2P)
34738   NODE_NAME_CASE(MCVTSI2P)
34739   NODE_NAME_CASE(MCVTUI2P)
34740   NODE_NAME_CASE(VFPCLASS)
34741   NODE_NAME_CASE(VFPCLASSS)
34742   NODE_NAME_CASE(MULTISHIFT)
34743   NODE_NAME_CASE(SCALAR_SINT_TO_FP)
34744   NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
34745   NODE_NAME_CASE(SCALAR_UINT_TO_FP)
34746   NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
34747   NODE_NAME_CASE(CVTPS2PH)
34748   NODE_NAME_CASE(STRICT_CVTPS2PH)
34749   NODE_NAME_CASE(CVTPS2PH_SAE)
34750   NODE_NAME_CASE(MCVTPS2PH)
34751   NODE_NAME_CASE(MCVTPS2PH_SAE)
34752   NODE_NAME_CASE(CVTPH2PS)
34753   NODE_NAME_CASE(STRICT_CVTPH2PS)
34754   NODE_NAME_CASE(CVTPH2PS_SAE)
34755   NODE_NAME_CASE(CVTP2SI)
34756   NODE_NAME_CASE(CVTP2UI)
34757   NODE_NAME_CASE(MCVTP2SI)
34758   NODE_NAME_CASE(MCVTP2UI)
34759   NODE_NAME_CASE(CVTP2SI_RND)
34760   NODE_NAME_CASE(CVTP2UI_RND)
34761   NODE_NAME_CASE(CVTS2SI)
34762   NODE_NAME_CASE(CVTS2UI)
34763   NODE_NAME_CASE(CVTS2SI_RND)
34764   NODE_NAME_CASE(CVTS2UI_RND)
34765   NODE_NAME_CASE(CVTNE2PS2BF16)
34766   NODE_NAME_CASE(CVTNEPS2BF16)
34767   NODE_NAME_CASE(MCVTNEPS2BF16)
34768   NODE_NAME_CASE(DPBF16PS)
34769   NODE_NAME_CASE(LWPINS)
34770   NODE_NAME_CASE(MGATHER)
34771   NODE_NAME_CASE(MSCATTER)
34772   NODE_NAME_CASE(VPDPBUSD)
34773   NODE_NAME_CASE(VPDPBUSDS)
34774   NODE_NAME_CASE(VPDPWSSD)
34775   NODE_NAME_CASE(VPDPWSSDS)
34776   NODE_NAME_CASE(VPSHUFBITQMB)
34777   NODE_NAME_CASE(GF2P8MULB)
34778   NODE_NAME_CASE(GF2P8AFFINEQB)
34779   NODE_NAME_CASE(GF2P8AFFINEINVQB)
34780   NODE_NAME_CASE(NT_CALL)
34781   NODE_NAME_CASE(NT_BRIND)
34782   NODE_NAME_CASE(UMWAIT)
34783   NODE_NAME_CASE(TPAUSE)
34784   NODE_NAME_CASE(ENQCMD)
34785   NODE_NAME_CASE(ENQCMDS)
34786   NODE_NAME_CASE(VP2INTERSECT)
34787   NODE_NAME_CASE(VPDPBSUD)
34788   NODE_NAME_CASE(VPDPBSUDS)
34789   NODE_NAME_CASE(VPDPBUUD)
34790   NODE_NAME_CASE(VPDPBUUDS)
34791   NODE_NAME_CASE(VPDPBSSD)
34792   NODE_NAME_CASE(VPDPBSSDS)
34793   NODE_NAME_CASE(AESENC128KL)
34794   NODE_NAME_CASE(AESDEC128KL)
34795   NODE_NAME_CASE(AESENC256KL)
34796   NODE_NAME_CASE(AESDEC256KL)
34797   NODE_NAME_CASE(AESENCWIDE128KL)
34798   NODE_NAME_CASE(AESDECWIDE128KL)
34799   NODE_NAME_CASE(AESENCWIDE256KL)
34800   NODE_NAME_CASE(AESDECWIDE256KL)
34801   NODE_NAME_CASE(CMPCCXADD)
34802   NODE_NAME_CASE(TESTUI)
34803   NODE_NAME_CASE(FP80_ADD)
34804   NODE_NAME_CASE(STRICT_FP80_ADD)
34805   }
34806   return nullptr;
34807 #undef NODE_NAME_CASE
34808 }
34809 
34810 /// Return true if the addressing mode represented by AM is legal for this
34811 /// target, for a load/store of the specified type.
34812 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
34813                                               const AddrMode &AM, Type *Ty,
34814                                               unsigned AS,
34815                                               Instruction *I) const {
34816   // X86 supports extremely general addressing modes.
34817   CodeModel::Model M = getTargetMachine().getCodeModel();
34818 
34819   // X86 allows a sign-extended 32-bit immediate field as a displacement.
34820   if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
34821     return false;
34822 
34823   if (AM.BaseGV) {
34824     unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
34825 
34826     // If a reference to this global requires an extra load, we can't fold it.
34827     if (isGlobalStubReference(GVFlags))
34828       return false;
34829 
34830     // If BaseGV requires a register for the PIC base, we cannot also have a
34831     // BaseReg specified.
34832     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
34833       return false;
34834 
34835     // If lower 4G is not available, then we must use rip-relative addressing.
34836     if ((M != CodeModel::Small || isPositionIndependent()) &&
34837         Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
34838       return false;
34839   }
34840 
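        // The SIB byte only encodes scales of 1, 2, 4, and 8; scales of 3, 5,
        // and 9 are reachable as base + scaled-index, which requires the base
        // register slot to be free.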
34841   switch (AM.Scale) {
34842   case 0:
34843   case 1:
34844   case 2:
34845   case 4:
34846   case 8:
34847     // These scales always work.
34848     break;
34849   case 3:
34850   case 5:
34851   case 9:
34852     // These scales are formed with basereg+scalereg.  Only accept if there is
34853     // no basereg yet.
34854     if (AM.HasBaseReg)
34855       return false;
34856     break;
34857   default:  // Other stuff never works.
34858     return false;
34859   }
34860 
34861   return true;
34862 }
34863 
34864 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
34865   unsigned Bits = Ty->getScalarSizeInBits();
34866 
34867   // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
34868   // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
34869   if (Subtarget.hasXOP() &&
34870       (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
34871     return false;
34872 
34873   // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
34874   // shifts just as cheap as scalar ones.
34875   if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
34876     return false;
34877 
34878   // AVX512BW has shifts such as vpsllvw.
34879   if (Subtarget.hasBWI() && Bits == 16)
34880     return false;
34881 
34882   // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
34883   // fully general vector.
34884   return true;
34885 }
34886 
34887 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
34888   switch (Opcode) {
34889   // These are non-commutative binops.
34890   // TODO: Add more X86ISD opcodes once we have test coverage.
34891   case X86ISD::ANDNP:
34892   case X86ISD::PCMPGT:
34893   case X86ISD::FMAX:
34894   case X86ISD::FMIN:
34895   case X86ISD::FANDN:
34896   case X86ISD::VPSHA:
34897   case X86ISD::VPSHL:
34898   case X86ISD::VSHLV:
34899   case X86ISD::VSRLV:
34900   case X86ISD::VSRAV:
34901     return true;
34902   }
34903 
34904   return TargetLoweringBase::isBinOp(Opcode);
34905 }
34906 
34907 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
34908   switch (Opcode) {
34909   // TODO: Add more X86ISD opcodes once we have test coverage.
34910   case X86ISD::PCMPEQ:
34911   case X86ISD::PMULDQ:
34912   case X86ISD::PMULUDQ:
34913   case X86ISD::FMAXC:
34914   case X86ISD::FMINC:
34915   case X86ISD::FAND:
34916   case X86ISD::FOR:
34917   case X86ISD::FXOR:
34918     return true;
34919   }
34920 
34921   return TargetLoweringBase::isCommutativeBinOp(Opcode);
34922 }
34923 
34924 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
34925   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
34926     return false;
34927   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
34928   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
34929   return NumBits1 > NumBits2;
34930 }
34931 
34932 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
34933   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
34934     return false;
34935 
34936   if (!isTypeLegal(EVT::getEVT(Ty1)))
34937     return false;
34938 
34939   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
34940 
34941   // Assuming the caller doesn't have a zeroext or signext return parameter,
34942   // truncation all the way down to i1 is valid.
34943   return true;
34944 }
34945 
34946 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
34947   return isInt<32>(Imm);
34948 }
34949 
34950 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
34951   // Can also use sub to handle negated immediates.
34952   return isInt<32>(Imm);
34953 }
34954 
34955 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
34956   return isInt<32>(Imm);
34957 }
34958 
34959 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
34960   if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
34961     return false;
34962   unsigned NumBits1 = VT1.getSizeInBits();
34963   unsigned NumBits2 = VT2.getSizeInBits();
34964   return NumBits1 > NumBits2;
34965 }
34966 
34967 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
34968   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
34969   return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
34970 }
34971 
34972 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
34973   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
34974   return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
34975 }
34976 
34977 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
34978   EVT VT1 = Val.getValueType();
34979   if (isZExtFree(VT1, VT2))
34980     return true;
34981 
34982   if (Val.getOpcode() != ISD::LOAD)
34983     return false;
34984 
34985   if (!VT1.isSimple() || !VT1.isInteger() ||
34986       !VT2.isSimple() || !VT2.isInteger())
34987     return false;
34988 
34989   switch (VT1.getSimpleVT().SimpleTy) {
34990   default: break;
34991   case MVT::i8:
34992   case MVT::i16:
34993   case MVT::i32:
34994     // X86 has 8, 16, and 32-bit zero-extending loads.
34995     return true;
34996   }
34997 
34998   return false;
34999 }
35000 
35001 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
35002                                            SmallVectorImpl<Use *> &Ops) const {
35003   using namespace llvm::PatternMatch;
35004 
35005   FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
35006   if (!VTy)
35007     return false;
35008 
35009   if (I->getOpcode() == Instruction::Mul &&
35010       VTy->getElementType()->isIntegerTy(64)) {
35011     for (auto &Op : I->operands()) {
35012       // Make sure we are not already sinking this operand
35013       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
35014         continue;
35015 
35016       // Look for PMULDQ pattern where the input is a sext_inreg from vXi32 or
35017       // the PMULUDQ pattern where the input is a zext_inreg from vXi32.
35018       if (Subtarget.hasSSE41() &&
35019           match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
35020                                  m_SpecificInt(32)))) {
35021         Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
35022         Ops.push_back(&Op);
35023       } else if (Subtarget.hasSSE2() &&
35024                  match(Op.get(),
35025                        m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
35026         Ops.push_back(&Op);
35027       }
35028     }
35029 
35030     return !Ops.empty();
35031   }
35032 
35033   // A uniform shift amount in a vector shift or funnel shift may be much
35034   // cheaper than a generic variable vector shift, so make that pattern visible
35035   // to SDAG by sinking the shuffle instruction next to the shift.
35036   int ShiftAmountOpNum = -1;
35037   if (I->isShift())
35038     ShiftAmountOpNum = 1;
35039   else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
35040     if (II->getIntrinsicID() == Intrinsic::fshl ||
35041         II->getIntrinsicID() == Intrinsic::fshr)
35042       ShiftAmountOpNum = 2;
35043   }
35044 
35045   if (ShiftAmountOpNum == -1)
35046     return false;
35047 
35048   auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
35049   if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
35050       isVectorShiftByScalarCheap(I->getType())) {
35051     Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
35052     return true;
35053   }
35054 
35055   return false;
35056 }
35057 
35058 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
35059   if (!Subtarget.is64Bit())
35060     return false;
35061   return TargetLowering::shouldConvertPhiType(From, To);
35062 }
35063 
35064 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
35065   if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
35066     return false;
35067 
35068   EVT SrcVT = ExtVal.getOperand(0).getValueType();
35069 
35070   // There is no extending load for vXi1.
35071   if (SrcVT.getScalarType() == MVT::i1)
35072     return false;
35073 
35074   return true;
35075 }
35076 
35077 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
35078                                                    EVT VT) const {
35079   if (!Subtarget.hasAnyFMA())
35080     return false;
35081 
35082   VT = VT.getScalarType();
35083 
35084   if (!VT.isSimple())
35085     return false;
35086 
35087   switch (VT.getSimpleVT().SimpleTy) {
35088   case MVT::f16:
35089     return Subtarget.hasFP16();
35090   case MVT::f32:
35091   case MVT::f64:
35092     return true;
35093   default:
35094     break;
35095   }
35096 
35097   return false;
35098 }
35099 
35100 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
35101   // i16 instructions are longer (0x66 prefix) and potentially slower.
35102   return !(VT1 == MVT::i32 && VT2 == MVT::i16);
35103 }
35104 
35105 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
35106                                                              EVT VT) const {
35107   // TODO: This is too general. There are cases where pre-AVX512 codegen would
35108   //       benefit. The transform may also be profitable for scalar code.
35109   if (!Subtarget.hasAVX512())
35110     return false;
35111   if (!Subtarget.hasVLX() && !VT.is512BitVector())
35112     return false;
35113   if (!VT.isVector() || VT.getScalarType() == MVT::i1)
35114     return false;
35115 
35116   return true;
35117 }
35118 
35119 /// Targets can use this to indicate that they only support *some*
35120 /// VECTOR_SHUFFLE operations, those with specific masks.
35121 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
35122 /// are assumed to be legal.
35123 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
35124   if (!VT.isSimple())
35125     return false;
35126 
35127   // Not for i1 vectors
35128   if (VT.getSimpleVT().getScalarType() == MVT::i1)
35129     return false;
35130 
35131   // Very little shuffling can be done for 64-bit vectors right now.
35132   if (VT.getSimpleVT().getSizeInBits() == 64)
35133     return false;
35134 
35135   // We only care that the types being shuffled are legal. The lowering can
35136   // handle any possible shuffle mask that results.
35137   return isTypeLegal(VT.getSimpleVT());
35138 }
35139 
35140 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
35141                                                EVT VT) const {
35142   // Don't convert an 'and' into a shuffle that we don't directly support.
35143   // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
35144   if (!Subtarget.hasAVX2())
35145     if (VT == MVT::v32i8 || VT == MVT::v16i16)
35146       return false;
35147 
35148   // Just delegate to the generic legality, clear masks aren't special.
35149   return isShuffleMaskLegal(Mask, VT);
35150 }
35151 
35152 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
35153   // If the subtarget is using thunks, we must not generate jump tables.
35154   if (Subtarget.useIndirectThunkBranches())
35155     return false;
35156 
35157   // Otherwise, fallback on the generic logic.
35158   return TargetLowering::areJTsAllowed(Fn);
35159 }
35160 
35161 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
35162                                                        EVT ConditionVT) const {
35163   // Avoid 8- and 16-bit types because they increase the chance of unnecessary
35164   // zero-extensions.
35165   if (ConditionVT.getSizeInBits() < 32)
35166     return MVT::i32;
35167   return TargetLoweringBase::getPreferredSwitchConditionType(Context,
35168                                                              ConditionVT);
35169 }
35170 
35171 //===----------------------------------------------------------------------===//
35172 //                           X86 Scheduler Hooks
35173 //===----------------------------------------------------------------------===//
35174 
35175 // Returns true if EFLAG is consumed after this iterator in the rest of the
35176 // basic block or any successors of the basic block.
35177 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
35178                               MachineBasicBlock *BB) {
35179   // Scan forward through BB for a use/def of EFLAGS.
35180   for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
35181     if (mi.readsRegister(X86::EFLAGS))
35182       return true;
35183     // If we found a def, we can stop searching.
35184     if (mi.definesRegister(X86::EFLAGS))
35185       return false;
35186   }
35187 
35188   // If we hit the end of the block, check whether EFLAGS is live into a
35189   // successor.
35190   for (MachineBasicBlock *Succ : BB->successors())
35191     if (Succ->isLiveIn(X86::EFLAGS))
35192       return true;
35193 
35194   return false;
35195 }
35196 
35197 /// Utility function to emit xbegin specifying the start of an RTM region.
35198 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
35199                                      const TargetInstrInfo *TII) {
35200   const DebugLoc &DL = MI.getDebugLoc();
35201 
35202   const BasicBlock *BB = MBB->getBasicBlock();
35203   MachineFunction::iterator I = ++MBB->getIterator();
35204 
35205   // For the v = xbegin(), we generate
35206   //
35207   // thisMBB:
35208   //  xbegin fallMBB
35209   //
35210   // mainMBB:
35211   //  s0 = -1
35212   //
35213   // fallBB:
35214   //  eax = # XABORT_DEF
35215   //  s1 = eax
35216   //
35217   // sinkMBB:
35218   //  v = phi(s0/mainBB, s1/fallBB)
35219 
35220   MachineBasicBlock *thisMBB = MBB;
35221   MachineFunction *MF = MBB->getParent();
35222   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35223   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35224   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35225   MF->insert(I, mainMBB);
35226   MF->insert(I, fallMBB);
35227   MF->insert(I, sinkMBB);
35228 
35229   if (isEFLAGSLiveAfter(MI, MBB)) {
35230     mainMBB->addLiveIn(X86::EFLAGS);
35231     fallMBB->addLiveIn(X86::EFLAGS);
35232     sinkMBB->addLiveIn(X86::EFLAGS);
35233   }
35234 
35235   // Transfer the remainder of BB and its successor edges to sinkMBB.
35236   sinkMBB->splice(sinkMBB->begin(), MBB,
35237                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35238   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35239 
35240   MachineRegisterInfo &MRI = MF->getRegInfo();
35241   Register DstReg = MI.getOperand(0).getReg();
35242   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35243   Register mainDstReg = MRI.createVirtualRegister(RC);
35244   Register fallDstReg = MRI.createVirtualRegister(RC);
35245 
35246   // thisMBB:
35247   //  xbegin fallMBB
35248   //  # fallthrough to mainMBB
35249   //  # abort to fallMBB
35250   BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
35251   thisMBB->addSuccessor(mainMBB);
35252   thisMBB->addSuccessor(fallMBB);
35253 
35254   // mainMBB:
35255   //  mainDstReg := -1
35256   BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
35257   BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35258   mainMBB->addSuccessor(sinkMBB);
35259 
35260   // fallMBB:
35261   //  ; pseudo instruction to model hardware's definition from XABORT
35262   //  EAX := XABORT_DEF
35263   //  fallDstReg := EAX
35264   BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
35265   BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
35266       .addReg(X86::EAX);
35267   fallMBB->addSuccessor(sinkMBB);
35268 
35269   // sinkMBB:
35270   //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
35271   BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
35272       .addReg(mainDstReg).addMBB(mainMBB)
35273       .addReg(fallDstReg).addMBB(fallMBB);
35274 
35275   MI.eraseFromParent();
35276   return sinkMBB;
35277 }
35278 
35279 MachineBasicBlock *
35280 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
35281                                                MachineBasicBlock *MBB) const {
35282   // Emit va_arg instruction on X86-64.
35283 
35284   // Operands to this pseudo-instruction:
35285   // 0  ) Output        : destination address (reg)
35286   // 1-5) Input         : va_list address (addr, i64mem)
35287   // 6  ) ArgSize       : Size (in bytes) of vararg type
35288   // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
35289   // 8  ) Align         : Alignment of type
35290   // 9  ) EFLAGS (implicit-def)
35291 
35292   assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
35293   static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
35294 
35295   Register DestReg = MI.getOperand(0).getReg();
35296   MachineOperand &Base = MI.getOperand(1);
35297   MachineOperand &Scale = MI.getOperand(2);
35298   MachineOperand &Index = MI.getOperand(3);
35299   MachineOperand &Disp = MI.getOperand(4);
35300   MachineOperand &Segment = MI.getOperand(5);
35301   unsigned ArgSize = MI.getOperand(6).getImm();
35302   unsigned ArgMode = MI.getOperand(7).getImm();
35303   Align Alignment = Align(MI.getOperand(8).getImm());
35304 
35305   MachineFunction *MF = MBB->getParent();
35306 
35307   // Memory Reference
35308   assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
35309 
35310   MachineMemOperand *OldMMO = MI.memoperands().front();
35311 
35312   // Clone the MMO into two separate MMOs for loading and storing
35313   MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
35314       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
35315   MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
35316       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
35317 
35318   // Machine Information
35319   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35320   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
35321   const TargetRegisterClass *AddrRegClass =
35322       getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
35323   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
35324   const DebugLoc &DL = MI.getDebugLoc();
35325 
35326   // struct va_list {
35327   //   i32   gp_offset
35328   //   i32   fp_offset
35329   //   i64   overflow_area (address)
35330   //   i64   reg_save_area (address)
35331   // }
35332   // sizeof(va_list) = 24
35333   // alignment(va_list) = 8
35334 
35335   unsigned TotalNumIntRegs = 6;
35336   unsigned TotalNumXMMRegs = 8;
35337   bool UseGPOffset = (ArgMode == 1);
35338   bool UseFPOffset = (ArgMode == 2);
35339   unsigned MaxOffset = TotalNumIntRegs * 8 +
35340                        (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
35341 
35342   /* Align ArgSize to a multiple of 8 */
35343   unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
35344   bool NeedsAlign = (Alignment > 8);
35345 
35346   MachineBasicBlock *thisMBB = MBB;
35347   MachineBasicBlock *overflowMBB;
35348   MachineBasicBlock *offsetMBB;
35349   MachineBasicBlock *endMBB;
35350 
35351   unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
35352   unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
35353   unsigned OffsetReg = 0;
35354 
35355   if (!UseGPOffset && !UseFPOffset) {
35356     // If we only pull from the overflow region, we don't create a branch.
35357     // We don't need to alter control flow.
35358     OffsetDestReg = 0; // unused
35359     OverflowDestReg = DestReg;
35360 
35361     offsetMBB = nullptr;
35362     overflowMBB = thisMBB;
35363     endMBB = thisMBB;
35364   } else {
35365     // First emit code to check if gp_offset (or fp_offset) is below the bound.
35366     // If so, pull the argument from reg_save_area. (branch to offsetMBB)
35367     // If not, pull from overflow_area. (branch to overflowMBB)
35368     //
35369     //       thisMBB
35370     //         |     .
35371     //         |        .
35372     //     offsetMBB   overflowMBB
35373     //         |        .
35374     //         |     .
35375     //        endMBB
35376 
35377     // Registers for the PHI in endMBB
35378     OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
35379     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
35380 
35381     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
35382     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35383     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35384     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35385 
35386     MachineFunction::iterator MBBIter = ++MBB->getIterator();
35387 
35388     // Insert the new basic blocks
35389     MF->insert(MBBIter, offsetMBB);
35390     MF->insert(MBBIter, overflowMBB);
35391     MF->insert(MBBIter, endMBB);
35392 
35393     // Transfer the remainder of MBB and its successor edges to endMBB.
35394     endMBB->splice(endMBB->begin(), thisMBB,
35395                    std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
35396     endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
35397 
35398     // Make offsetMBB and overflowMBB successors of thisMBB
35399     thisMBB->addSuccessor(offsetMBB);
35400     thisMBB->addSuccessor(overflowMBB);
35401 
35402     // endMBB is a successor of both offsetMBB and overflowMBB
35403     offsetMBB->addSuccessor(endMBB);
35404     overflowMBB->addSuccessor(endMBB);
35405 
35406     // Load the offset value into a register
35407     OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
35408     BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
35409         .add(Base)
35410         .add(Scale)
35411         .add(Index)
35412         .addDisp(Disp, UseFPOffset ? 4 : 0)
35413         .add(Segment)
35414         .setMemRefs(LoadOnlyMMO);
35415 
35416     // Check if there is enough room left to pull this argument.
35417     BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
35418       .addReg(OffsetReg)
35419       .addImm(MaxOffset + 8 - ArgSizeA8);
35420 
35421     // Branch to "overflowMBB" if offset >= max
35422     // Fall through to "offsetMBB" otherwise
35423     BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
35424       .addMBB(overflowMBB).addImm(X86::COND_AE);
35425   }
35426 
35427   // In offsetMBB, emit code to use the reg_save_area.
35428   if (offsetMBB) {
35429     assert(OffsetReg != 0);
35430 
35431     // Read the reg_save_area address.
35432     Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
35433     BuildMI(
35434         offsetMBB, DL,
35435         TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
35436         RegSaveReg)
35437         .add(Base)
35438         .add(Scale)
35439         .add(Index)
35440         .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
35441         .add(Segment)
35442         .setMemRefs(LoadOnlyMMO);
35443 
35444     if (Subtarget.isTarget64BitLP64()) {
35445       // Zero-extend the offset
35446       Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
35447       BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
35448           .addImm(0)
35449           .addReg(OffsetReg)
35450           .addImm(X86::sub_32bit);
35451 
35452       // Add the offset to the reg_save_area to get the final address.
35453       BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
35454           .addReg(OffsetReg64)
35455           .addReg(RegSaveReg);
35456     } else {
35457       // Add the offset to the reg_save_area to get the final address.
35458       BuildMI(offsetMBB, DL, TII->get(X86::ADD32rr), OffsetDestReg)
35459           .addReg(OffsetReg)
35460           .addReg(RegSaveReg);
35461     }
35462 
35463     // Compute the offset for the next argument
35464     Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
35465     BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
35466       .addReg(OffsetReg)
35467       .addImm(UseFPOffset ? 16 : 8);
35468 
35469     // Store it back into the va_list.
35470     BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
35471         .add(Base)
35472         .add(Scale)
35473         .add(Index)
35474         .addDisp(Disp, UseFPOffset ? 4 : 0)
35475         .add(Segment)
35476         .addReg(NextOffsetReg)
35477         .setMemRefs(StoreOnlyMMO);
35478 
35479     // Jump to endMBB
35480     BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
35481       .addMBB(endMBB);
35482   }
35483 
35484   //
35485   // Emit code to use overflow area
35486   //
35487 
35488   // Load the overflow_area address into a register.
35489   Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
35490   BuildMI(overflowMBB, DL,
35491           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
35492           OverflowAddrReg)
35493       .add(Base)
35494       .add(Scale)
35495       .add(Index)
35496       .addDisp(Disp, 8)
35497       .add(Segment)
35498       .setMemRefs(LoadOnlyMMO);
35499 
35500   // If we need to align it, do so. Otherwise, just copy the address
35501   // to OverflowDestReg.
35502   if (NeedsAlign) {
35503     // Align the overflow address
35504     Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
35505 
35506     // aligned_addr = (addr + (align-1)) & ~(align-1)
35507     BuildMI(
35508         overflowMBB, DL,
35509         TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
35510         TmpReg)
35511         .addReg(OverflowAddrReg)
35512         .addImm(Alignment.value() - 1);
35513 
35514     BuildMI(
35515         overflowMBB, DL,
35516         TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
35517         OverflowDestReg)
35518         .addReg(TmpReg)
35519         .addImm(~(uint64_t)(Alignment.value() - 1));
35520   } else {
35521     BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
35522       .addReg(OverflowAddrReg);
35523   }
35524 
35525   // Compute the next overflow address after this argument.
35526   // (the overflow address should be kept 8-byte aligned)
35527   Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
35528   BuildMI(
35529       overflowMBB, DL,
35530       TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
35531       NextAddrReg)
35532       .addReg(OverflowDestReg)
35533       .addImm(ArgSizeA8);
35534 
35535   // Store the new overflow address.
35536   BuildMI(overflowMBB, DL,
35537           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
35538       .add(Base)
35539       .add(Scale)
35540       .add(Index)
35541       .addDisp(Disp, 8)
35542       .add(Segment)
35543       .addReg(NextAddrReg)
35544       .setMemRefs(StoreOnlyMMO);
35545 
35546   // If we branched, emit the PHI to the front of endMBB.
35547   if (offsetMBB) {
35548     BuildMI(*endMBB, endMBB->begin(), DL,
35549             TII->get(X86::PHI), DestReg)
35550       .addReg(OffsetDestReg).addMBB(offsetMBB)
35551       .addReg(OverflowDestReg).addMBB(overflowMBB);
35552   }
35553 
35554   // Erase the pseudo instruction
35555   MI.eraseFromParent();
35556 
35557   return endMBB;
35558 }
35559 
35560 // The EFLAGS operand of SelectItr might be missing a kill marker
35561 // because there were multiple uses of EFLAGS, and ISel didn't know
35562 // which to mark. Figure out whether SelectItr should have had a
35563 // kill marker, and set it if it should. Returns the correct kill
35564 // marker value.
35565 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
35566                                      MachineBasicBlock* BB,
35567                                      const TargetRegisterInfo* TRI) {
35568   if (isEFLAGSLiveAfter(SelectItr, BB))
35569     return false;
35570 
35571   // We found a def, or hit the end of the basic block and EFLAGS wasn't live
35572   // out. SelectMI should have a kill flag on EFLAGS.
35573   SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
35574   return true;
35575 }
35576 
35577 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
35578 // together with other CMOV pseudo-opcodes into a single basic-block with
35579 // a conditional jump around it.
35580 static bool isCMOVPseudo(MachineInstr &MI) {
35581   switch (MI.getOpcode()) {
35582   case X86::CMOV_FR16:
35583   case X86::CMOV_FR16X:
35584   case X86::CMOV_FR32:
35585   case X86::CMOV_FR32X:
35586   case X86::CMOV_FR64:
35587   case X86::CMOV_FR64X:
35588   case X86::CMOV_GR8:
35589   case X86::CMOV_GR16:
35590   case X86::CMOV_GR32:
35591   case X86::CMOV_RFP32:
35592   case X86::CMOV_RFP64:
35593   case X86::CMOV_RFP80:
35594   case X86::CMOV_VR64:
35595   case X86::CMOV_VR128:
35596   case X86::CMOV_VR128X:
35597   case X86::CMOV_VR256:
35598   case X86::CMOV_VR256X:
35599   case X86::CMOV_VR512:
35600   case X86::CMOV_VK1:
35601   case X86::CMOV_VK2:
35602   case X86::CMOV_VK4:
35603   case X86::CMOV_VK8:
35604   case X86::CMOV_VK16:
35605   case X86::CMOV_VK32:
35606   case X86::CMOV_VK64:
35607     return true;
35608 
35609   default:
35610     return false;
35611   }
35612 }
35613 
35614 // Helper function that inserts PHI functions into SinkMBB:
35615 //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
35616 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
35617 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
35618 // the last PHI function inserted.
35619 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
35620     MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
35621     MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
35622     MachineBasicBlock *SinkMBB) {
35623   MachineFunction *MF = TrueMBB->getParent();
35624   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
35625   const DebugLoc &DL = MIItBegin->getDebugLoc();
35626 
35627   X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
35628   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
35629 
35630   MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
35631 
35632   // As we are creating the PHIs, we have to be careful if there is more than
35633   // one.  Later CMOVs may reference the results of earlier CMOVs, but later
35634   // PHIs have to reference the individual true/false inputs from earlier PHIs.
35635   // That also means that PHI construction must work forward from earlier to
35636   // later, and that the code must maintain a mapping from each earlier PHI's
35637   // destination register to the registers that went into that PHI.
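  //
  // For illustration (register names %a/%b/%c/%x/%y are hypothetical): for two
  // CMOVs with the same condition,
  //
  //   %x = CMOV %a, %b, cc
  //   %y = CMOV %x, %c, cc
  //
  // the second PHI must not reference %x (a PHI result defined in this same
  // block); the rewrite table maps %x back to its (%a, %b) inputs, giving
  //
  //   %x = PHI [ %a, FalseMBB ], [ %b, TrueMBB ]
  //   %y = PHI [ %a, FalseMBB ], [ %c, TrueMBB ]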
35638   DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
35639   MachineInstrBuilder MIB;
35640 
35641   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
35642     Register DestReg = MIIt->getOperand(0).getReg();
35643     Register Op1Reg = MIIt->getOperand(1).getReg();
35644     Register Op2Reg = MIIt->getOperand(2).getReg();
35645 
35646     // If this CMOV we are generating is the opposite condition from
35647     // the jump we generated, then we have to swap the operands for the
35648     // PHI that is going to be generated.
35649     if (MIIt->getOperand(3).getImm() == OppCC)
35650       std::swap(Op1Reg, Op2Reg);
35651 
35652     if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
35653       Op1Reg = RegRewriteTable[Op1Reg].first;
35654 
35655     if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
35656       Op2Reg = RegRewriteTable[Op2Reg].second;
35657 
35658     MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
35659               .addReg(Op1Reg)
35660               .addMBB(FalseMBB)
35661               .addReg(Op2Reg)
35662               .addMBB(TrueMBB);
35663 
35664     // Add this PHI to the rewrite table.
35665     RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
35666   }
35667 
35668   return MIB;
35669 }
35670 
35671 // Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
35672 MachineBasicBlock *
35673 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
35674                                              MachineInstr &SecondCascadedCMOV,
35675                                              MachineBasicBlock *ThisMBB) const {
35676   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35677   const DebugLoc &DL = FirstCMOV.getDebugLoc();
35678 
35679   // We lower cascaded CMOVs such as
35680   //
35681   //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
35682   //
35683   // to two successive branches.
35684   //
35685   // Without this, we would add a PHI between the two jumps, which ends up
35686   // creating a few copies all around. For instance, for
35687   //
35688   //    (sitofp (zext (fcmp une)))
35689   //
35690   // we would generate:
35691   //
35692   //         ucomiss %xmm1, %xmm0
35693   //         movss  <1.0f>, %xmm0
35694   //         movaps  %xmm0, %xmm1
35695   //         jne     .LBB5_2
35696   //         xorps   %xmm1, %xmm1
35697   // .LBB5_2:
35698   //         jp      .LBB5_4
35699   //         movaps  %xmm1, %xmm0
35700   // .LBB5_4:
35701   //         retq
35702   //
35703   // because this custom-inserter would have generated:
35704   //
35705   //   A
35706   //   | \
35707   //   |  B
35708   //   | /
35709   //   C
35710   //   | \
35711   //   |  D
35712   //   | /
35713   //   E
35714   //
35715   // A: X = ...; Y = ...
35716   // B: empty
35717   // C: Z = PHI [X, A], [Y, B]
35718   // D: empty
35719   // E: PHI [X, C], [Z, D]
35720   //
35721   // If we lower both CMOVs in a single step, we can instead generate:
35722   //
35723   //   A
35724   //   | \
35725   //   |  C
35726   //   | /|
35727   //   |/ |
35728   //   |  |
35729   //   |  D
35730   //   | /
35731   //   E
35732   //
35733   // A: X = ...; Y = ...
35734   // D: empty
35735   // E: PHI [X, A], [X, C], [Y, D]
35736   //
35737   // Which, in our sitofp/fcmp example, gives us something like:
35738   //
35739   //         ucomiss %xmm1, %xmm0
35740   //         movss  <1.0f>, %xmm0
35741   //         jne     .LBB5_4
35742   //         jp      .LBB5_4
35743   //         xorps   %xmm0, %xmm0
35744   // .LBB5_4:
35745   //         retq
35746   //
35747 
35748   // We lower cascaded CMOV into two successive branches to the same block.
35749   // EFLAGS is used by both, so mark it as live in the second.
35750   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
35751   MachineFunction *F = ThisMBB->getParent();
35752   MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
35753   MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
35754   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
35755 
35756   MachineFunction::iterator It = ++ThisMBB->getIterator();
35757   F->insert(It, FirstInsertedMBB);
35758   F->insert(It, SecondInsertedMBB);
35759   F->insert(It, SinkMBB);
35760 
35761   // For a cascaded CMOV, we lower it to two successive branches to
35762   // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
35763   // the FirstInsertedMBB.
35764   FirstInsertedMBB->addLiveIn(X86::EFLAGS);
35765 
35766   // If the EFLAGS register isn't dead in the terminator, then claim that it's
35767   // live into the sink and copy blocks.
35768   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35769   if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
35770       !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
35771     SecondInsertedMBB->addLiveIn(X86::EFLAGS);
35772     SinkMBB->addLiveIn(X86::EFLAGS);
35773   }
35774 
35775   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
35776   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
35777                   std::next(MachineBasicBlock::iterator(FirstCMOV)),
35778                   ThisMBB->end());
35779   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
35780 
35781   // Fallthrough block for ThisMBB.
35782   ThisMBB->addSuccessor(FirstInsertedMBB);
35783   // The true block target of the first branch is always SinkMBB.
35784   ThisMBB->addSuccessor(SinkMBB);
35785   // Fallthrough block for FirstInsertedMBB.
35786   FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
35787   // The true block for the branch of FirstInsertedMBB.
35788   FirstInsertedMBB->addSuccessor(SinkMBB);
35789   // This is fallthrough.
35790   SecondInsertedMBB->addSuccessor(SinkMBB);
35791 
35792   // Create the conditional branch instructions.
35793   X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
35794   BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
35795 
35796   X86::CondCode SecondCC =
35797       X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
35798   BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
35799 
35800   //  SinkMBB:
35801   //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
35802   Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
35803   Register Op1Reg = FirstCMOV.getOperand(1).getReg();
35804   Register Op2Reg = FirstCMOV.getOperand(2).getReg();
35805   MachineInstrBuilder MIB =
35806       BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
35807           .addReg(Op1Reg)
35808           .addMBB(SecondInsertedMBB)
35809           .addReg(Op2Reg)
35810           .addMBB(ThisMBB);
35811 
35812   // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
35813   // (the True operand of the SELECT_CC/CMOV nodes).
35814   MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
35815 
35816   // Now remove the CMOVs.
35817   FirstCMOV.eraseFromParent();
35818   SecondCascadedCMOV.eraseFromParent();
35819 
35820   return SinkMBB;
35821 }
35822 
35823 MachineBasicBlock *
35824 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
35825                                      MachineBasicBlock *ThisMBB) const {
35826   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35827   const DebugLoc &DL = MI.getDebugLoc();
35828 
35829   // To "insert" a SELECT_CC instruction, we actually have to insert the
35830   // diamond control-flow pattern.  The incoming instruction knows the
35831   // destination vreg to set, the condition code register to branch on, the
35832   // true/false values to select between and a branch opcode to use.
35833 
35834   //  ThisMBB:
35835   //  ...
35836   //   TrueVal = ...
35837   //   cmpTY ccX, r1, r2
35838   //   bCC copy1MBB
35839   //   fallthrough --> FalseMBB
35840 
35841   // This code lowers all pseudo-CMOV instructions. Generally it lowers these
35842   // as described above, by inserting a BB, and then making a PHI at the join
35843   // point to select the true and false operands of the CMOV in the PHI.
35844   //
35845   // The code also handles two different cases of multiple CMOV opcodes
35846   // in a row.
35847   //
35848   // Case 1:
35849   // In this case, there are multiple CMOVs in a row, all of which are based on
35850   // the same condition setting (or the exact opposite condition setting).
35851   // In this case we can lower all the CMOVs using a single inserted BB, and
35852   // then make a number of PHIs at the join point to model the CMOVs. The only
35853   // trickiness here is that in a case like:
35854   //
35855   // t2 = CMOV cond1 t1, f1
35856   // t3 = CMOV cond1 t2, f2
35857   //
35858   // when rewriting this into PHIs, we have to perform some renaming on the
35859   // temps since you cannot have a PHI operand refer to a PHI result earlier
35860   // in the same block.  The "simple" but wrong lowering would be:
35861   //
35862   // t2 = PHI t1(BB1), f1(BB2)
35863   // t3 = PHI t2(BB1), f2(BB2)
35864   //
35865   // but clearly t2 is not defined in BB1, so that is incorrect. The proper
35866   // renaming is to note that on the path through BB1, t2 is really just a
35867   // copy of t1, and do that renaming, properly generating:
35868   //
35869   // t2 = PHI t1(BB1), f1(BB2)
35870   // t3 = PHI t1(BB1), f2(BB2)
35871   //
35872   // Case 2:
35873   // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
35874   // function - EmitLoweredCascadedSelect.
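  //
  // In the simple (non-cascaded) case the result is a single diamond: ThisMBB
  // conditionally jumps to SinkMBB (when CC holds) and otherwise falls through
  // to FalseMBB, which falls through to SinkMBB; one PHI per lowered CMOV is
  // then created in SinkMBB. Roughly:
  //
  //   ThisMBB --JCC(CC)--> SinkMBB
  //      |                    ^
  //      +-----> FalseMBB ----+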
35875 
35876   X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
35877   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
35878   MachineInstr *LastCMOV = &MI;
35879   MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
35880 
35881   // Check for case 1, where there are multiple CMOVs with the same condition
35882   // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
35883   // number of jumps the most.
35884 
35885   if (isCMOVPseudo(MI)) {
35886     // See if we have a string of CMOVs with the same condition. Skip over
35887     // intervening debug insts.
35888     while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
35889            (NextMIIt->getOperand(3).getImm() == CC ||
35890             NextMIIt->getOperand(3).getImm() == OppCC)) {
35891       LastCMOV = &*NextMIIt;
35892       NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
35893     }
35894   }
35895 
35896   // This checks for case 2, but only does so if we didn't already find
35897   // case 1, as indicated by LastCMOV == &MI.
35898   if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
35899       NextMIIt->getOpcode() == MI.getOpcode() &&
35900       NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
35901       NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
35902       NextMIIt->getOperand(1).isKill()) {
35903     return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
35904   }
35905 
35906   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
35907   MachineFunction *F = ThisMBB->getParent();
35908   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
35909   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
35910 
35911   MachineFunction::iterator It = ++ThisMBB->getIterator();
35912   F->insert(It, FalseMBB);
35913   F->insert(It, SinkMBB);
35914 
35915   // If the EFLAGS register isn't dead in the terminator, then claim that it's
35916   // live into the sink and copy blocks.
35917   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35918   if (!LastCMOV->killsRegister(X86::EFLAGS) &&
35919       !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
35920     FalseMBB->addLiveIn(X86::EFLAGS);
35921     SinkMBB->addLiveIn(X86::EFLAGS);
35922   }
35923 
35924   // Transfer any debug instructions inside the CMOV sequence to the sunk block.
35925   auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
35926                                    MachineBasicBlock::iterator(LastCMOV));
35927   for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
35928     if (MI.isDebugInstr())
35929       SinkMBB->push_back(MI.removeFromParent());
35930 
35931   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
35932   SinkMBB->splice(SinkMBB->end(), ThisMBB,
35933                   std::next(MachineBasicBlock::iterator(LastCMOV)),
35934                   ThisMBB->end());
35935   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
35936 
35937   // Fallthrough block for ThisMBB.
35938   ThisMBB->addSuccessor(FalseMBB);
35939   // The true block target of the first (or only) branch is always SinkMBB.
35940   ThisMBB->addSuccessor(SinkMBB);
35941   // Fallthrough block for FalseMBB.
35942   FalseMBB->addSuccessor(SinkMBB);
35943 
35944   // Create the conditional branch instruction.
35945   BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
35946 
35947   //  SinkMBB:
35948   //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
35949   //  ...
35950   MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
35951   MachineBasicBlock::iterator MIItEnd =
35952       std::next(MachineBasicBlock::iterator(LastCMOV));
35953   createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
35954 
35955   // Now remove the CMOV(s).
35956   ThisMBB->erase(MIItBegin, MIItEnd);
35957 
35958   return SinkMBB;
35959 }
35960 
35961 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
35962   if (IsLP64) {
35963     if (isInt<8>(Imm))
35964       return X86::SUB64ri8;
35965     return X86::SUB64ri32;
35966   } else {
35967     if (isInt<8>(Imm))
35968       return X86::SUB32ri8;
35969     return X86::SUB32ri;
35970   }
35971 }
35972 
35973 MachineBasicBlock *
35974 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
35975                                            MachineBasicBlock *MBB) const {
35976   MachineFunction *MF = MBB->getParent();
35977   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35978   const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
35979   const DebugLoc &DL = MI.getDebugLoc();
35980   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
35981 
35982   const unsigned ProbeSize = getStackProbeSize(*MF);
35983 
35984   MachineRegisterInfo &MRI = MF->getRegInfo();
35985   MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35986   MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35987   MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35988 
35989   MachineFunction::iterator MBBIter = ++MBB->getIterator();
35990   MF->insert(MBBIter, testMBB);
35991   MF->insert(MBBIter, blockMBB);
35992   MF->insert(MBBIter, tailMBB);
35993 
35994   Register sizeVReg = MI.getOperand(1).getReg();
35995 
35996   Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
35997 
35998   Register TmpStackPtr = MRI.createVirtualRegister(
35999       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
36000   Register FinalStackPtr = MRI.createVirtualRegister(
36001       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
36002 
36003   BuildMI(*MBB, {MI}, DL, TII->get(TargetOpcode::COPY), TmpStackPtr)
36004       .addReg(physSPReg);
36005   {
36006     const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
36007     BuildMI(*MBB, {MI}, DL, TII->get(Opc), FinalStackPtr)
36008         .addReg(TmpStackPtr)
36009         .addReg(sizeVReg);
36010   }
36011 
36012   // Loop test: exit to tailMBB once the stack pointer has been lowered to (or
36012   // past) FinalStackPtr.
36013 
36014   BuildMI(testMBB, DL,
36015           TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
36016       .addReg(FinalStackPtr)
36017       .addReg(physSPReg);
36018 
36019   BuildMI(testMBB, DL, TII->get(X86::JCC_1))
36020       .addMBB(tailMBB)
36021       .addImm(X86::COND_GE);
36022   testMBB->addSuccessor(blockMBB);
36023   testMBB->addSuccessor(tailMBB);
36024 
36025   // Touch the block, then extend it. This is the opposite order from a static
36026   // probe, where we allocate then touch; it avoids the need to probe the tail
36027   // of the static alloca. Possible scenarios are:
36028   //
36029   //       + ---- <- ------------ <- ------------- <- ------------ +
36030   //       |                                                       |
36031   // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
36032   //                                                               |                                                               |
36033   //                                                               + <- ----------- <- ------------ <- ----------- <- ------------ +
36034   //
36035   // The property we want to enforce is to never have more than [page alloc] between two probes.
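  //
  // As a rough sketch (virtual registers and block names simplified), the
  // emitted code forms the loop:
  //
  //   %final = %sp - %size          ; emitted in the original block
  // testMBB:
  //   cmp %final, %sp               ; sets flags from %final - %sp
  //   jge tailMBB                   ; done once %sp has reached %final
  // blockMBB:
  //   xor [%sp], 0                  ; touch the current page
  //   sub %sp, ProbeSize
  //   jmp testMBB
  // tailMBB:
  //   %result = copy %final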
36036 
36037   const unsigned XORMIOpc =
36038       TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
36039   addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
36040       .addImm(0);
36041 
36042   BuildMI(blockMBB, DL,
36043           TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)), physSPReg)
36044       .addReg(physSPReg)
36045       .addImm(ProbeSize);
36046 
36047 
36048   BuildMI(blockMBB, DL, TII->get(X86::JMP_1)).addMBB(testMBB);
36049   blockMBB->addSuccessor(testMBB);
36050 
36051   // Replace the original instruction's result with the expected stack pointer.
36052   BuildMI(tailMBB, DL, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
36053       .addReg(FinalStackPtr);
36054 
36055   tailMBB->splice(tailMBB->end(), MBB,
36056                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
36057   tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
36058   MBB->addSuccessor(testMBB);
36059 
36060   // Delete the original pseudo instruction.
36061   MI.eraseFromParent();
36062 
36063   // And we're done.
36064   return tailMBB;
36065 }
36066 
36067 MachineBasicBlock *
36068 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
36069                                         MachineBasicBlock *BB) const {
36070   MachineFunction *MF = BB->getParent();
36071   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36072   const DebugLoc &DL = MI.getDebugLoc();
36073   const BasicBlock *LLVM_BB = BB->getBasicBlock();
36074 
36075   assert(MF->shouldSplitStack());
36076 
36077   const bool Is64Bit = Subtarget.is64Bit();
36078   const bool IsLP64 = Subtarget.isTarget64BitLP64();
36079 
36080   const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
36081   const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
36082 
36083   // BB:
36084   //  ... [Till the alloca]
36085   // If stacklet is not large enough, jump to mallocMBB
36086   //
36087   // bumpMBB:
36088   //  Allocate by subtracting from RSP
36089   //  Jump to continueMBB
36090   //
36091   // mallocMBB:
36092   //  Allocate by call to runtime
36093   //
36094   // continueMBB:
36095   //  ...
36096   //  [rest of original BB]
36097   //
36098 
36099   MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36100   MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36101   MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36102 
36103   MachineRegisterInfo &MRI = MF->getRegInfo();
36104   const TargetRegisterClass *AddrRegClass =
36105       getRegClassFor(getPointerTy(MF->getDataLayout()));
36106 
36107   Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
36108            bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
36109            tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
36110            SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
36111            sizeVReg = MI.getOperand(1).getReg(),
36112            physSPReg =
36113                IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
36114 
36115   MachineFunction::iterator MBBIter = ++BB->getIterator();
36116 
36117   MF->insert(MBBIter, bumpMBB);
36118   MF->insert(MBBIter, mallocMBB);
36119   MF->insert(MBBIter, continueMBB);
36120 
36121   continueMBB->splice(continueMBB->begin(), BB,
36122                       std::next(MachineBasicBlock::iterator(MI)), BB->end());
36123   continueMBB->transferSuccessorsAndUpdatePHIs(BB);
36124 
36125   // Add code to the main basic block to check if the stack limit has been hit,
36126   // and if so, jump to mallocMBB; otherwise fall through to bumpMBB.
36127   BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
36128   BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
36129     .addReg(tmpSPVReg).addReg(sizeVReg);
36130   BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
36131     .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
36132     .addReg(SPLimitVReg);
36133   BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
36134 
36135   // bumpMBB simply decreases the stack pointer, since we know the current
36136   // stacklet has enough space.
36137   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
36138     .addReg(SPLimitVReg);
36139   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
36140     .addReg(SPLimitVReg);
36141   BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
36142 
36143   // Calls into a routine in libgcc to allocate more space from the heap.
36144   const uint32_t *RegMask =
36145       Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
36146   if (IsLP64) {
36147     BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
36148       .addReg(sizeVReg);
36149     BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
36150       .addExternalSymbol("__morestack_allocate_stack_space")
36151       .addRegMask(RegMask)
36152       .addReg(X86::RDI, RegState::Implicit)
36153       .addReg(X86::RAX, RegState::ImplicitDefine);
36154   } else if (Is64Bit) {
36155     BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
36156       .addReg(sizeVReg);
36157     BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
36158       .addExternalSymbol("__morestack_allocate_stack_space")
36159       .addRegMask(RegMask)
36160       .addReg(X86::EDI, RegState::Implicit)
36161       .addReg(X86::EAX, RegState::ImplicitDefine);
36162   } else {
36163     BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
36164       .addImm(12);
36165     BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
36166     BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
36167       .addExternalSymbol("__morestack_allocate_stack_space")
36168       .addRegMask(RegMask)
36169       .addReg(X86::EAX, RegState::ImplicitDefine);
36170   }
36171 
36172   if (!Is64Bit)
36173     BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
36174       .addImm(16);
36175 
36176   BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
36177     .addReg(IsLP64 ? X86::RAX : X86::EAX);
36178   BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
36179 
36180   // Set up the CFG correctly.
36181   BB->addSuccessor(bumpMBB);
36182   BB->addSuccessor(mallocMBB);
36183   mallocMBB->addSuccessor(continueMBB);
36184   bumpMBB->addSuccessor(continueMBB);
36185 
36186   // Take care of the PHI nodes.
36187   BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
36188           MI.getOperand(0).getReg())
36189       .addReg(mallocPtrVReg)
36190       .addMBB(mallocMBB)
36191       .addReg(bumpSPPtrVReg)
36192       .addMBB(bumpMBB);
36193 
36194   // Delete the original pseudo instruction.
36195   MI.eraseFromParent();
36196 
36197   // And we're done.
36198   return continueMBB;
36199 }
36200 
36201 MachineBasicBlock *
36202 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
36203                                        MachineBasicBlock *BB) const {
36204   MachineFunction *MF = BB->getParent();
36205   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
36206   MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
36207   const DebugLoc &DL = MI.getDebugLoc();
36208 
36209   assert(!isAsynchronousEHPersonality(
36210              classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
36211          "SEH does not use catchret!");
36212 
36213   // Only 32-bit EH needs to worry about manually restoring stack pointers.
36214   if (!Subtarget.is32Bit())
36215     return BB;
36216 
36217   // C++ EH creates a new target block to hold the restore code, and wires up
36218   // the new block to the return destination with a normal JMP_4.
36219   MachineBasicBlock *RestoreMBB =
36220       MF->CreateMachineBasicBlock(BB->getBasicBlock());
36221   assert(BB->succ_size() == 1);
36222   MF->insert(std::next(BB->getIterator()), RestoreMBB);
36223   RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
36224   BB->addSuccessor(RestoreMBB);
36225   MI.getOperand(0).setMBB(RestoreMBB);
36226 
36227   // Marking this as an EH pad but not a funclet entry block causes PEI to
36228   // restore stack pointers in the block.
36229   RestoreMBB->setIsEHPad(true);
36230 
36231   auto RestoreMBBI = RestoreMBB->begin();
36232   BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
36233   return BB;
36234 }
36235 
36236 MachineBasicBlock *
36237 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
36238                                       MachineBasicBlock *BB) const {
36239   // Here we replace TLSADDR with the sequence:
36240   // adjust_stackdown -> TLSADDR -> adjust_stackup.
36241   // We need this because TLSADDR is lowered into a call
36242   // inside MC; without the two markers, shrink-wrapping
36243   // may push the prologue/epilogue past them.
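  //
  // In other words, the block around the TLSADDR roughly becomes (using the
  // opcodes returned by getCallFrameSetupOpcode/getCallFrameDestroyOpcode):
  //
  //   CALLSEQ_START 0, 0, 0
  //   TLSADDR ...
  //   CALLSEQ_END 0, 0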
36244   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
36245   const DebugLoc &DL = MI.getDebugLoc();
36246   MachineFunction &MF = *BB->getParent();
36247 
36248   // Emit CALLSEQ_START right before the instruction.
36249   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
36250   MachineInstrBuilder CallseqStart =
36251     BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
36252   BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
36253 
36254   // Emit CALLSEQ_END right after the instruction.
36255   // We don't call erase from parent because we want to keep the
36256   // original instruction around.
36257   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
36258   MachineInstrBuilder CallseqEnd =
36259     BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
36260   BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
36261 
36262   return BB;
36263 }
36264 
36265 MachineBasicBlock *
36266 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
36267                                       MachineBasicBlock *BB) const {
36268   // This is pretty easy.  We're taking the value that we received from
36269   // our load from the relocation, sticking it in either RDI (x86-64)
36270   // or EAX and doing an indirect call.  The return value will then
36271   // be in the normal return register.
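  //
  // For the 64-bit case this amounts to roughly (AT&T syntax, symbol name
  // purely illustrative):
  //
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)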
36272   MachineFunction *F = BB->getParent();
36273   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36274   const DebugLoc &DL = MI.getDebugLoc();
36275 
36276   assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
36277   assert(MI.getOperand(3).isGlobal() && "This should be a global");
36278 
36279   // Get a register mask for the lowered call.
36280   // FIXME: The 32-bit calls have non-standard calling conventions. Use a
36281   // proper register mask.
36282   const uint32_t *RegMask =
36283       Subtarget.is64Bit() ?
36284       Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
36285       Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
36286   if (Subtarget.is64Bit()) {
36287     MachineInstrBuilder MIB =
36288         BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
36289             .addReg(X86::RIP)
36290             .addImm(0)
36291             .addReg(0)
36292             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36293                               MI.getOperand(3).getTargetFlags())
36294             .addReg(0);
36295     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
36296     addDirectMem(MIB, X86::RDI);
36297     MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
36298   } else if (!isPositionIndependent()) {
36299     MachineInstrBuilder MIB =
36300         BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
36301             .addReg(0)
36302             .addImm(0)
36303             .addReg(0)
36304             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36305                               MI.getOperand(3).getTargetFlags())
36306             .addReg(0);
36307     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
36308     addDirectMem(MIB, X86::EAX);
36309     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
36310   } else {
36311     MachineInstrBuilder MIB =
36312         BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
36313             .addReg(TII->getGlobalBaseReg(F))
36314             .addImm(0)
36315             .addReg(0)
36316             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36317                               MI.getOperand(3).getTargetFlags())
36318             .addReg(0);
36319     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
36320     addDirectMem(MIB, X86::EAX);
36321     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
36322   }
36323 
36324   MI.eraseFromParent(); // The pseudo instruction is gone now.
36325   return BB;
36326 }
36327 
36328 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
36329   switch (RPOpc) {
36330   case X86::INDIRECT_THUNK_CALL32:
36331     return X86::CALLpcrel32;
36332   case X86::INDIRECT_THUNK_CALL64:
36333     return X86::CALL64pcrel32;
36334   case X86::INDIRECT_THUNK_TCRETURN32:
36335     return X86::TCRETURNdi;
36336   case X86::INDIRECT_THUNK_TCRETURN64:
36337     return X86::TCRETURNdi64;
36338   }
36339   llvm_unreachable("not indirect thunk opcode");
36340 }
36341 
36342 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
36343                                           unsigned Reg) {
36344   if (Subtarget.useRetpolineExternalThunk()) {
36345     // When using an external thunk for retpolines, we pick names that match the
36346     // names GCC happens to use as well. This helps simplify the implementation
36347     // of the thunks for kernels where they have no easy ability to create
36348     // aliases and are doing non-trivial configuration of the thunk's body. For
36349     // example, the Linux kernel will do boot-time hot patching of the thunk
36350     // bodies and cannot easily export aliases of these to loaded modules.
36351     //
36352     // Note that at any point in the future, we may need to change the semantics
36353     // of how we implement retpolines and at that time will likely change the
36354     // name of the called thunk. Essentially, there is no hard guarantee that
36355     // LLVM will generate calls to specific thunks; we merely make a best-effort
36356     // attempt to help out kernels and other systems where duplicating the
36357     // thunks is costly.
36358     switch (Reg) {
36359     case X86::EAX:
36360       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36361       return "__x86_indirect_thunk_eax";
36362     case X86::ECX:
36363       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36364       return "__x86_indirect_thunk_ecx";
36365     case X86::EDX:
36366       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36367       return "__x86_indirect_thunk_edx";
36368     case X86::EDI:
36369       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36370       return "__x86_indirect_thunk_edi";
36371     case X86::R11:
36372       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36373       return "__x86_indirect_thunk_r11";
36374     }
36375     llvm_unreachable("unexpected reg for external indirect thunk");
36376   }
36377 
36378   if (Subtarget.useRetpolineIndirectCalls() ||
36379       Subtarget.useRetpolineIndirectBranches()) {
36380     // When targeting an internal COMDAT thunk use an LLVM-specific name.
36381     switch (Reg) {
36382     case X86::EAX:
36383       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36384       return "__llvm_retpoline_eax";
36385     case X86::ECX:
36386       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36387       return "__llvm_retpoline_ecx";
36388     case X86::EDX:
36389       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36390       return "__llvm_retpoline_edx";
36391     case X86::EDI:
36392       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36393       return "__llvm_retpoline_edi";
36394     case X86::R11:
36395       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36396       return "__llvm_retpoline_r11";
36397     }
36398     llvm_unreachable("unexpected reg for retpoline");
36399   }
36400 
36401   if (Subtarget.useLVIControlFlowIntegrity()) {
36402     assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36403     return "__llvm_lvi_thunk_r11";
36404   }
36405   llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
36406 }
36407 
36408 MachineBasicBlock *
36409 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
36410                                             MachineBasicBlock *BB) const {
36411   // Copy the virtual register into the R11 physical register and
36412   // call the retpoline thunk.
36413   const DebugLoc &DL = MI.getDebugLoc();
36414   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36415   Register CalleeVReg = MI.getOperand(0).getReg();
36416   unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
36417 
36418   // Find an available scratch register to hold the callee. On 64-bit, we can
36419   // just use R11, but we scan for uses anyway to ensure we don't generate
36420   // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
36421   // already a register use operand to the call to hold the callee. If none
36422   // are available, use EDI instead. EDI is chosen because EBX is the PIC base
36423   // register and ESI is the base pointer to realigned stack frames with VLAs.
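  //
  // The end result is a direct call to the matching thunk with the callee
  // pinned in the chosen scratch register, e.g. (64-bit, retpoline naming):
  //
  //   $r11 = COPY %callee_vreg
  //   CALL64pcrel32 @__llvm_retpoline_r11, implicit killed $r11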
36424   SmallVector<unsigned, 3> AvailableRegs;
36425   if (Subtarget.is64Bit())
36426     AvailableRegs.push_back(X86::R11);
36427   else
36428     AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
36429 
36430   // Zero out any registers that are already used.
36431   for (const auto &MO : MI.operands()) {
36432     if (MO.isReg() && MO.isUse())
36433       for (unsigned &Reg : AvailableRegs)
36434         if (Reg == MO.getReg())
36435           Reg = 0;
36436   }
36437 
36438   // Choose the first remaining non-zero available register.
36439   unsigned AvailableReg = 0;
36440   for (unsigned MaybeReg : AvailableRegs) {
36441     if (MaybeReg) {
36442       AvailableReg = MaybeReg;
36443       break;
36444     }
36445   }
36446   if (!AvailableReg)
36447     report_fatal_error("calling convention incompatible with retpoline, no "
36448                        "available registers");
36449 
36450   const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
36451 
36452   BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
36453       .addReg(CalleeVReg);
36454   MI.getOperand(0).ChangeToES(Symbol);
36455   MI.setDesc(TII->get(Opc));
36456   MachineInstrBuilder(*BB->getParent(), &MI)
36457       .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
36458   return BB;
36459 }
36460 
36461 /// SetJmp implies future control flow change upon calling the corresponding
36462 /// LongJmp.
36463 /// Instead of using the 'return' instruction, the long jump fixes the stack and
36464 /// performs an indirect branch. To do so it uses the registers that were stored
36465 /// in the jump buffer (when calling SetJmp).
36466 /// If the shadow stack is enabled, we need to fix it as well, because some
36467 /// return addresses will be skipped.
36468 /// The function will save the SSP for future fixing in the function
36469 /// emitLongJmpShadowStackFix.
36470 /// \sa emitLongJmpShadowStackFix
36471 /// \param [in] MI The temporary Machine Instruction for the builtin.
36472 /// \param [in] MBB The Machine Basic Block that will be modified.
36473 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
36474                                                  MachineBasicBlock *MBB) const {
36475   const DebugLoc &DL = MI.getDebugLoc();
36476   MachineFunction *MF = MBB->getParent();
36477   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36478   MachineRegisterInfo &MRI = MF->getRegInfo();
36479   MachineInstrBuilder MIB;
36480 
36481   // Memory Reference.
36482   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36483                                            MI.memoperands_end());
36484 
36485   // Initialize a register with zero.
36486   MVT PVT = getPointerTy(MF->getDataLayout());
36487   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36488   Register ZReg = MRI.createVirtualRegister(PtrRC);
36489   unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
36490   BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
36491       .addDef(ZReg)
36492       .addReg(ZReg, RegState::Undef)
36493       .addReg(ZReg, RegState::Undef);
36494 
36495   // Read the current SSP Register value to the zeroed register.
36496   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
36497   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
36498   BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
36499 
36500   // Write the SSP register value to offset 3 in input memory buffer.
36501   unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36502   MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
36503   const int64_t SSPOffset = 3 * PVT.getStoreSize();
36504   const unsigned MemOpndSlot = 1;
36505   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36506     if (i == X86::AddrDisp)
36507       MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
36508     else
36509       MIB.add(MI.getOperand(MemOpndSlot + i));
36510   }
36511   MIB.addReg(SSPCopyReg);
36512   MIB.setMemRefs(MMOs);
36513 }
36514 
36515 MachineBasicBlock *
36516 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
36517                                     MachineBasicBlock *MBB) const {
36518   const DebugLoc &DL = MI.getDebugLoc();
36519   MachineFunction *MF = MBB->getParent();
36520   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36521   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
36522   MachineRegisterInfo &MRI = MF->getRegInfo();
36523 
36524   const BasicBlock *BB = MBB->getBasicBlock();
36525   MachineFunction::iterator I = ++MBB->getIterator();
36526 
36527   // Memory Reference
36528   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36529                                            MI.memoperands_end());
36530 
36531   unsigned DstReg;
36532   unsigned MemOpndSlot = 0;
36533 
36534   unsigned CurOp = 0;
36535 
36536   DstReg = MI.getOperand(CurOp++).getReg();
36537   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
36538   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
36539   (void)TRI;
36540   Register mainDstReg = MRI.createVirtualRegister(RC);
36541   Register restoreDstReg = MRI.createVirtualRegister(RC);
36542 
36543   MemOpndSlot = CurOp;
36544 
36545   MVT PVT = getPointerTy(MF->getDataLayout());
36546   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
36547          "Invalid Pointer Size!");
36548 
36549   // For v = setjmp(buf), we generate
36550   //
36551   // thisMBB:
36552   //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
36553   //  SjLjSetup restoreMBB
36554   //
36555   // mainMBB:
36556   //  v_main = 0
36557   //
36558   // sinkMBB:
36559   //  v = phi(main, restore)
36560   //
36561   // restoreMBB:
36562   //  if base pointer being used, load it from frame
36563   //  v_restore = 1
36564 
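  // Buffer slots used below, in pointer-sized units: buf[1] holds the resume
  // IP (LabelOffset = 1 * PtrSize) and, when shadow stacks are enabled,
  // buf[3] holds the saved SSP (see emitSetJmpShadowStackFix).
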
36565   MachineBasicBlock *thisMBB = MBB;
36566   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
36567   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
36568   MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
36569   MF->insert(I, mainMBB);
36570   MF->insert(I, sinkMBB);
36571   MF->push_back(restoreMBB);
36572   restoreMBB->setMachineBlockAddressTaken();
36573 
36574   MachineInstrBuilder MIB;
36575 
36576   // Transfer the remainder of BB and its successor edges to sinkMBB.
36577   sinkMBB->splice(sinkMBB->begin(), MBB,
36578                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
36579   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
36580 
36581   // thisMBB:
36582   unsigned PtrStoreOpc = 0;
36583   unsigned LabelReg = 0;
36584   const int64_t LabelOffset = 1 * PVT.getStoreSize();
36585   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
36586                      !isPositionIndependent();
36587 
36588   // Prepare IP either in reg or imm.
36589   if (!UseImmLabel) {
36590     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36591     const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36592     LabelReg = MRI.createVirtualRegister(PtrRC);
36593     if (Subtarget.is64Bit()) {
36594       MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
36595               .addReg(X86::RIP)
36596               .addImm(0)
36597               .addReg(0)
36598               .addMBB(restoreMBB)
36599               .addReg(0);
36600     } else {
36601       const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
36602       MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
36603               .addReg(XII->getGlobalBaseReg(MF))
36604               .addImm(0)
36605               .addReg(0)
36606               .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
36607               .addReg(0);
36608     }
36609   } else
36610     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
36611   // Store IP
36612   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
36613   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36614     if (i == X86::AddrDisp)
36615       MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
36616     else
36617       MIB.add(MI.getOperand(MemOpndSlot + i));
36618   }
36619   if (!UseImmLabel)
36620     MIB.addReg(LabelReg);
36621   else
36622     MIB.addMBB(restoreMBB);
36623   MIB.setMemRefs(MMOs);
36624 
36625   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
36626     emitSetJmpShadowStackFix(MI, thisMBB);
36627   }
36628 
36629   // Setup
36630   MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
36631           .addMBB(restoreMBB);
36632 
36633   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
36634   MIB.addRegMask(RegInfo->getNoPreservedMask());
36635   thisMBB->addSuccessor(mainMBB);
36636   thisMBB->addSuccessor(restoreMBB);
36637 
36638   // mainMBB:
36639   //  EAX = 0
36640   BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
36641   mainMBB->addSuccessor(sinkMBB);
36642 
36643   // sinkMBB:
36644   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
36645           TII->get(X86::PHI), DstReg)
36646     .addReg(mainDstReg).addMBB(mainMBB)
36647     .addReg(restoreDstReg).addMBB(restoreMBB);
36648 
36649   // restoreMBB:
36650   if (RegInfo->hasBasePointer(*MF)) {
36651     const bool Uses64BitFramePtr =
36652         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
36653     X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
36654     X86FI->setRestoreBasePointer(MF);
36655     Register FramePtr = RegInfo->getFrameRegister(*MF);
36656     Register BasePtr = RegInfo->getBaseRegister();
36657     unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
36658     addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
36659                  FramePtr, true, X86FI->getRestoreBasePointerOffset())
36660       .setMIFlag(MachineInstr::FrameSetup);
36661   }
36662   BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
36663   BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
36664   restoreMBB->addSuccessor(sinkMBB);
36665 
36666   MI.eraseFromParent();
36667   return sinkMBB;
36668 }
36669 
36670 /// Fix the shadow stack using the previously saved SSP pointer.
36671 /// \sa emitSetJmpShadowStackFix
36672 /// \param [in] MI The temporary Machine Instruction for the builtin.
36673 /// \param [in] MBB The Machine Basic Block that will be modified.
36674 /// \return The sink MBB that will perform the future indirect branch.
36675 MachineBasicBlock *
36676 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
36677                                              MachineBasicBlock *MBB) const {
36678   const DebugLoc &DL = MI.getDebugLoc();
36679   MachineFunction *MF = MBB->getParent();
36680   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36681   MachineRegisterInfo &MRI = MF->getRegInfo();
36682 
36683   // Memory Reference
36684   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36685                                            MI.memoperands_end());
36686 
36687   MVT PVT = getPointerTy(MF->getDataLayout());
36688   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36689 
36690   // checkSspMBB:
36691   //         xor vreg1, vreg1
36692   //         rdssp vreg1
36693   //         test vreg1, vreg1
36694   //         je sinkMBB   # Jump if Shadow Stack is not supported
36695   // fallMBB:
36696   //         mov buf+24/12(%rip), vreg2
36697   //         sub vreg1, vreg2
36698   //         jbe sinkMBB  # No need to fix the Shadow Stack
36699   // fixShadowMBB:
36700   //         shr 3/2, vreg2
36701   //         incssp vreg2  # fix the SSP according to the lower 8 bits
36702   //         shr 8, vreg2
36703   //         je sinkMBB
36704   // fixShadowLoopPrepareMBB:
36705   //         shl vreg2
36706   //         mov 128, vreg3
36707   // fixShadowLoopMBB:
36708   //         incssp vreg3
36709   //         dec vreg2
36710   //         jne fixShadowLoopMBB # Iterate until you finish fixing
36711   //                              # the Shadow Stack
36712   // sinkMBB:
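  //
  // Worked example (PVT == i64, numbers purely illustrative): for a delta of
  // 0x1234 bytes, vreg2 = 0x1234 >> 3 = 0x246 shadow-stack entries. The first
  // incssp consumes the low 8 bits (0x46 entries); the remaining 0x2 chunks of
  // 256 entries are then handled by the loop as (0x2 << 1) = 4 incssp-by-128
  // iterations.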
36713 
36714   MachineFunction::iterator I = ++MBB->getIterator();
36715   const BasicBlock *BB = MBB->getBasicBlock();
36716 
36717   MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
36718   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
36719   MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
36720   MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
36721   MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
36722   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
36723   MF->insert(I, checkSspMBB);
36724   MF->insert(I, fallMBB);
36725   MF->insert(I, fixShadowMBB);
36726   MF->insert(I, fixShadowLoopPrepareMBB);
36727   MF->insert(I, fixShadowLoopMBB);
36728   MF->insert(I, sinkMBB);
36729 
36730   // Transfer the remainder of BB and its successor edges to sinkMBB.
36731   sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
36732                   MBB->end());
36733   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
36734 
36735   MBB->addSuccessor(checkSspMBB);
36736 
36737   // Initialize a register with zero.
36738   Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
36739   BuildMI(checkSspMBB, DL, TII->get(X86::MOV32r0), ZReg);
36740 
36741   if (PVT == MVT::i64) {
36742     Register TmpZReg = MRI.createVirtualRegister(PtrRC);
36743     BuildMI(checkSspMBB, DL, TII->get(X86::SUBREG_TO_REG), TmpZReg)
36744       .addImm(0)
36745       .addReg(ZReg)
36746       .addImm(X86::sub_32bit);
36747     ZReg = TmpZReg;
36748   }
36749 
36750   // Read the current SSP Register value to the zeroed register.
36751   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
36752   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
36753   BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
36754 
36755   // Check whether the SSP register value is zero and, if so, jump directly
36756   // to the sink.
36757   unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
36758   BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
36759       .addReg(SSPCopyReg)
36760       .addReg(SSPCopyReg);
36761   BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
36762   checkSspMBB->addSuccessor(sinkMBB);
36763   checkSspMBB->addSuccessor(fallMBB);
36764 
36765   // Reload the previously saved SSP register value.
36766   Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
36767   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36768   const int64_t SPPOffset = 3 * PVT.getStoreSize();
36769   MachineInstrBuilder MIB =
36770       BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
36771   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36772     const MachineOperand &MO = MI.getOperand(i);
36773     if (i == X86::AddrDisp)
36774       MIB.addDisp(MO, SPPOffset);
36775     else if (MO.isReg()) // Don't add the whole operand, we don't want to
36776                          // preserve kill flags.
36777       MIB.addReg(MO.getReg());
36778     else
36779       MIB.add(MO);
36780   }
36781   MIB.setMemRefs(MMOs);
36782 
36783   // Subtract the current SSP from the previous SSP.
36784   Register SspSubReg = MRI.createVirtualRegister(PtrRC);
36785   unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
36786   BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
36787       .addReg(PrevSSPReg)
36788       .addReg(SSPCopyReg);
36789 
36790   // Jump to sink in case PrevSSPReg <= SSPCopyReg.
36791   BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
36792   fallMBB->addSuccessor(sinkMBB);
36793   fallMBB->addSuccessor(fixShadowMBB);
36794 
36795   // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
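  // e.g. on x86-64 a byte delta of 0x1000 shifted right by 3 gives 0x200
  // shadow stack entries to unwind; incssp below consumes the low 8 bits of
  // that count and the loop handles the rest.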
36796   unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
36797   unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
36798   Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
36799   BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
36800       .addReg(SspSubReg)
36801       .addImm(Offset);
36802 
36803   // Increment the SSP using only the lower 8 bits of the delta.
36804   unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
36805   BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
36806 
36807   // Reset the lower 8 bits.
36808   Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
36809   BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
36810       .addReg(SspFirstShrReg)
36811       .addImm(8);
36812 
36813   // Jump if the result of the shift is zero.
36814   BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
36815   fixShadowMBB->addSuccessor(sinkMBB);
36816   fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
36817 
36818   // Do a single shift left.
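  // Each iteration of the loop below advances the SSP by 128 entries, so the
  // number of iterations needed is twice the number of remaining 256-entry
  // chunks, i.e. SspSecondShrReg << 1.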
36819   unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
36820   Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
36821   BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
36822       .addReg(SspSecondShrReg);
36823 
36824   // Save the value 128 to a register (will be used next with incssp).
36825   Register Value128InReg = MRI.createVirtualRegister(PtrRC);
36826   unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
36827   BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
36828       .addImm(128);
36829   fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
36830 
36831   // Since incssp only looks at the lower 8 bits, we might need to do several
36832   // iterations of incssp until we finish fixing the shadow stack.
36833   Register DecReg = MRI.createVirtualRegister(PtrRC);
36834   Register CounterReg = MRI.createVirtualRegister(PtrRC);
36835   BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
36836       .addReg(SspAfterShlReg)
36837       .addMBB(fixShadowLoopPrepareMBB)
36838       .addReg(DecReg)
36839       .addMBB(fixShadowLoopMBB);
36840 
36841   // Every iteration we increase the SSP by 128.
36842   BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
36843 
36844   // Every iteration we decrement the counter by 1.
36845   unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
36846   BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
36847 
36848   // Jump if the counter is not zero yet.
36849   BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
36850   fixShadowLoopMBB->addSuccessor(sinkMBB);
36851   fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
36852 
36853   return sinkMBB;
36854 }
36855 
36856 MachineBasicBlock *
36857 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
36858                                      MachineBasicBlock *MBB) const {
36859   const DebugLoc &DL = MI.getDebugLoc();
36860   MachineFunction *MF = MBB->getParent();
36861   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36862   MachineRegisterInfo &MRI = MF->getRegInfo();
36863 
36864   // Memory Reference
36865   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36866                                            MI.memoperands_end());
36867 
36868   MVT PVT = getPointerTy(MF->getDataLayout());
36869   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
36870          "Invalid Pointer Size!");
36871 
36872   const TargetRegisterClass *RC =
36873     (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36874   Register Tmp = MRI.createVirtualRegister(RC);
36875   // Since FP is only updated here but NOT referenced, it's treated as GPR.
36876   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
36877   Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
36878   Register SP = RegInfo->getStackRegister();
36879 
36880   MachineInstrBuilder MIB;
36881 
36882   const int64_t LabelOffset = 1 * PVT.getStoreSize();
36883   const int64_t SPOffset = 2 * PVT.getStoreSize();
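  // Jump buffer layout assumed here: FP at offset 0, the resume label at
  // 1 * PtrSize and SP at 2 * PtrSize; the shadow stack fix above additionally
  // reads the saved SSP at 3 * PtrSize.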
36884 
36885   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36886   unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
36887 
36888   MachineBasicBlock *thisMBB = MBB;
36889 
36890   // When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
36891   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
36892     thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
36893   }
36894 
36895   // Reload FP
36896   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
36897   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36898     const MachineOperand &MO = MI.getOperand(i);
36899     if (MO.isReg()) // Don't add the whole operand, we don't want to
36900                     // preserve kill flags.
36901       MIB.addReg(MO.getReg());
36902     else
36903       MIB.add(MO);
36904   }
36905   MIB.setMemRefs(MMOs);
36906 
36907   // Reload IP
36908   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
36909   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36910     const MachineOperand &MO = MI.getOperand(i);
36911     if (i == X86::AddrDisp)
36912       MIB.addDisp(MO, LabelOffset);
36913     else if (MO.isReg()) // Don't add the whole operand, we don't want to
36914                          // preserve kill flags.
36915       MIB.addReg(MO.getReg());
36916     else
36917       MIB.add(MO);
36918   }
36919   MIB.setMemRefs(MMOs);
36920 
36921   // Reload SP
36922   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
36923   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36924     if (i == X86::AddrDisp)
36925       MIB.addDisp(MI.getOperand(i), SPOffset);
36926     else
36927       MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
36928                                  // the last instruction of the expansion.
36929   }
36930   MIB.setMemRefs(MMOs);
36931 
36932   // Jump
36933   BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
36934 
36935   MI.eraseFromParent();
36936   return thisMBB;
36937 }
36938 
36939 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
36940                                                MachineBasicBlock *MBB,
36941                                                MachineBasicBlock *DispatchBB,
36942                                                int FI) const {
36943   const DebugLoc &DL = MI.getDebugLoc();
36944   MachineFunction *MF = MBB->getParent();
36945   MachineRegisterInfo *MRI = &MF->getRegInfo();
36946   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36947 
36948   MVT PVT = getPointerTy(MF->getDataLayout());
36949   assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
36950 
36951   unsigned Op = 0;
36952   unsigned VR = 0;
36953 
36954   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
36955                      !isPositionIndependent();
36956 
36957   if (UseImmLabel) {
36958     Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
36959   } else {
36960     const TargetRegisterClass *TRC =
36961         (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36962     VR = MRI->createVirtualRegister(TRC);
36963     Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36964 
36965     if (Subtarget.is64Bit())
36966       BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
36967           .addReg(X86::RIP)
36968           .addImm(1)
36969           .addReg(0)
36970           .addMBB(DispatchBB)
36971           .addReg(0);
36972     else
36973       BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
36974           .addReg(0) /* TII->getGlobalBaseReg(MF) */
36975           .addImm(1)
36976           .addReg(0)
36977           .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
36978           .addReg(0);
36979   }
36980 
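  // Offset 56 (64-bit) / 36 (32-bit) is the jbuf[1] slot of the SjLj function
  // context laid out by SjLjEHPrepare; storing the dispatch block's address
  // there is what makes the unwinder resume at the dispatch block.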
36981   MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
36982   addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
36983   if (UseImmLabel)
36984     MIB.addMBB(DispatchBB);
36985   else
36986     MIB.addReg(VR);
36987 }
36988 
36989 MachineBasicBlock *
36990 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
36991                                          MachineBasicBlock *BB) const {
36992   const DebugLoc &DL = MI.getDebugLoc();
36993   MachineFunction *MF = BB->getParent();
36994   MachineRegisterInfo *MRI = &MF->getRegInfo();
36995   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36996   int FI = MF->getFrameInfo().getFunctionContextIndex();
36997 
36998   // Get a mapping of the call site numbers to all of the landing pads they're
36999   // associated with.
37000   DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
37001   unsigned MaxCSNum = 0;
37002   for (auto &MBB : *MF) {
37003     if (!MBB.isEHPad())
37004       continue;
37005 
37006     MCSymbol *Sym = nullptr;
37007     for (const auto &MI : MBB) {
37008       if (MI.isDebugInstr())
37009         continue;
37010 
37011       assert(MI.isEHLabel() && "expected EH_LABEL");
37012       Sym = MI.getOperand(0).getMCSymbol();
37013       break;
37014     }
37015 
37016     if (!MF->hasCallSiteLandingPad(Sym))
37017       continue;
37018 
37019     for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
37020       CallSiteNumToLPad[CSI].push_back(&MBB);
37021       MaxCSNum = std::max(MaxCSNum, CSI);
37022     }
37023   }
37024 
37025   // Get an ordered list of the machine basic blocks for the jump table.
37026   std::vector<MachineBasicBlock *> LPadList;
37027   SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
37028   LPadList.reserve(CallSiteNumToLPad.size());
37029 
37030   for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
37031     for (auto &LP : CallSiteNumToLPad[CSI]) {
37032       LPadList.push_back(LP);
37033       InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
37034     }
37035   }
37036 
37037   assert(!LPadList.empty() &&
37038          "No landing pad destinations for the dispatch jump table!");
37039 
37040   // Create the MBBs for the dispatch code.
37041 
37042   // Shove the dispatch's address into the return slot in the function context.
37043   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
37044   DispatchBB->setIsEHPad(true);
37045 
37046   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
37047   BuildMI(TrapBB, DL, TII->get(X86::TRAP));
37048   DispatchBB->addSuccessor(TrapBB);
37049 
37050   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
37051   DispatchBB->addSuccessor(DispContBB);
37052 
37053   // Insert MBBs.
37054   MF->push_back(DispatchBB);
37055   MF->push_back(DispContBB);
37056   MF->push_back(TrapBB);
37057 
37058   // Insert code into the entry block that creates and registers the function
37059   // context.
37060   SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
37061 
37062   // Create the jump table and associated information
37063   unsigned JTE = getJumpTableEncoding();
37064   MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
37065   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
37066 
37067   const X86RegisterInfo &RI = TII->getRegisterInfo();
37068   // Add a register mask with no preserved registers.  This results in all
37069   // registers being marked as clobbered.
37070   if (RI.hasBasePointer(*MF)) {
37071     const bool FPIs64Bit =
37072         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
37073     X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
37074     MFI->setRestoreBasePointer(MF);
37075 
37076     Register FP = RI.getFrameRegister(*MF);
37077     Register BP = RI.getBaseRegister();
37078     unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
37079     addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
37080                  MFI->getRestoreBasePointerOffset())
37081         .addRegMask(RI.getNoPreservedMask());
37082   } else {
37083     BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
37084         .addRegMask(RI.getNoPreservedMask());
37085   }
37086 
37087   // IReg is used as an index in a memory operand and therefore can't be SP
37088   Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
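  // Load the call site index from the function context (offset 8 in 64-bit
  // mode, 4 in 32-bit mode) and range-check it against the landing pad list;
  // out-of-range indices branch to the trap block.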
37089   addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
37090                     Subtarget.is64Bit() ? 8 : 4);
37091   BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
37092       .addReg(IReg)
37093       .addImm(LPadList.size());
37094   BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
37095 
37096   if (Subtarget.is64Bit()) {
37097     Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
37098     Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
37099 
37100     // leaq .LJTI0_0(%rip), BReg
37101     BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
37102         .addReg(X86::RIP)
37103         .addImm(1)
37104         .addReg(0)
37105         .addJumpTableIndex(MJTI)
37106         .addReg(0);
37107     // movzx IReg64, IReg
37108     BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
37109         .addImm(0)
37110         .addReg(IReg)
37111         .addImm(X86::sub_32bit);
37112 
37113     switch (JTE) {
37114     case MachineJumpTableInfo::EK_BlockAddress:
37115       // jmpq *(BReg,IReg64,8)
37116       BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
37117           .addReg(BReg)
37118           .addImm(8)
37119           .addReg(IReg64)
37120           .addImm(0)
37121           .addReg(0);
37122       break;
37123     case MachineJumpTableInfo::EK_LabelDifference32: {
37124       Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
37125       Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
37126       Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
37127 
37128       // movl (BReg,IReg64,4), OReg
37129       BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
37130           .addReg(BReg)
37131           .addImm(4)
37132           .addReg(IReg64)
37133           .addImm(0)
37134           .addReg(0);
37135       // movsx OReg64, OReg
37136       BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
37137       // addq BReg, OReg64, TReg
37138       BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
37139           .addReg(OReg64)
37140           .addReg(BReg);
37141       // jmpq *TReg
37142       BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
37143       break;
37144     }
37145     default:
37146       llvm_unreachable("Unexpected jump table encoding");
37147     }
37148   } else {
37149     // jmpl *.LJTI0_0(,IReg,4)
37150     BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
37151         .addReg(0)
37152         .addImm(4)
37153         .addReg(IReg)
37154         .addJumpTableIndex(MJTI)
37155         .addReg(0);
37156   }
37157 
37158   // Add the jump table entries as successors to the MBB.
37159   SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
37160   for (auto &LP : LPadList)
37161     if (SeenMBBs.insert(LP).second)
37162       DispContBB->addSuccessor(LP);
37163 
37164   // N.B. the order the invoke BBs are processed in doesn't matter here.
37165   SmallVector<MachineBasicBlock *, 64> MBBLPads;
37166   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
37167   for (MachineBasicBlock *MBB : InvokeBBs) {
37168     // Remove the landing pad successor from the invoke block and replace it
37169     // with the new dispatch block.
37170     // Keep a copy of Successors since it's modified inside the loop.
37171     SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
37172                                                    MBB->succ_rend());
37173     // FIXME: Avoid quadratic complexity.
37174     for (auto *MBBS : Successors) {
37175       if (MBBS->isEHPad()) {
37176         MBB->removeSuccessor(MBBS);
37177         MBBLPads.push_back(MBBS);
37178       }
37179     }
37180 
37181     MBB->addSuccessor(DispatchBB);
37182 
37183     // Find the invoke call and mark all of the callee-saved registers as
37184     // 'implicit defined' so that they're spilled.  This prevents code from
37185     // moving instructions to before the EH block, where they will never be
37186     // executed.
37187     for (auto &II : reverse(*MBB)) {
37188       if (!II.isCall())
37189         continue;
37190 
37191       DenseMap<unsigned, bool> DefRegs;
37192       for (auto &MOp : II.operands())
37193         if (MOp.isReg())
37194           DefRegs[MOp.getReg()] = true;
37195 
37196       MachineInstrBuilder MIB(*MF, &II);
37197       for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
37198         unsigned Reg = SavedRegs[RegIdx];
37199         if (!DefRegs[Reg])
37200           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
37201       }
37202 
37203       break;
37204     }
37205   }
37206 
37207   // Mark all former landing pads as non-landing pads.  The dispatch is the only
37208   // landing pad now.
37209   for (auto &LP : MBBLPads)
37210     LP->setIsEHPad(false);
37211 
37212   // The instruction is gone now.
37213   MI.eraseFromParent();
37214   return BB;
37215 }
37216 
37217 MachineBasicBlock *
37218 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
37219                                                MachineBasicBlock *BB) const {
37220   MachineFunction *MF = BB->getParent();
37221   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37222   const DebugLoc &DL = MI.getDebugLoc();
37223 
37224   auto TMMImmToTMMReg = [](unsigned Imm) {
37225     assert (Imm < 8 && "Illegal tmm index");
37226     return X86::TMM0 + Imm;
37227   };
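  // AMX pseudo instructions refer to tile registers by immediate index up to
  // this point; the lambda above maps an index in the range 0-7 to the
  // corresponding physical TMM register.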
37228   switch (MI.getOpcode()) {
37229   default: llvm_unreachable("Unexpected instr type to insert");
37230   case X86::TLS_addr32:
37231   case X86::TLS_addr64:
37232   case X86::TLS_addrX32:
37233   case X86::TLS_base_addr32:
37234   case X86::TLS_base_addr64:
37235   case X86::TLS_base_addrX32:
37236     return EmitLoweredTLSAddr(MI, BB);
37237   case X86::INDIRECT_THUNK_CALL32:
37238   case X86::INDIRECT_THUNK_CALL64:
37239   case X86::INDIRECT_THUNK_TCRETURN32:
37240   case X86::INDIRECT_THUNK_TCRETURN64:
37241     return EmitLoweredIndirectThunk(MI, BB);
37242   case X86::CATCHRET:
37243     return EmitLoweredCatchRet(MI, BB);
37244   case X86::SEG_ALLOCA_32:
37245   case X86::SEG_ALLOCA_64:
37246     return EmitLoweredSegAlloca(MI, BB);
37247   case X86::PROBED_ALLOCA_32:
37248   case X86::PROBED_ALLOCA_64:
37249     return EmitLoweredProbedAlloca(MI, BB);
37250   case X86::TLSCall_32:
37251   case X86::TLSCall_64:
37252     return EmitLoweredTLSCall(MI, BB);
37253   case X86::CMOV_FR16:
37254   case X86::CMOV_FR16X:
37255   case X86::CMOV_FR32:
37256   case X86::CMOV_FR32X:
37257   case X86::CMOV_FR64:
37258   case X86::CMOV_FR64X:
37259   case X86::CMOV_GR8:
37260   case X86::CMOV_GR16:
37261   case X86::CMOV_GR32:
37262   case X86::CMOV_RFP32:
37263   case X86::CMOV_RFP64:
37264   case X86::CMOV_RFP80:
37265   case X86::CMOV_VR64:
37266   case X86::CMOV_VR128:
37267   case X86::CMOV_VR128X:
37268   case X86::CMOV_VR256:
37269   case X86::CMOV_VR256X:
37270   case X86::CMOV_VR512:
37271   case X86::CMOV_VK1:
37272   case X86::CMOV_VK2:
37273   case X86::CMOV_VK4:
37274   case X86::CMOV_VK8:
37275   case X86::CMOV_VK16:
37276   case X86::CMOV_VK32:
37277   case X86::CMOV_VK64:
37278     return EmitLoweredSelect(MI, BB);
37279 
37280   case X86::RDFLAGS32:
37281   case X86::RDFLAGS64: {
37282     unsigned PushF =
37283         MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
37284     unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
37285     MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
37286     // Permit reads of the EFLAGS and DF registers without them being defined.
37287     // This intrinsic exists to read external processor state in flags, such as
37288     // the trap flag, interrupt flag, and direction flag, none of which are
37289     // modeled by the backend.
37290     assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
37291            "Unexpected register in operand!");
37292     Push->getOperand(2).setIsUndef();
37293     assert(Push->getOperand(3).getReg() == X86::DF &&
37294            "Unexpected register in operand!");
37295     Push->getOperand(3).setIsUndef();
37296     BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
37297 
37298     MI.eraseFromParent(); // The pseudo is gone now.
37299     return BB;
37300   }
37301 
37302   case X86::WRFLAGS32:
37303   case X86::WRFLAGS64: {
37304     unsigned Push =
37305         MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
37306     unsigned PopF =
37307         MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
37308     BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
37309     BuildMI(*BB, MI, DL, TII->get(PopF));
37310 
37311     MI.eraseFromParent(); // The pseudo is gone now.
37312     return BB;
37313   }
37314 
37315   case X86::FP80_ADDr:
37316   case X86::FP80_ADDm32: {
37317     // Change the floating point control register to use double extended
37318     // precision when performing the addition.
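    // The expansion below is roughly (using %eax as a scratch register):
    //   fnstcw  OrigCW(%esp)          # save the current control word
    //   movzwl  OrigCW(%esp), %eax
    //   orl     $0x300, %eax          # PC (bits 8-9) = 11b -> 64-bit precision
    //   movw    %ax, NewCW(%esp)
    //   fldcw   NewCW(%esp)
    //   fadd    ...                   # add at full precision
    //   fldcw   OrigCW(%esp)          # restore the original control word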
37319     int OrigCWFrameIdx =
37320         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37321     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FNSTCW16m)),
37322                       OrigCWFrameIdx);
37323 
37324     // Load the old value of the control word...
37325     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37326     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
37327                       OrigCWFrameIdx);
37328 
37329     // OR 0b11 into bits 8 and 9 (the precision control field). 0b11 is the
37330     // encoding for double extended precision.
37331     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37332     BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
37333         .addReg(OldCW, RegState::Kill)
37334         .addImm(0x300);
37335 
37336     // Extract to 16 bits.
37337     Register NewCW16 =
37338         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
37339     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
37340         .addReg(NewCW, RegState::Kill, X86::sub_16bit);
37341 
37342     // Prepare memory for FLDCW.
37343     int NewCWFrameIdx =
37344         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37345     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
37346                       NewCWFrameIdx)
37347         .addReg(NewCW16, RegState::Kill);
37348 
37349     // Reload the modified control word now...
37350     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
37351                       NewCWFrameIdx);
37352 
37353     // Do the addition.
37354     if (MI.getOpcode() == X86::FP80_ADDr) {
37355       BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80))
37356           .add(MI.getOperand(0))
37357           .add(MI.getOperand(1))
37358           .add(MI.getOperand(2));
37359     } else {
37360       BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80m32))
37361           .add(MI.getOperand(0))
37362           .add(MI.getOperand(1))
37363           .add(MI.getOperand(2))
37364           .add(MI.getOperand(3))
37365           .add(MI.getOperand(4))
37366           .add(MI.getOperand(5))
37367           .add(MI.getOperand(6));
37368     }
37369 
37370     // Reload the original control word now.
37371     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
37372                       OrigCWFrameIdx);
37373 
37374     MI.eraseFromParent(); // The pseudo instruction is gone now.
37375     return BB;
37376   }
37377 
37378   case X86::FP32_TO_INT16_IN_MEM:
37379   case X86::FP32_TO_INT32_IN_MEM:
37380   case X86::FP32_TO_INT64_IN_MEM:
37381   case X86::FP64_TO_INT16_IN_MEM:
37382   case X86::FP64_TO_INT32_IN_MEM:
37383   case X86::FP64_TO_INT64_IN_MEM:
37384   case X86::FP80_TO_INT16_IN_MEM:
37385   case X86::FP80_TO_INT32_IN_MEM:
37386   case X86::FP80_TO_INT64_IN_MEM: {
37387     // Change the floating point control register to use "round towards zero"
37388     // mode when truncating to an integer value.
37389     int OrigCWFrameIdx =
37390         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37391     addFrameReference(BuildMI(*BB, MI, DL,
37392                               TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
37393 
37394     // Load the old value of the control word...
37395     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37396     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
37397                       OrigCWFrameIdx);
37398 
37399     // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
37400     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37401     BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
37402       .addReg(OldCW, RegState::Kill).addImm(0xC00);
37403 
37404     // Extract to 16 bits.
37405     Register NewCW16 =
37406         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
37407     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
37408       .addReg(NewCW, RegState::Kill, X86::sub_16bit);
37409 
37410     // Prepare memory for FLDCW.
37411     int NewCWFrameIdx =
37412         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37413     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
37414                       NewCWFrameIdx)
37415       .addReg(NewCW16, RegState::Kill);
37416 
37417     // Reload the modified control word now...
37418     addFrameReference(BuildMI(*BB, MI, DL,
37419                               TII->get(X86::FLDCW16m)), NewCWFrameIdx);
37420 
37421     // Get the X86 opcode to use.
37422     unsigned Opc;
37423     switch (MI.getOpcode()) {
37424     default: llvm_unreachable("illegal opcode!");
37425     case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
37426     case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
37427     case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
37428     case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
37429     case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
37430     case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
37431     case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
37432     case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
37433     case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
37434     }
37435 
37436     X86AddressMode AM = getAddressFromInstr(&MI, 0);
37437     addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
37438         .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
37439 
37440     // Reload the original control word now.
37441     addFrameReference(BuildMI(*BB, MI, DL,
37442                               TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
37443 
37444     MI.eraseFromParent(); // The pseudo instruction is gone now.
37445     return BB;
37446   }
37447 
37448   // xbegin
37449   case X86::XBEGIN:
37450     return emitXBegin(MI, BB, Subtarget.getInstrInfo());
37451 
37452   case X86::VAARG_64:
37453   case X86::VAARG_X32:
37454     return EmitVAARGWithCustomInserter(MI, BB);
37455 
37456   case X86::EH_SjLj_SetJmp32:
37457   case X86::EH_SjLj_SetJmp64:
37458     return emitEHSjLjSetJmp(MI, BB);
37459 
37460   case X86::EH_SjLj_LongJmp32:
37461   case X86::EH_SjLj_LongJmp64:
37462     return emitEHSjLjLongJmp(MI, BB);
37463 
37464   case X86::Int_eh_sjlj_setup_dispatch:
37465     return EmitSjLjDispatchBlock(MI, BB);
37466 
37467   case TargetOpcode::STATEPOINT:
37468     // As an implementation detail, STATEPOINT shares the STACKMAP format at
37469     // this point in the process.  We diverge later.
37470     return emitPatchPoint(MI, BB);
37471 
37472   case TargetOpcode::STACKMAP:
37473   case TargetOpcode::PATCHPOINT:
37474     return emitPatchPoint(MI, BB);
37475 
37476   case TargetOpcode::PATCHABLE_EVENT_CALL:
37477   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
37478     return BB;
37479 
37480   case X86::LCMPXCHG8B: {
37481     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37482     // In addition to the four E[ABCD] registers implied by its encoding,
37483     // CMPXCHG8B requires a memory operand. If the current target is i686 and
37484     // the current function needs a base pointer
37485     // - which is ESI on i686 - the register allocator would not be able to
37486     // allocate registers for an address of the form X(%reg, %reg, Y):
37487     // there would never be enough unreserved registers during regalloc
37488     // (without the need for a base pointer the only option would be
37489     // X(%edi, %esi, Y)). We give the register allocator a hand by precomputing
37490     // the address in a new vreg using LEA.
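    // e.g. a cmpxchg8b with address 16(%esi,%edi,4) is rewritten as
    //   leal 16(%esi,%edi,4), %vreg
    //   cmpxchg8b (%vreg)
    // so the memory operand no longer needs both a base and an index register.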
37491 
37492     // If it is not i686 or there is no base pointer - nothing to do here.
37493     if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
37494       return BB;
37495 
37496     // Even though this code does not necessarily need the base pointer to
37497     // be ESI, we check for that. The reason: if this assert fails, something
37498     // has changed in the compiler's base pointer handling, and it most
37499     // probably has to be addressed here as well.
37500     assert(TRI->getBaseRegister() == X86::ESI &&
37501            "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
37502            "base pointer in mind");
37503 
37504     MachineRegisterInfo &MRI = MF->getRegInfo();
37505     MVT SPTy = getPointerTy(MF->getDataLayout());
37506     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
37507     Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
37508 
37509     X86AddressMode AM = getAddressFromInstr(&MI, 0);
37510     // Regalloc does not need any help when the memory operand of CMPXCHG8B
37511     // does not use an index register.
37512     if (AM.IndexReg == X86::NoRegister)
37513       return BB;
37514 
37515     // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
37516     // four operand definitions that are E[ABCD] registers. We skip them and
37517     // then insert the LEA.
37518     MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
37519     while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
37520                                    RMBBI->definesRegister(X86::EBX) ||
37521                                    RMBBI->definesRegister(X86::ECX) ||
37522                                    RMBBI->definesRegister(X86::EDX))) {
37523       ++RMBBI;
37524     }
37525     MachineBasicBlock::iterator MBBI(RMBBI);
37526     addFullAddress(
37527         BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
37528 
37529     setDirectAddressInInstr(&MI, 0, computedAddrVReg);
37530 
37531     return BB;
37532   }
37533   case X86::LCMPXCHG16B_NO_RBX: {
37534     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37535     Register BasePtr = TRI->getBaseRegister();
37536     if (TRI->hasBasePointer(*MF) &&
37537         (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
37538       if (!BB->isLiveIn(BasePtr))
37539         BB->addLiveIn(BasePtr);
37540       // Save RBX into a virtual register.
37541       Register SaveRBX =
37542           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37543       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
37544           .addReg(X86::RBX);
37545       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37546       MachineInstrBuilder MIB =
37547           BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
37548       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
37549         MIB.add(MI.getOperand(Idx));
37550       MIB.add(MI.getOperand(X86::AddrNumOperands));
37551       MIB.addReg(SaveRBX);
37552     } else {
37553       // Simple case, just copy the virtual register to RBX.
37554       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::RBX)
37555           .add(MI.getOperand(X86::AddrNumOperands));
37556       MachineInstrBuilder MIB =
37557           BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B));
37558       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
37559         MIB.add(MI.getOperand(Idx));
37560     }
37561     MI.eraseFromParent();
37562     return BB;
37563   }
37564   case X86::MWAITX: {
37565     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37566     Register BasePtr = TRI->getBaseRegister();
37567     bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
37568     // If there is no need to save the base pointer, we generate MWAITXrrr;
37569     // otherwise we generate the MWAITX_SAVE_RBX pseudo.
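    // MWAITX takes its operands in ECX, EAX and EBX; when EBX/RBX is reserved
    // as the base pointer we must not clobber it directly, so the
    // MWAITX_SAVE_RBX pseudo carries both the EBX input and the saved RBX and
    // restores RBX after the instruction.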
37570     if (!IsRBX || !TRI->hasBasePointer(*MF)) {
37571       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
37572           .addReg(MI.getOperand(0).getReg());
37573       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
37574           .addReg(MI.getOperand(1).getReg());
37575       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EBX)
37576           .addReg(MI.getOperand(2).getReg());
37577       BuildMI(*BB, MI, DL, TII->get(X86::MWAITXrrr));
37578       MI.eraseFromParent();
37579     } else {
37580       if (!BB->isLiveIn(BasePtr)) {
37581         BB->addLiveIn(BasePtr);
37582       }
37583       // Parameters can be copied into ECX and EAX but not EBX yet.
37584       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
37585           .addReg(MI.getOperand(0).getReg());
37586       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
37587           .addReg(MI.getOperand(1).getReg());
37588       assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
37589       // Save RBX into a virtual register.
37590       Register SaveRBX =
37591           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37592       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
37593           .addReg(X86::RBX);
37594       // Generate mwaitx pseudo.
37595       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37596       BuildMI(*BB, MI, DL, TII->get(X86::MWAITX_SAVE_RBX))
37597           .addDef(Dst) // Destination tied in with SaveRBX.
37598           .addReg(MI.getOperand(2).getReg()) // input value of EBX.
37599           .addUse(SaveRBX);                  // Save of base pointer.
37600       MI.eraseFromParent();
37601     }
37602     return BB;
37603   }
37604   case TargetOpcode::PREALLOCATED_SETUP: {
37605     assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
37606     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
37607     MFI->setHasPreallocatedCall(true);
37608     int64_t PreallocatedId = MI.getOperand(0).getImm();
37609     size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
37610     assert(StackAdjustment != 0 && "0 stack adjustment");
37611     LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
37612                       << StackAdjustment << "\n");
37613     BuildMI(*BB, MI, DL, TII->get(X86::SUB32ri), X86::ESP)
37614         .addReg(X86::ESP)
37615         .addImm(StackAdjustment);
37616     MI.eraseFromParent();
37617     return BB;
37618   }
37619   case TargetOpcode::PREALLOCATED_ARG: {
37620     assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
37621     int64_t PreallocatedId = MI.getOperand(1).getImm();
37622     int64_t ArgIdx = MI.getOperand(2).getImm();
37623     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
37624     size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
37625     LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
37626                       << ", arg offset " << ArgOffset << "\n");
37627     // stack pointer + offset
37628     addRegOffset(
37629         BuildMI(*BB, MI, DL, TII->get(X86::LEA32r), MI.getOperand(0).getReg()),
37630         X86::ESP, false, ArgOffset);
37631     MI.eraseFromParent();
37632     return BB;
37633   }
37634   case X86::PTDPBSSD:
37635   case X86::PTDPBSUD:
37636   case X86::PTDPBUSD:
37637   case X86::PTDPBUUD:
37638   case X86::PTDPBF16PS:
37639   case X86::PTDPFP16PS: {
37640     unsigned Opc;
37641     switch (MI.getOpcode()) {
37642     default: llvm_unreachable("illegal opcode!");
37643     case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
37644     case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
37645     case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
37646     case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
37647     case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
37648     case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
37649     }
37650 
37651     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
37652     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
37653     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
37654     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
37655     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
37656 
37657     MI.eraseFromParent(); // The pseudo is gone now.
37658     return BB;
37659   }
37660   case X86::PTILEZERO: {
37661     unsigned Imm = MI.getOperand(0).getImm();
37662     BuildMI(*BB, MI, DL, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
37663     MI.eraseFromParent(); // The pseudo is gone now.
37664     return BB;
37665   }
37666   case X86::PTILELOADD:
37667   case X86::PTILELOADDT1:
37668   case X86::PTILESTORED: {
37669     unsigned Opc;
37670     switch (MI.getOpcode()) {
37671     default: llvm_unreachable("illegal opcode!");
37672     case X86::PTILELOADD:   Opc = X86::TILELOADD;   break;
37673     case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
37674     case X86::PTILESTORED:  Opc = X86::TILESTORED;  break;
37675     }
37676 
37677     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
37678     unsigned CurOp = 0;
37679     if (Opc != X86::TILESTORED)
37680       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
37681                  RegState::Define);
37682 
37683     MIB.add(MI.getOperand(CurOp++)); // base
37684     MIB.add(MI.getOperand(CurOp++)); // scale
37685     MIB.add(MI.getOperand(CurOp++)); // index -- stride
37686     MIB.add(MI.getOperand(CurOp++)); // displacement
37687     MIB.add(MI.getOperand(CurOp++)); // segment
37688 
37689     if (Opc == X86::TILESTORED)
37690       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
37691                  RegState::Undef);
37692 
37693     MI.eraseFromParent(); // The pseudo is gone now.
37694     return BB;
37695   }
37696   }
37697 }
37698 
37699 //===----------------------------------------------------------------------===//
37700 //                           X86 Optimization Hooks
37701 //===----------------------------------------------------------------------===//
37702 
37703 bool
37704 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
37705                                                 const APInt &DemandedBits,
37706                                                 const APInt &DemandedElts,
37707                                                 TargetLoweringOpt &TLO) const {
37708   EVT VT = Op.getValueType();
37709   unsigned Opcode = Op.getOpcode();
37710   unsigned EltSize = VT.getScalarSizeInBits();
37711 
37712   if (VT.isVector()) {
37713     // If, within the active bits, the constant consists only of sign bits,
37714     // then we should sign-extend it to the full element width so that it can
37715     // act as a boolean constant vector.
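    // e.g. with DemandedBits = 0x1, a v4i32 splat of 1 is all sign bits within
    // the single active bit, so it is sign-extended in-reg to all-ones and can
    // then act as a boolean vector mask.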
37716     auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
37717       if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
37718         return false;
37719       for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
37720         if (!DemandedElts[i] || V.getOperand(i).isUndef())
37721           continue;
37722         const APInt &Val = V.getConstantOperandAPInt(i);
37723         if (Val.getBitWidth() > Val.getNumSignBits() &&
37724             Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
37725           return true;
37726       }
37727       return false;
37728     };
37729     // For vectors - if we have a constant, then try to sign extend.
37730     // TODO: Handle AND/ANDN cases.
37731     unsigned ActiveBits = DemandedBits.getActiveBits();
37732     if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
37733         (Opcode == ISD::OR || Opcode == ISD::XOR) &&
37734         NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
37735       EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
37736       EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
37737                                     VT.getVectorNumElements());
37738       SDValue NewC =
37739           TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
37740                           Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
37741       SDValue NewOp =
37742           TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
37743       return TLO.CombineTo(Op, NewOp);
37744     }
37745     return false;
37746   }
37747 
37748   // Only optimize Ands to prevent shrinking a constant that could be
37749   // matched by movzx.
37750   if (Opcode != ISD::AND)
37751     return false;
37752 
37753   // Make sure the RHS really is a constant.
37754   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
37755   if (!C)
37756     return false;
37757 
37758   const APInt &Mask = C->getAPIntValue();
37759 
37760   // Clear all non-demanded bits initially.
37761   APInt ShrunkMask = Mask & DemandedBits;
37762 
37763   // Find the width of the shrunk mask.
37764   unsigned Width = ShrunkMask.getActiveBits();
37765 
37766   // If the mask is all 0s there's nothing to do here.
37767   if (Width == 0)
37768     return false;
37769 
37770   // Find the next power of 2 width, rounding up to a byte.
37771   Width = PowerOf2Ceil(std::max(Width, 8U));
37772   // Clamp the width to the element size to handle illegal types.
37773   Width = std::min(Width, EltSize);
37774 
37775   // Calculate a possible zero extend mask for this constant.
37776   APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
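  // e.g. DemandedBits = 0xff and Mask = 0x3ff: ShrunkMask = 0xff, Width = 8 and
  // ZeroExtendMask = 0xff, so the AND mask is replaced with 0xff and can later
  // be matched as a zero extend (movzx).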
37777 
37778   // If we aren't changing the mask, just return true to keep it and prevent
37779   // the caller from optimizing.
37780   if (ZeroExtendMask == Mask)
37781     return true;
37782 
37783   // Make sure the new mask can be represented by a combination of mask bits
37784   // and non-demanded bits.
37785   if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
37786     return false;
37787 
37788   // Replace the constant with the zero extend mask.
37789   SDLoc DL(Op);
37790   SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
37791   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
37792   return TLO.CombineTo(Op, NewOp);
37793 }
37794 
37795 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
37796                                                       KnownBits &Known,
37797                                                       const APInt &DemandedElts,
37798                                                       const SelectionDAG &DAG,
37799                                                       unsigned Depth) const {
37800   unsigned BitWidth = Known.getBitWidth();
37801   unsigned NumElts = DemandedElts.getBitWidth();
37802   unsigned Opc = Op.getOpcode();
37803   EVT VT = Op.getValueType();
37804   assert((Opc >= ISD::BUILTIN_OP_END ||
37805           Opc == ISD::INTRINSIC_WO_CHAIN ||
37806           Opc == ISD::INTRINSIC_W_CHAIN ||
37807           Opc == ISD::INTRINSIC_VOID) &&
37808          "Should use MaskedValueIsZero if you don't know whether Op"
37809          " is a target node!");
37810 
37811   Known.resetAll();
37812   switch (Opc) {
37813   default: break;
37814   case X86ISD::SETCC:
37815     Known.Zero.setBitsFrom(1);
37816     break;
37817   case X86ISD::MOVMSK: {
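    // MOVMSK copies the sign bit of each source element into the low bits of
    // the scalar result, so everything above NumLoBits is known zero.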
37818     unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
37819     Known.Zero.setBitsFrom(NumLoBits);
37820     break;
37821   }
37822   case X86ISD::PEXTRB:
37823   case X86ISD::PEXTRW: {
37824     SDValue Src = Op.getOperand(0);
37825     EVT SrcVT = Src.getValueType();
37826     APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
37827                                             Op.getConstantOperandVal(1));
37828     Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
37829     Known = Known.anyextOrTrunc(BitWidth);
37830     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
37831     break;
37832   }
37833   case X86ISD::VSRAI:
37834   case X86ISD::VSHLI:
37835   case X86ISD::VSRLI: {
37836     unsigned ShAmt = Op.getConstantOperandVal(1);
37837     if (ShAmt >= VT.getScalarSizeInBits()) {
37838       // Out of range logical bit shifts are guaranteed to be zero.
37839       // Out of range arithmetic bit shifts splat the sign bit.
37840       if (Opc != X86ISD::VSRAI) {
37841         Known.setAllZero();
37842         break;
37843       }
37844 
37845       ShAmt = VT.getScalarSizeInBits() - 1;
37846     }
37847 
37848     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37849     if (Opc == X86ISD::VSHLI) {
37850       Known.Zero <<= ShAmt;
37851       Known.One <<= ShAmt;
37852       // Low bits are known zero.
37853       Known.Zero.setLowBits(ShAmt);
37854     } else if (Opc == X86ISD::VSRLI) {
37855       Known.Zero.lshrInPlace(ShAmt);
37856       Known.One.lshrInPlace(ShAmt);
37857       // High bits are known zero.
37858       Known.Zero.setHighBits(ShAmt);
37859     } else {
37860       Known.Zero.ashrInPlace(ShAmt);
37861       Known.One.ashrInPlace(ShAmt);
37862     }
37863     break;
37864   }
37865   case X86ISD::PACKUS: {
37866     // PACKUS is just a truncation if the upper half is zero.
37867     APInt DemandedLHS, DemandedRHS;
37868     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
37869 
37870     Known.One = APInt::getAllOnes(BitWidth * 2);
37871     Known.Zero = APInt::getAllOnes(BitWidth * 2);
37872 
37873     KnownBits Known2;
37874     if (!!DemandedLHS) {
37875       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
37876       Known = KnownBits::commonBits(Known, Known2);
37877     }
37878     if (!!DemandedRHS) {
37879       Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
37880       Known = KnownBits::commonBits(Known, Known2);
37881     }
37882 
37883     if (Known.countMinLeadingZeros() < BitWidth)
37884       Known.resetAll();
37885     Known = Known.trunc(BitWidth);
37886     break;
37887   }
37888   case X86ISD::VBROADCAST: {
37889     SDValue Src = Op.getOperand(0);
37890     if (!Src.getSimpleValueType().isVector()) {
37891       Known = DAG.computeKnownBits(Src, Depth + 1);
37892       return;
37893     }
37894     break;
37895   }
37896   case X86ISD::AND: {
37897     if (Op.getResNo() == 0) {
37898       KnownBits Known2;
37899       Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37900       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37901       Known &= Known2;
37902     }
37903     break;
37904   }
37905   case X86ISD::ANDNP: {
37906     KnownBits Known2;
37907     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37908     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37909 
37910     // ANDNP = (~X & Y);
37911     Known.One &= Known2.Zero;
37912     Known.Zero |= Known2.One;
37913     break;
37914   }
37915   case X86ISD::FOR: {
37916     KnownBits Known2;
37917     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37918     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37919 
37920     Known |= Known2;
37921     break;
37922   }
37923   case X86ISD::PSADBW: {
37924     assert(VT.getScalarType() == MVT::i64 &&
37925            Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
37926            "Unexpected PSADBW types");
37927 
37928     // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
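    // (The sum of eight absolute differences of 8-bit values is at most
    // 8 * 255 = 2040, which fits in 16 bits.)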
37929     Known.Zero.setBitsFrom(16);
37930     break;
37931   }
37932   case X86ISD::PMULUDQ: {
37933     KnownBits Known2;
37934     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37935     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37936 
37937     Known = Known.trunc(BitWidth / 2).zext(BitWidth);
37938     Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
37939     Known = KnownBits::mul(Known, Known2);
37940     break;
37941   }
37942   case X86ISD::CMOV: {
37943     Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
37944     // If we don't know any bits, early out.
37945     if (Known.isUnknown())
37946       break;
37947     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
37948 
37949     // Only known if known in both the LHS and RHS.
37950     Known = KnownBits::commonBits(Known, Known2);
37951     break;
37952   }
37953   case X86ISD::BEXTR:
37954   case X86ISD::BEXTRI: {
37955     SDValue Op0 = Op.getOperand(0);
37956     SDValue Op1 = Op.getOperand(1);
37957 
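    // The BEXTR control operand encodes the start bit in bits [7:0] and the
    // extract length in bits [15:8].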
37958     if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
37959       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
37960       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
37961 
37962       // If the length is 0, the result is 0.
37963       if (Length == 0) {
37964         Known.setAllZero();
37965         break;
37966       }
37967 
37968       if ((Shift + Length) <= BitWidth) {
37969         Known = DAG.computeKnownBits(Op0, Depth + 1);
37970         Known = Known.extractBits(Length, Shift);
37971         Known = Known.zextOrTrunc(BitWidth);
37972       }
37973     }
37974     break;
37975   }
37976   case X86ISD::PDEP: {
37977     KnownBits Known2;
37978     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37979     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37980     // Zeros are retained from the mask operand, but ones are not.
37981     Known.One.clearAllBits();
37982     // The result will have at least as many trailing zeros as the non-mask
37983     // operand since bits can only map to the same or higher bit position.
37984     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
37985     break;
37986   }
37987   case X86ISD::PEXT: {
37988     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37989     // The result has at least as many leading zeros as there are known zeros in the mask.
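    // e.g. PEXT with a mask of 0x0000ff00 packs 8 bits into the low byte, so
    // at least 24 upper bits of a 32-bit result are known zero.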
37990     unsigned Count = Known.Zero.countPopulation();
37991     Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
37992     Known.One.clearAllBits();
37993     break;
37994   }
37995   case X86ISD::VTRUNC:
37996   case X86ISD::VTRUNCS:
37997   case X86ISD::VTRUNCUS:
37998   case X86ISD::CVTSI2P:
37999   case X86ISD::CVTUI2P:
38000   case X86ISD::CVTP2SI:
38001   case X86ISD::CVTP2UI:
38002   case X86ISD::MCVTP2SI:
38003   case X86ISD::MCVTP2UI:
38004   case X86ISD::CVTTP2SI:
38005   case X86ISD::CVTTP2UI:
38006   case X86ISD::MCVTTP2SI:
38007   case X86ISD::MCVTTP2UI:
38008   case X86ISD::MCVTSI2P:
38009   case X86ISD::MCVTUI2P:
38010   case X86ISD::VFPROUND:
38011   case X86ISD::VMFPROUND:
38012   case X86ISD::CVTPS2PH:
38013   case X86ISD::MCVTPS2PH: {
38014     // Truncations/Conversions - upper elements are known zero.
38015     EVT SrcVT = Op.getOperand(0).getValueType();
38016     if (SrcVT.isVector()) {
38017       unsigned NumSrcElts = SrcVT.getVectorNumElements();
38018       if (NumElts > NumSrcElts &&
38019           DemandedElts.countTrailingZeros() >= NumSrcElts)
38020         Known.setAllZero();
38021     }
38022     break;
38023   }
38024   case X86ISD::STRICT_CVTTP2SI:
38025   case X86ISD::STRICT_CVTTP2UI:
38026   case X86ISD::STRICT_CVTSI2P:
38027   case X86ISD::STRICT_CVTUI2P:
38028   case X86ISD::STRICT_VFPROUND:
38029   case X86ISD::STRICT_CVTPS2PH: {
38030     // Strict Conversions - upper elements are known zero.
38031     EVT SrcVT = Op.getOperand(1).getValueType();
38032     if (SrcVT.isVector()) {
38033       unsigned NumSrcElts = SrcVT.getVectorNumElements();
38034       if (NumElts > NumSrcElts &&
38035           DemandedElts.countTrailingZeros() >= NumSrcElts)
38036         Known.setAllZero();
38037     }
38038     break;
38039   }
38040   case X86ISD::MOVQ2DQ: {
38041     // Move from MMX to XMM. Upper half of XMM should be 0.
38042     if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
38043       Known.setAllZero();
38044     break;
38045   }
38046   case X86ISD::VBROADCAST_LOAD: {
38047     APInt UndefElts;
38048     SmallVector<APInt, 16> EltBits;
38049     if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
38050                                       /*AllowWholeUndefs*/ false,
38051                                       /*AllowPartialUndefs*/ false)) {
38052       Known.Zero.setAllBits();
38053       Known.One.setAllBits();
38054       for (unsigned I = 0; I != NumElts; ++I) {
38055         if (!DemandedElts[I])
38056           continue;
38057         if (UndefElts[I]) {
38058           Known.resetAll();
38059           break;
38060         }
38061         KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
38062         Known = KnownBits::commonBits(Known, Known2);
38063       }
38064       return;
38065     }
38066     break;
38067   }
38068   }
38069 
38070   // Handle target shuffles.
38071   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
38072   if (isTargetShuffle(Opc)) {
38073     SmallVector<int, 64> Mask;
38074     SmallVector<SDValue, 2> Ops;
38075     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
38076       unsigned NumOps = Ops.size();
38077       unsigned NumElts = VT.getVectorNumElements();
38078       if (Mask.size() == NumElts) {
38079         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
38080         Known.Zero.setAllBits(); Known.One.setAllBits();
38081         for (unsigned i = 0; i != NumElts; ++i) {
38082           if (!DemandedElts[i])
38083             continue;
38084           int M = Mask[i];
38085           if (M == SM_SentinelUndef) {
38086             // For UNDEF elements, we don't know anything about the common state
38087             // of the shuffle result.
38088             Known.resetAll();
38089             break;
38090           }
38091           if (M == SM_SentinelZero) {
38092             Known.One.clearAllBits();
38093             continue;
38094           }
38095           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
38096                  "Shuffle index out of range");
38097 
38098           unsigned OpIdx = (unsigned)M / NumElts;
38099           unsigned EltIdx = (unsigned)M % NumElts;
38100           if (Ops[OpIdx].getValueType() != VT) {
38101             // TODO - handle target shuffle ops with different value types.
38102             Known.resetAll();
38103             break;
38104           }
38105           DemandedOps[OpIdx].setBit(EltIdx);
38106         }
38107         // Known bits are the values that are shared by every demanded element.
38108         for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
38109           if (!DemandedOps[i])
38110             continue;
38111           KnownBits Known2 =
38112               DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
38113           Known = KnownBits::commonBits(Known, Known2);
38114         }
38115       }
38116     }
38117   }
38118 }
38119 
38120 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
38121     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
38122     unsigned Depth) const {
38123   EVT VT = Op.getValueType();
38124   unsigned VTBits = VT.getScalarSizeInBits();
38125   unsigned Opcode = Op.getOpcode();
38126   switch (Opcode) {
38127   case X86ISD::SETCC_CARRY:
38128     // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
38129     return VTBits;
38130 
38131   case X86ISD::VTRUNC: {
38132     SDValue Src = Op.getOperand(0);
38133     MVT SrcVT = Src.getSimpleValueType();
38134     unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
38135     assert(VTBits < NumSrcBits && "Illegal truncation input type");
38136     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
38137     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
38138     if (Tmp > (NumSrcBits - VTBits))
38139       return Tmp - (NumSrcBits - VTBits);
38140     return 1;
38141   }
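  // Worked example (hypothetical values): truncating v8i32 to v8i16 gives
  // NumSrcBits = 32 and VTBits = 16; a source with 20 known sign bits keeps
  // 20 - (32 - 16) = 4 sign bits, while 16 or fewer source sign bits falls
  // back to the minimum of 1.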
38142 
38143   case X86ISD::PACKSS: {
38144     // PACKSS is just a truncation if the sign bits extend to the packed size.
38145     APInt DemandedLHS, DemandedRHS;
38146     getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
38147                         DemandedRHS);
38148 
38149     unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
38150     unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
38151     if (!!DemandedLHS)
38152       Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
38153     if (!!DemandedRHS)
38154       Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
38155     unsigned Tmp = std::min(Tmp0, Tmp1);
38156     if (Tmp > (SrcBits - VTBits))
38157       return Tmp - (SrcBits - VTBits);
38158     return 1;
38159   }
38160 
38161   case X86ISD::VBROADCAST: {
38162     SDValue Src = Op.getOperand(0);
38163     if (!Src.getSimpleValueType().isVector())
38164       return DAG.ComputeNumSignBits(Src, Depth + 1);
38165     break;
38166   }
38167 
38168   case X86ISD::VSHLI: {
38169     SDValue Src = Op.getOperand(0);
38170     const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
38171     if (ShiftVal.uge(VTBits))
38172       return VTBits; // Shifted all bits out --> zero.
38173     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
38174     if (ShiftVal.uge(Tmp))
38175       return 1; // Shifted all sign bits out --> unknown.
38176     return Tmp - ShiftVal.getZExtValue();
38177   }
38178 
38179   case X86ISD::VSRAI: {
38180     SDValue Src = Op.getOperand(0);
38181     APInt ShiftVal = Op.getConstantOperandAPInt(1);
38182     if (ShiftVal.uge(VTBits - 1))
38183       return VTBits; // Sign splat.
38184     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
38185     ShiftVal += Tmp;
38186     return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
38187   }
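  // Worked example (hypothetical values): a v4i32 VSRAI by 5 whose source has
  // 3 known sign bits yields min(3 + 5, 32) = 8 sign bits; shifting by 31 or
  // more splats the sign bit and reports all 32 bits.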
38188 
38189   case X86ISD::FSETCC:
38190     // cmpss/cmpsd return zero/all-bits result values in the bottom element.
38191     if (VT == MVT::f32 || VT == MVT::f64 ||
38192         ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
38193       return VTBits;
38194     break;
38195 
38196   case X86ISD::PCMPGT:
38197   case X86ISD::PCMPEQ:
38198   case X86ISD::CMPP:
38199   case X86ISD::VPCOM:
38200   case X86ISD::VPCOMU:
38201     // Vector compares return zero/all-bits result values.
38202     return VTBits;
38203 
38204   case X86ISD::ANDNP: {
38205     unsigned Tmp0 =
38206         DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
38207     if (Tmp0 == 1) return 1; // Early out.
38208     unsigned Tmp1 =
38209         DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
38210     return std::min(Tmp0, Tmp1);
38211   }
38212 
38213   case X86ISD::CMOV: {
38214     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
38215     if (Tmp0 == 1) return 1;  // Early out.
38216     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
38217     return std::min(Tmp0, Tmp1);
38218   }
38219   }
38220 
38221   // Handle target shuffles.
38222   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
38223   if (isTargetShuffle(Opcode)) {
38224     SmallVector<int, 64> Mask;
38225     SmallVector<SDValue, 2> Ops;
38226     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
38227       unsigned NumOps = Ops.size();
38228       unsigned NumElts = VT.getVectorNumElements();
38229       if (Mask.size() == NumElts) {
38230         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
38231         for (unsigned i = 0; i != NumElts; ++i) {
38232           if (!DemandedElts[i])
38233             continue;
38234           int M = Mask[i];
38235           if (M == SM_SentinelUndef) {
38236             // For UNDEF elements, we don't know anything about the common state
38237             // of the shuffle result.
38238             return 1;
38239           } else if (M == SM_SentinelZero) {
38240             // Zero = all sign bits.
38241             continue;
38242           }
38243           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
38244                  "Shuffle index out of range");
38245 
38246           unsigned OpIdx = (unsigned)M / NumElts;
38247           unsigned EltIdx = (unsigned)M % NumElts;
38248           if (Ops[OpIdx].getValueType() != VT) {
38249             // TODO - handle target shuffle ops with different value types.
38250             return 1;
38251           }
38252           DemandedOps[OpIdx].setBit(EltIdx);
38253         }
38254         unsigned Tmp0 = VTBits;
38255         for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
38256           if (!DemandedOps[i])
38257             continue;
38258           unsigned Tmp1 =
38259               DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
38260           Tmp0 = std::min(Tmp0, Tmp1);
38261         }
38262         return Tmp0;
38263       }
38264     }
38265   }
38266 
38267   // Fallback case.
38268   return 1;
38269 }
38270 
38271 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
38272   if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
38273     return N->getOperand(0);
38274   return N;
38275 }
38276 
38277 // Helper to look for a normal load that can be narrowed into a vzload with the
38278 // specified VT and memory VT. Returns SDValue() on failure.
38279 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
38280                                   SelectionDAG &DAG) {
38281   // Can't if the load is volatile or atomic.
38282   if (!LN->isSimple())
38283     return SDValue();
38284 
38285   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
38286   SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
38287   return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
38288                                  LN->getPointerInfo(), LN->getOriginalAlign(),
38289                                  LN->getMemOperand()->getFlags());
38290 }
38291 
38292 // Attempt to match a combined shuffle mask against supported unary shuffle
38293 // instructions.
38294 // TODO: Investigate sharing more of this with shuffle lowering.
38295 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
38296                               bool AllowFloatDomain, bool AllowIntDomain,
38297                               SDValue V1, const SelectionDAG &DAG,
38298                               const X86Subtarget &Subtarget, unsigned &Shuffle,
38299                               MVT &SrcVT, MVT &DstVT) {
38300   unsigned NumMaskElts = Mask.size();
38301   unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
38302 
38303   // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
38304   if (Mask[0] == 0 &&
38305       (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
38306     if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
38307         (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38308          isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
38309       Shuffle = X86ISD::VZEXT_MOVL;
38310       if (MaskEltSize == 16)
38311         SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
38312       else
38313         SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
38314       return true;
38315     }
38316   }
38317 
38318   // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
38319   // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
38320   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
38321                          (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
38322     unsigned MaxScale = 64 / MaskEltSize;
38323     for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
38324       bool MatchAny = true;
38325       bool MatchZero = true;
38326       unsigned NumDstElts = NumMaskElts / Scale;
38327       for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
38328         if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
38329           MatchAny = MatchZero = false;
38330           break;
38331         }
38332         MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
38333         MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
38334       }
38335       if (MatchAny || MatchZero) {
38336         assert(MatchZero && "Failed to match zext but matched aext?");
38337         unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
38338         MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
38339                                             MVT::getIntegerVT(MaskEltSize);
38340         SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
38341 
38342         Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
38343         if (SrcVT.getVectorNumElements() != NumDstElts)
38344           Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);
38345 
38346         DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
38347         DstVT = MVT::getVectorVT(DstVT, NumDstElts);
38348         return true;
38349       }
38350     }
38351   }
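  // Worked example (hypothetical mask): a v8i16 mask {0, Z, 1, Z, 2, Z, 3, Z}
  // (Z = zeroable) matches with Scale = 2 as MatchZero, selecting a
  // ZERO_EXTEND_VECTOR_INREG with SrcVT = v8i16 and DstVT = v4i32.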
38352 
38353   // Match against a VZEXT_MOVL instruction. SSE1 only supports 32-bits (MOVSS).
38354   if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
38355        (MaskEltSize == 16 && Subtarget.hasFP16())) &&
38356       isUndefOrEqual(Mask[0], 0) &&
38357       isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
38358     Shuffle = X86ISD::VZEXT_MOVL;
38359     if (MaskEltSize == 16)
38360       SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
38361     else
38362       SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
38363     return true;
38364   }
38365 
38366   // Check if we have SSE3 which will let us use MOVDDUP etc. The
38367   // instructions are no slower than UNPCKLPD but have the option to
38368   // fold the input operand into even an unaligned memory load.
38369   if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
38370     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
38371       Shuffle = X86ISD::MOVDDUP;
38372       SrcVT = DstVT = MVT::v2f64;
38373       return true;
38374     }
38375     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
38376       Shuffle = X86ISD::MOVSLDUP;
38377       SrcVT = DstVT = MVT::v4f32;
38378       return true;
38379     }
38380     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
38381       Shuffle = X86ISD::MOVSHDUP;
38382       SrcVT = DstVT = MVT::v4f32;
38383       return true;
38384     }
38385   }
38386 
38387   if (MaskVT.is256BitVector() && AllowFloatDomain) {
38388     assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
38389     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
38390       Shuffle = X86ISD::MOVDDUP;
38391       SrcVT = DstVT = MVT::v4f64;
38392       return true;
38393     }
38394     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
38395                                   V1)) {
38396       Shuffle = X86ISD::MOVSLDUP;
38397       SrcVT = DstVT = MVT::v8f32;
38398       return true;
38399     }
38400     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
38401                                   V1)) {
38402       Shuffle = X86ISD::MOVSHDUP;
38403       SrcVT = DstVT = MVT::v8f32;
38404       return true;
38405     }
38406   }
38407 
38408   if (MaskVT.is512BitVector() && AllowFloatDomain) {
38409     assert(Subtarget.hasAVX512() &&
38410            "AVX512 required for 512-bit vector shuffles");
38411     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
38412                                   V1)) {
38413       Shuffle = X86ISD::MOVDDUP;
38414       SrcVT = DstVT = MVT::v8f64;
38415       return true;
38416     }
38417     if (isTargetShuffleEquivalent(
38418             MaskVT, Mask,
38419             {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
38420       Shuffle = X86ISD::MOVSLDUP;
38421       SrcVT = DstVT = MVT::v16f32;
38422       return true;
38423     }
38424     if (isTargetShuffleEquivalent(
38425             MaskVT, Mask,
38426             {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
38427       Shuffle = X86ISD::MOVSHDUP;
38428       SrcVT = DstVT = MVT::v16f32;
38429       return true;
38430     }
38431   }
38432 
38433   return false;
38434 }
38435 
38436 // Attempt to match a combined shuffle mask against supported unary immediate
38437 // permute instructions.
38438 // TODO: Investigate sharing more of this with shuffle lowering.
38439 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
38440                                      const APInt &Zeroable,
38441                                      bool AllowFloatDomain, bool AllowIntDomain,
38442                                      const SelectionDAG &DAG,
38443                                      const X86Subtarget &Subtarget,
38444                                      unsigned &Shuffle, MVT &ShuffleVT,
38445                                      unsigned &PermuteImm) {
38446   unsigned NumMaskElts = Mask.size();
38447   unsigned InputSizeInBits = MaskVT.getSizeInBits();
38448   unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
38449   MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
38450   bool ContainsZeros = isAnyZero(Mask);
38451 
38452   // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
38453   if (!ContainsZeros && MaskScalarSizeInBits == 64) {
38454     // Check for lane crossing permutes.
38455     if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
38456       // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
38457       if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
38458         Shuffle = X86ISD::VPERMI;
38459         ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
38460         PermuteImm = getV4X86ShuffleImm(Mask);
38461         return true;
38462       }
38463       if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
38464         SmallVector<int, 4> RepeatedMask;
38465         if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
38466           Shuffle = X86ISD::VPERMI;
38467           ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
38468           PermuteImm = getV4X86ShuffleImm(RepeatedMask);
38469           return true;
38470         }
38471       }
38472     } else if (AllowFloatDomain && Subtarget.hasAVX()) {
38473       // VPERMILPD can permute with a non-repeating shuffle.
38474       Shuffle = X86ISD::VPERMILPI;
38475       ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
38476       PermuteImm = 0;
38477       for (int i = 0, e = Mask.size(); i != e; ++i) {
38478         int M = Mask[i];
38479         if (M == SM_SentinelUndef)
38480           continue;
38481         assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
38482         PermuteImm |= (M & 1) << i;
38483       }
38484       return true;
38485     }
38486   }
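  // Worked example (hypothetical mask): a non-lane-crossing v4f64 mask
  // {1, 0, 3, 2} sets PermuteImm bit i whenever element i selects the odd
  // member of its pair, giving PermuteImm = 0b0101 (0x5) for a VPERMILPD that
  // swaps the two doubles within each 128-bit lane.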
38487 
38488   // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
38489   // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
38490   // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
38491   if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
38492       !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
38493     SmallVector<int, 4> RepeatedMask;
38494     if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
38495       // Narrow the repeated mask to create 32-bit element permutes.
38496       SmallVector<int, 4> WordMask = RepeatedMask;
38497       if (MaskScalarSizeInBits == 64)
38498         narrowShuffleMaskElts(2, RepeatedMask, WordMask);
38499 
38500       Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
38501       ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
38502       ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
38503       PermuteImm = getV4X86ShuffleImm(WordMask);
38504       return true;
38505     }
38506   }
38507 
38508   // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
38509   if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
38510       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38511        (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38512        (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
38513     SmallVector<int, 4> RepeatedMask;
38514     if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
38515       ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
38516       ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
38517 
38518       // PSHUFLW: permute lower 4 elements only.
38519       if (isUndefOrInRange(LoMask, 0, 4) &&
38520           isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
38521         Shuffle = X86ISD::PSHUFLW;
38522         ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
38523         PermuteImm = getV4X86ShuffleImm(LoMask);
38524         return true;
38525       }
38526 
38527       // PSHUFHW: permute upper 4 elements only.
38528       if (isUndefOrInRange(HiMask, 4, 8) &&
38529           isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
38530         // Offset the HiMask so that we can create the shuffle immediate.
38531         int OffsetHiMask[4];
38532         for (int i = 0; i != 4; ++i)
38533           OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
38534 
38535         Shuffle = X86ISD::PSHUFHW;
38536         ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
38537         PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
38538         return true;
38539       }
38540     }
38541   }
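  // Worked example (hypothetical mask): a repeated v8i16 mask
  // {0, 1, 2, 3, 7, 6, 5, 4} leaves the low half sequential, so it matches
  // PSHUFHW with OffsetHiMask = {3, 2, 1, 0} and PermuteImm = 0x1B (reverse
  // the upper four words of each 128-bit lane).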
38542 
38543   // Attempt to match against byte/bit shifts.
38544   if (AllowIntDomain &&
38545       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38546        (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38547        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38548     int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
38549                                        Mask, 0, Zeroable, Subtarget);
38550     if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
38551                          32 <= ShuffleVT.getScalarSizeInBits())) {
38552       PermuteImm = (unsigned)ShiftAmt;
38553       return true;
38554     }
38555   }
38556 
38557   // Attempt to match against bit rotates.
38558   if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
38559       ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
38560        Subtarget.hasAVX512())) {
38561     int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
38562                                             Subtarget, Mask);
38563     if (0 < RotateAmt) {
38564       Shuffle = X86ISD::VROTLI;
38565       PermuteImm = (unsigned)RotateAmt;
38566       return true;
38567     }
38568   }
38569 
38570   return false;
38571 }
38572 
38573 // Attempt to match a combined shuffle mask against supported binary
38574 // shuffle instructions.
38575 // TODO: Investigate sharing more of this with shuffle lowering.
38576 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
38577                                bool AllowFloatDomain, bool AllowIntDomain,
38578                                SDValue &V1, SDValue &V2, const SDLoc &DL,
38579                                SelectionDAG &DAG, const X86Subtarget &Subtarget,
38580                                unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
38581                                bool IsUnary) {
38582   unsigned NumMaskElts = Mask.size();
38583   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
38584   unsigned SizeInBits = MaskVT.getSizeInBits();
38585 
38586   if (MaskVT.is128BitVector()) {
38587     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
38588         AllowFloatDomain) {
38589       V2 = V1;
38590       V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
38591       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
38592       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
38593       return true;
38594     }
38595     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
38596         AllowFloatDomain) {
38597       V2 = V1;
38598       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
38599       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
38600       return true;
38601     }
38602     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
38603         Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
38604       std::swap(V1, V2);
38605       Shuffle = X86ISD::MOVSD;
38606       SrcVT = DstVT = MVT::v2f64;
38607       return true;
38608     }
38609     if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
38610         (AllowFloatDomain || !Subtarget.hasSSE41())) {
38611       Shuffle = X86ISD::MOVSS;
38612       SrcVT = DstVT = MVT::v4f32;
38613       return true;
38614     }
38615     if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
38616                                   DAG) &&
38617         Subtarget.hasFP16()) {
38618       Shuffle = X86ISD::MOVSH;
38619       SrcVT = DstVT = MVT::v8f16;
38620       return true;
38621     }
38622   }
38623 
38624   // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
38625   if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
38626       ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
38627       ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
38628     if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
38629                              Subtarget)) {
38630       DstVT = MaskVT;
38631       return true;
38632     }
38633   }
38634 
38635   // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
38636   if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
38637       (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38638       (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
38639       (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38640       (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
38641     if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
38642                               Subtarget)) {
38643       SrcVT = DstVT = MaskVT;
38644       if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
38645         SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
38646       return true;
38647     }
38648   }
38649 
38650   // Attempt to match against an OR if we're performing a blend shuffle and the
38651   // non-blended source element is zero in each case.
38652   // TODO: Handle cases where the V1/V2 sizes don't match SizeInBits.
38653   if (SizeInBits == V1.getValueSizeInBits() &&
38654       SizeInBits == V2.getValueSizeInBits() &&
38655       (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
38656       (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
38657     bool IsBlend = true;
38658     unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
38659     unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
38660     unsigned Scale1 = NumV1Elts / NumMaskElts;
38661     unsigned Scale2 = NumV2Elts / NumMaskElts;
38662     APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
38663     APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
38664     for (unsigned i = 0; i != NumMaskElts; ++i) {
38665       int M = Mask[i];
38666       if (M == SM_SentinelUndef)
38667         continue;
38668       if (M == SM_SentinelZero) {
38669         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
38670         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
38671         continue;
38672       }
38673       if (M == (int)i) {
38674         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
38675         continue;
38676       }
38677       if (M == (int)(i + NumMaskElts)) {
38678         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
38679         continue;
38680       }
38681       IsBlend = false;
38682       break;
38683     }
38684     if (IsBlend) {
38685       if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
38686           DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
38687         Shuffle = ISD::OR;
38688         SrcVT = DstVT = MaskVT.changeTypeToInteger();
38689         return true;
38690       }
38691       if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
38692         // FIXME: handle mismatched sizes?
38693         // TODO: investigate if `ISD::OR` handling in
38694         // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
38695         auto computeKnownBitsElementWise = [&DAG](SDValue V) {
38696           unsigned NumElts = V.getValueType().getVectorNumElements();
38697           KnownBits Known(NumElts);
38698           for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
38699             APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
38700             KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
38701             if (PeepholeKnown.isZero())
38702               Known.Zero.setBit(EltIdx);
38703             if (PeepholeKnown.isAllOnes())
38704               Known.One.setBit(EltIdx);
38705           }
38706           return Known;
38707         };
38708 
38709         KnownBits V1Known = computeKnownBitsElementWise(V1);
38710         KnownBits V2Known = computeKnownBitsElementWise(V2);
38711 
38712         for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
38713           int M = Mask[i];
38714           if (M == SM_SentinelUndef)
38715             continue;
38716           if (M == SM_SentinelZero) {
38717             IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
38718             continue;
38719           }
38720           if (M == (int)i) {
38721             IsBlend &= V2Known.Zero[i] || V1Known.One[i];
38722             continue;
38723           }
38724           if (M == (int)(i + NumMaskElts)) {
38725             IsBlend &= V1Known.Zero[i] || V2Known.One[i];
38726             continue;
38727           }
38728           llvm_unreachable("will not get here.");
38729         }
38730         if (IsBlend) {
38731           Shuffle = ISD::OR;
38732           SrcVT = DstVT = MaskVT.changeTypeToInteger();
38733           return true;
38734         }
38735       }
38736     }
38737   }
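  // Worked example (hypothetical 4-element mask): {0, 5, SM_SentinelZero, 3}
  // demands that V2 be zero in elements 0, 2 and 3 and that V1 be zero in
  // elements 1 and 2; if MaskedVectorIsZero proves both, the blend folds to a
  // plain ISD::OR of the integer-bitcast inputs.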
38738 
38739   return false;
38740 }
38741 
38742 static bool matchBinaryPermuteShuffle(
38743     MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
38744     bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
38745     const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
38746     unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
38747   unsigned NumMaskElts = Mask.size();
38748   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
38749 
38750   // Attempt to match against VALIGND/VALIGNQ rotate.
38751   if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
38752       ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
38753        (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
38754        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38755     if (!isAnyZero(Mask)) {
38756       int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
38757       if (0 < Rotation) {
38758         Shuffle = X86ISD::VALIGN;
38759         if (EltSizeInBits == 64)
38760           ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
38761         else
38762           ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
38763         PermuteImm = Rotation;
38764         return true;
38765       }
38766     }
38767   }
38768 
38769   // Attempt to match against PALIGNR byte rotate.
38770   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38771                          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38772                          (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
38773     int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
38774     if (0 < ByteRotation) {
38775       Shuffle = X86ISD::PALIGNR;
38776       ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
38777       PermuteImm = ByteRotation;
38778       return true;
38779     }
38780   }
38781 
38782   // Attempt to combine to X86ISD::BLENDI.
38783   if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
38784                             (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
38785       (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
38786     uint64_t BlendMask = 0;
38787     bool ForceV1Zero = false, ForceV2Zero = false;
38788     SmallVector<int, 8> TargetMask(Mask);
38789     if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
38790                             ForceV2Zero, BlendMask)) {
38791       if (MaskVT == MVT::v16i16) {
38792         // We can only use v16i16 PBLENDW if the lanes are repeated.
38793         SmallVector<int, 8> RepeatedMask;
38794         if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
38795                                         RepeatedMask)) {
38796           assert(RepeatedMask.size() == 8 &&
38797                  "Repeated mask size doesn't match!");
38798           PermuteImm = 0;
38799           for (int i = 0; i < 8; ++i)
38800             if (RepeatedMask[i] >= 8)
38801               PermuteImm |= 1 << i;
38802           V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38803           V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38804           Shuffle = X86ISD::BLENDI;
38805           ShuffleVT = MaskVT;
38806           return true;
38807         }
38808       } else {
38809         V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38810         V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38811         PermuteImm = (unsigned)BlendMask;
38812         Shuffle = X86ISD::BLENDI;
38813         ShuffleVT = MaskVT;
38814         return true;
38815       }
38816     }
38817   }
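  // Worked example (hypothetical mask): a v16i16 blend whose 128-bit repeated
  // mask is {0, 9, 2, 11, 4, 13, 6, 15} takes every odd element from V2, so
  // PermuteImm = 0b10101010 (0xAA) for the PBLENDW-style X86ISD::BLENDI.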
38818 
38819   // Attempt to combine to INSERTPS, but only if it has elements that need to
38820   // be set to zero.
38821   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
38822       MaskVT.is128BitVector() && isAnyZero(Mask) &&
38823       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
38824     Shuffle = X86ISD::INSERTPS;
38825     ShuffleVT = MVT::v4f32;
38826     return true;
38827   }
38828 
38829   // Attempt to combine to SHUFPD.
38830   if (AllowFloatDomain && EltSizeInBits == 64 &&
38831       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38832        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
38833        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38834     bool ForceV1Zero = false, ForceV2Zero = false;
38835     if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
38836                                PermuteImm, Mask, Zeroable)) {
38837       V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38838       V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38839       Shuffle = X86ISD::SHUFP;
38840       ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
38841       return true;
38842     }
38843   }
38844 
38845   // Attempt to combine to SHUFPS.
38846   if (AllowFloatDomain && EltSizeInBits == 32 &&
38847       ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
38848        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
38849        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38850     SmallVector<int, 4> RepeatedMask;
38851     if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
38852       // Match each half of the repeated mask to determine whether it's just
38853       // referencing one of the vectors, is zeroable, or is entirely undef.
38854       auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
38855         int M0 = RepeatedMask[Offset];
38856         int M1 = RepeatedMask[Offset + 1];
38857 
38858         if (isUndefInRange(RepeatedMask, Offset, 2)) {
38859           return DAG.getUNDEF(MaskVT);
38860         } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
38861           S0 = (SM_SentinelUndef == M0 ? -1 : 0);
38862           S1 = (SM_SentinelUndef == M1 ? -1 : 1);
38863           return getZeroVector(MaskVT, Subtarget, DAG, DL);
38864         } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
38865           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
38866           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
38867           return V1;
38868         } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
38869           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
38870           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
38871           return V2;
38872         }
38873 
38874         return SDValue();
38875       };
38876 
38877       int ShufMask[4] = {-1, -1, -1, -1};
38878       SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
38879       SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
38880 
38881       if (Lo && Hi) {
38882         V1 = Lo;
38883         V2 = Hi;
38884         Shuffle = X86ISD::SHUFP;
38885         ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
38886         PermuteImm = getV4X86ShuffleImm(ShufMask);
38887         return true;
38888       }
38889     }
38890   }
38891 
38892   // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
38893   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
38894       MaskVT.is128BitVector() &&
38895       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
38896     Shuffle = X86ISD::INSERTPS;
38897     ShuffleVT = MVT::v4f32;
38898     return true;
38899   }
38900 
38901   return false;
38902 }
38903 
38904 static SDValue combineX86ShuffleChainWithExtract(
38905     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38906     bool HasVariableMask, bool AllowVariableCrossLaneMask,
38907     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38908     const X86Subtarget &Subtarget);
38909 
38910 /// Combine an arbitrary chain of shuffles into a single instruction if
38911 /// possible.
38912 ///
38913 /// This is the leaf of the recursive combine below. When we have found some
38914 /// chain of single-use x86 shuffle instructions and accumulated the combined
38915 /// shuffle mask represented by them, this will try to pattern match that mask
38916 /// into either a single instruction if there is a special purpose instruction
38917 /// for this operation, or into a PSHUFB instruction which is a fully general
38918 /// instruction but should only be used to replace chains over a certain depth.
38919 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
38920                                       ArrayRef<int> BaseMask, int Depth,
38921                                       bool HasVariableMask,
38922                                       bool AllowVariableCrossLaneMask,
38923                                       bool AllowVariablePerLaneMask,
38924                                       SelectionDAG &DAG,
38925                                       const X86Subtarget &Subtarget) {
38926   assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
38927   assert((Inputs.size() == 1 || Inputs.size() == 2) &&
38928          "Unexpected number of shuffle inputs!");
38929 
38930   SDLoc DL(Root);
38931   MVT RootVT = Root.getSimpleValueType();
38932   unsigned RootSizeInBits = RootVT.getSizeInBits();
38933   unsigned NumRootElts = RootVT.getVectorNumElements();
38934 
38935   // Canonicalize shuffle input op to the requested type.
38936   auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
38937     if (VT.getSizeInBits() > Op.getValueSizeInBits())
38938       Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
38939     else if (VT.getSizeInBits() < Op.getValueSizeInBits())
38940       Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
38941     return DAG.getBitcast(VT, Op);
38942   };
38943 
38944   // Find the inputs that enter the chain. Note that multiple uses are OK
38945   // here; we're not going to remove the operands we find.
38946   bool UnaryShuffle = (Inputs.size() == 1);
38947   SDValue V1 = peekThroughBitcasts(Inputs[0]);
38948   SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
38949                              : peekThroughBitcasts(Inputs[1]));
38950 
38951   MVT VT1 = V1.getSimpleValueType();
38952   MVT VT2 = V2.getSimpleValueType();
38953   assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
38954          (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
38955 
38956   SDValue Res;
38957 
38958   unsigned NumBaseMaskElts = BaseMask.size();
38959   if (NumBaseMaskElts == 1) {
38960     assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
38961     return CanonicalizeShuffleInput(RootVT, V1);
38962   }
38963 
38964   bool OptForSize = DAG.shouldOptForSize();
38965   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
38966   bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
38967                      (RootVT.isFloatingPoint() && Depth >= 1) ||
38968                      (RootVT.is256BitVector() && !Subtarget.hasAVX2());
38969 
38970   // Don't combine if we are an AVX512/EVEX target and the mask element size
38971   // is different from the root element size - this would prevent writemasks
38972   // from being reused.
38973   bool IsMaskedShuffle = false;
38974   if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
38975     if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
38976         Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
38977       IsMaskedShuffle = true;
38978     }
38979   }
38980 
38981   // If we are shuffling a splat (and not introducing zeros) then we can just
38982   // use it directly. This also works for smaller elements, as they already
38983   // repeat across each mask element.
38984   if (UnaryShuffle && !isAnyZero(BaseMask) &&
38985       V1.getValueSizeInBits() >= RootSizeInBits &&
38986       (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
38987       DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
38988     return CanonicalizeShuffleInput(RootVT, V1);
38989   }
38990 
38991   SmallVector<int, 64> Mask(BaseMask);
38992 
38993   // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
38994   // etc. can be simplified.
38995   if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
38996     SmallVector<int> ScaledMask, IdentityMask;
38997     unsigned NumElts = VT1.getVectorNumElements();
38998     if (Mask.size() <= NumElts &&
38999         scaleShuffleElements(Mask, NumElts, ScaledMask)) {
39000       for (unsigned i = 0; i != NumElts; ++i)
39001         IdentityMask.push_back(i);
39002       if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
39003                                     V2))
39004         return CanonicalizeShuffleInput(RootVT, V1);
39005     }
39006   }
39007 
39008   // Handle 128/256-bit lane shuffles of 512-bit vectors.
39009   if (RootVT.is512BitVector() &&
39010       (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
39011     // If the upper subvectors are zeroable, then an extract+insert is more
39012     // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
39013     // to zero the upper subvectors.
39014     if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
39015       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39016         return SDValue(); // Nothing to do!
39017       assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
39018              "Unexpected lane shuffle");
39019       Res = CanonicalizeShuffleInput(RootVT, V1);
39020       unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
39021       bool UseZero = isAnyZero(Mask);
39022       Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
39023       return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
39024     }
39025 
39026     // Narrow shuffle mask to v4x128.
39027     SmallVector<int, 4> ScaledMask;
39028     assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
39029     narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
39030 
39031     // Try to lower to vshuf64x2/vshuf32x4.
39032     auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
39033                             ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
39034                             SelectionDAG &DAG) {
39035       unsigned PermMask = 0;
39036       // Ensure elements came from the same Op.
39037       SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
39038       for (int i = 0; i < 4; ++i) {
39039         assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
39040         if (ScaledMask[i] < 0)
39041           continue;
39042 
39043         SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
39044         unsigned OpIndex = i / 2;
39045         if (Ops[OpIndex].isUndef())
39046           Ops[OpIndex] = Op;
39047         else if (Ops[OpIndex] != Op)
39048           return SDValue();
39049 
39050         // Convert the 128-bit shuffle mask selection values into 128-bit
39051         // selection bits defined by a vshuf64x2 instruction's immediate control
39052         // byte.
39053         PermMask |= (ScaledMask[i] % 4) << (i * 2);
39054       }
39055 
39056       return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
39057                          CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
39058                          CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
39059                          DAG.getTargetConstant(PermMask, DL, MVT::i8));
39060     };
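    // Worked example (hypothetical mask): ScaledMask {0, 1, 6, 7} takes its
    // low two 128-bit lanes from V1 and its high two from V2, encoding
    // PermMask = (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4.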
39061 
39062     // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
39063     // doesn't work because our mask is for 128 bits and we don't have an MVT
39064     // to match that.
39065     bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
39066                        isUndefOrInRange(ScaledMask[1], 0, 2) &&
39067                        isUndefOrInRange(ScaledMask[2], 2, 4) &&
39068                        isUndefOrInRange(ScaledMask[3], 2, 4) &&
39069                        (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
39070                         ScaledMask[0] == (ScaledMask[2] % 2)) &&
39071                        (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
39072                         ScaledMask[1] == (ScaledMask[3] % 2));
39073 
39074     if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
39075       if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
39076         return SDValue(); // Nothing to do!
39077       MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
39078       if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
39079         return DAG.getBitcast(RootVT, V);
39080     }
39081   }
39082 
39083   // Handle 128-bit lane shuffles of 256-bit vectors.
39084   if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
39085     // If the upper half is zeroable, then an extract+insert is more optimal
39086     // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
39087     // zero the upper half.
39088     if (isUndefOrZero(Mask[1])) {
39089       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39090         return SDValue(); // Nothing to do!
39091       assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
39092       Res = CanonicalizeShuffleInput(RootVT, V1);
39093       Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
39094       return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
39095                             256);
39096     }
39097 
39098     // If we're inserting the low subvector, an insert-subvector 'concat'
39099     // pattern is quicker than VPERM2X128.
39100     // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
39101     if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
39102         !Subtarget.hasAVX2()) {
39103       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39104         return SDValue(); // Nothing to do!
39105       SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
39106       SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
39107       Hi = extractSubVector(Hi, 0, DAG, DL, 128);
39108       return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
39109     }
39110 
39111     if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
39112       return SDValue(); // Nothing to do!
39113 
39114     // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
39115     // we need to use the zeroing feature.
39116     // Prefer blends for sequential shuffles unless we are optimizing for size.
39117     if (UnaryShuffle &&
39118         !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
39119         (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
39120       unsigned PermMask = 0;
39121       PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
39122       PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
39123       return DAG.getNode(
39124           X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
39125           DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
39126     }
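    // Worked example (hypothetical mask): a unary 128-bit lane mask {1, 0}
    // encodes PermMask = 0x01, i.e. a VPERM2X128 that swaps the two halves;
    // an undef/zero lane sets the 0x8 zeroing bit in its nibble instead.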
39127 
39128     if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
39129       return SDValue(); // Nothing to do!
39130 
39131     // TODO - handle AVX512VL cases with X86ISD::SHUF128.
39132     if (!UnaryShuffle && !IsMaskedShuffle) {
39133       assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
39134              "Unexpected shuffle sentinel value");
39135       // Prefer blends to X86ISD::VPERM2X128.
39136       if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
39137         unsigned PermMask = 0;
39138         PermMask |= ((Mask[0] & 3) << 0);
39139         PermMask |= ((Mask[1] & 3) << 4);
39140         SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
39141         SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
39142         return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
39143                           CanonicalizeShuffleInput(RootVT, LHS),
39144                           CanonicalizeShuffleInput(RootVT, RHS),
39145                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
39146       }
39147     }
39148   }
39149 
39150   // For masks that have been widened to 128-bit elements or more,
39151   // narrow back down to 64-bit elements.
39152   if (BaseMaskEltSizeInBits > 64) {
39153     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
39154     int MaskScale = BaseMaskEltSizeInBits / 64;
39155     SmallVector<int, 64> ScaledMask;
39156     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
39157     Mask = std::move(ScaledMask);
39158   }
39159 
39160   // For masked shuffles, we're trying to match the root width for better
39161   // writemask folding, attempt to scale the mask.
39162   // TODO - variable shuffles might need this to be widened again.
39163   if (IsMaskedShuffle && NumRootElts > Mask.size()) {
39164     assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
39165     int MaskScale = NumRootElts / Mask.size();
39166     SmallVector<int, 64> ScaledMask;
39167     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
39168     Mask = std::move(ScaledMask);
39169   }
39170 
39171   unsigned NumMaskElts = Mask.size();
39172   unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
39173 
39174   // Determine the effective mask value type.
39175   FloatDomain &= (32 <= MaskEltSizeInBits);
39176   MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
39177                            : MVT::getIntegerVT(MaskEltSizeInBits);
39178   MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
39179 
39180   // Only allow legal mask types.
39181   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
39182     return SDValue();
39183 
39184   // Attempt to match the mask against known shuffle patterns.
39185   MVT ShuffleSrcVT, ShuffleVT;
39186   unsigned Shuffle, PermuteImm;
39187 
39188   // Which shuffle domains are permitted?
39189   // Permit domain crossing at higher combine depths.
39190   // TODO: Should we indicate which domain is preferred if both are allowed?
39191   bool AllowFloatDomain = FloatDomain || (Depth >= 3);
39192   bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
39193                         (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
39194 
39195   // Determine zeroable mask elements.
39196   APInt KnownUndef, KnownZero;
39197   resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
39198   APInt Zeroable = KnownUndef | KnownZero;
39199 
39200   if (UnaryShuffle) {
39201     // Attempt to match against broadcast-from-vector.
39202     // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
39203     if ((Subtarget.hasAVX2() ||
39204          (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
39205         (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
39206       if (isUndefOrEqual(Mask, 0)) {
39207         if (V1.getValueType() == MaskVT &&
39208             V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39209             X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
39210           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
39211             return SDValue(); // Nothing to do!
39212           Res = V1.getOperand(0);
39213           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
39214           return DAG.getBitcast(RootVT, Res);
39215         }
39216         if (Subtarget.hasAVX2()) {
39217           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
39218             return SDValue(); // Nothing to do!
39219           Res = CanonicalizeShuffleInput(MaskVT, V1);
39220           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
39221           return DAG.getBitcast(RootVT, Res);
39222         }
39223       }
39224     }
39225 
39226     if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
39227                           DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
39228         (!IsMaskedShuffle ||
39229          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39230       if (Depth == 0 && Root.getOpcode() == Shuffle)
39231         return SDValue(); // Nothing to do!
39232       Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39233       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
39234       return DAG.getBitcast(RootVT, Res);
39235     }
39236 
39237     if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
39238                                  AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
39239                                  PermuteImm) &&
39240         (!IsMaskedShuffle ||
39241          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39242       if (Depth == 0 && Root.getOpcode() == Shuffle)
39243         return SDValue(); // Nothing to do!
39244       Res = CanonicalizeShuffleInput(ShuffleVT, V1);
39245       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
39246                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39247       return DAG.getBitcast(RootVT, Res);
39248     }
39249   }
39250 
39251   // Attempt to combine to INSERTPS, but only if the inserted element has come
39252   // from a scalar.
39253   // TODO: Handle other insertions here as well?
39254   if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
39255       Subtarget.hasSSE41() &&
39256       !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
39257     if (MaskEltSizeInBits == 32) {
39258       SDValue SrcV1 = V1, SrcV2 = V2;
39259       if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
39260                                  DAG) &&
39261           SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
39262         if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
39263           return SDValue(); // Nothing to do!
39264         Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
39265                           CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
39266                           CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
39267                           DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39268         return DAG.getBitcast(RootVT, Res);
39269       }
39270     }
39271     if (MaskEltSizeInBits == 64 &&
39272         isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
39273         V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39274         V2.getScalarValueSizeInBits() <= 32) {
39275       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
39276         return SDValue(); // Nothing to do!
39277       PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
39278       Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
39279                         CanonicalizeShuffleInput(MVT::v4f32, V1),
39280                         CanonicalizeShuffleInput(MVT::v4f32, V2),
39281                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39282       return DAG.getBitcast(RootVT, Res);
39283     }
39284   }
39285 
39286   SDValue NewV1 = V1; // Save operands in case early exit happens.
39287   SDValue NewV2 = V2;
39288   if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
39289                          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
39290                          ShuffleVT, UnaryShuffle) &&
39291       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39292     if (Depth == 0 && Root.getOpcode() == Shuffle)
39293       return SDValue(); // Nothing to do!
39294     NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
39295     NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
39296     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
39297     return DAG.getBitcast(RootVT, Res);
39298   }
39299 
39300   NewV1 = V1; // Save operands in case early exit happens.
39301   NewV2 = V2;
39302   if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
39303                                 AllowIntDomain, NewV1, NewV2, DL, DAG,
39304                                 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
39305       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39306     if (Depth == 0 && Root.getOpcode() == Shuffle)
39307       return SDValue(); // Nothing to do!
39308     NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
39309     NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
39310     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
39311                       DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39312     return DAG.getBitcast(RootVT, Res);
39313   }
39314 
39315   // Typically from here on, we need an integer version of MaskVT.
39316   MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
39317   IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
39318 
39319   // Annoyingly, SSE4A instructions don't map into the above match helpers.
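        // For reference, EXTRQI extracts BitLen bits starting at bit BitIdx
        // from the low 64 bits of its source (zero-extending the result), and
        // INSERTQI inserts BitLen bits taken from the low 64 bits of V2 into
        // V1 at bit offset BitIdx.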
39320   if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
39321     uint64_t BitLen, BitIdx;
39322     if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
39323                             Zeroable)) {
39324       if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
39325         return SDValue(); // Nothing to do!
39326       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
39327       Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
39328                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
39329                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
39330       return DAG.getBitcast(RootVT, Res);
39331     }
39332 
39333     if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
39334       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
39335         return SDValue(); // Nothing to do!
39336       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
39337       V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
39338       Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
39339                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
39340                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
39341       return DAG.getBitcast(RootVT, Res);
39342     }
39343   }
39344 
39345   // Match shuffle against TRUNCATE patterns.
39346   if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
39347     // Match against a VTRUNC instruction, accounting for src/dst sizes.
39348     if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
39349                              Subtarget)) {
39350       bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
39351                         ShuffleSrcVT.getVectorNumElements();
39352       unsigned Opc =
39353           IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
39354       if (Depth == 0 && Root.getOpcode() == Opc)
39355         return SDValue(); // Nothing to do!
39356       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39357       Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
39358       if (ShuffleVT.getSizeInBits() < RootSizeInBits)
39359         Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
39360       return DAG.getBitcast(RootVT, Res);
39361     }
39362 
39363     // Do we need a more general binary truncation pattern?
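          // e.g. a v8i16 mask {0,2,4,6,8,10,12,14} over two v8i16 inputs is
          // rewritten below as truncate(concat(V1,V2) as v8i32 -> v8i16),
          // which keeps the low 16 bits of each 32-bit element.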
39364     if (RootSizeInBits < 512 &&
39365         ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
39366          (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
39367         (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
39368         isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
39369       // Bail if this was already a truncation or PACK node.
39370       // We sometimes fail to match PACK if we demand known undef elements.
39371       if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
39372                          Root.getOpcode() == X86ISD::PACKSS ||
39373                          Root.getOpcode() == X86ISD::PACKUS))
39374         return SDValue(); // Nothing to do!
39375       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
39376       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
39377       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39378       V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
39379       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
39380       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
39381       Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
39382       Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
39383       return DAG.getBitcast(RootVT, Res);
39384     }
39385   }
39386 
39387   // Don't try to re-form single instruction chains under any circumstances now
39388   // that we've done encoding canonicalization for them.
39389   if (Depth < 1)
39390     return SDValue();
39391 
39392   // Depth threshold above which we can efficiently use variable mask shuffles.
39393   int VariableCrossLaneShuffleDepth =
39394       Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
39395   int VariablePerLaneShuffleDepth =
39396       Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
39397   AllowVariableCrossLaneMask &=
39398       (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
39399   AllowVariablePerLaneMask &=
39400       (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
39401   // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
39402   // higher depth before combining them.
39403   bool AllowBWIVPERMV3 =
39404       (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
39405 
39406   bool MaskContainsZeros = isAnyZero(Mask);
39407 
39408   if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
39409     // If we have a single input lane-crossing shuffle then lower to VPERMV.
39410     if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
39411       if (Subtarget.hasAVX2() &&
39412           (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
39413         SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
39414         Res = CanonicalizeShuffleInput(MaskVT, V1);
39415         Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
39416         return DAG.getBitcast(RootVT, Res);
39417       }
39418       // AVX512 variants (non-VLX will pad to 512-bit shuffles).
39419       if ((Subtarget.hasAVX512() &&
39420            (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39421             MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
39422           (Subtarget.hasBWI() &&
39423            (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39424           (Subtarget.hasVBMI() &&
39425            (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
39426         V1 = CanonicalizeShuffleInput(MaskVT, V1);
39427         V2 = DAG.getUNDEF(MaskVT);
39428         Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39429         return DAG.getBitcast(RootVT, Res);
39430       }
39431     }
39432 
39433     // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
39434     // vector as the second source (non-VLX will pad to 512-bit shuffles).
39435     if (UnaryShuffle && AllowVariableCrossLaneMask &&
39436         ((Subtarget.hasAVX512() &&
39437           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39438            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
39439            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
39440            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
39441          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39442           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39443          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39444           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
39445       // Adjust shuffle mask - replace SM_SentinelZero with second source index.
39446       for (unsigned i = 0; i != NumMaskElts; ++i)
39447         if (Mask[i] == SM_SentinelZero)
39448           Mask[i] = NumMaskElts + i;
39449       V1 = CanonicalizeShuffleInput(MaskVT, V1);
39450       V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
39451       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39452       return DAG.getBitcast(RootVT, Res);
39453     }
39454 
39455     // If that failed and either input is extracted then try to combine as a
39456     // shuffle with the larger type.
39457     if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
39458             Inputs, Root, BaseMask, Depth, HasVariableMask,
39459             AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
39460             Subtarget))
39461       return WideShuffle;
39462 
39463     // If we have a dual input lane-crossing shuffle then lower to VPERMV3
39464     // (non-VLX will pad to 512-bit shuffles).
39465     if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
39466         ((Subtarget.hasAVX512() &&
39467           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39468            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
39469            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
39470            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
39471          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39472           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39473          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39474           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
39475       V1 = CanonicalizeShuffleInput(MaskVT, V1);
39476       V2 = CanonicalizeShuffleInput(MaskVT, V2);
39477       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39478       return DAG.getBitcast(RootVT, Res);
39479     }
39480     return SDValue();
39481   }
39482 
39483   // See if we can combine a single input shuffle with zeros to a bit-mask,
39484   // which is much simpler than any shuffle.
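        // e.g. a v4i32 mask {0, SM_SentinelZero, 2, SM_SentinelUndef} becomes
        // an AND of V1 with the constant vector {-1, 0, -1, undef}.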
39485   if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
39486       isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
39487       DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
39488     APInt Zero = APInt::getZero(MaskEltSizeInBits);
39489     APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
39490     APInt UndefElts(NumMaskElts, 0);
39491     SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
39492     for (unsigned i = 0; i != NumMaskElts; ++i) {
39493       int M = Mask[i];
39494       if (M == SM_SentinelUndef) {
39495         UndefElts.setBit(i);
39496         continue;
39497       }
39498       if (M == SM_SentinelZero)
39499         continue;
39500       EltBits[i] = AllOnes;
39501     }
39502     SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
39503     Res = CanonicalizeShuffleInput(MaskVT, V1);
39504     unsigned AndOpcode =
39505         MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
39506     Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
39507     return DAG.getBitcast(RootVT, Res);
39508   }
39509 
39510   // If we have a single input shuffle with different shuffle patterns in
39511   // the 128-bit lanes, use the variable mask form of VPERMILPS.
39512   // TODO: Combine other mask types at higher depths.
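        // e.g. a v8f32 mask {1,0,3,2, 6,7,4,5} becomes a VPERMILPV with the
        // per-lane index vector {1,0,3,2, 2,3,0,1} (each index is M % 4).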
39513   if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
39514       ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
39515        (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
39516     SmallVector<SDValue, 16> VPermIdx;
39517     for (int M : Mask) {
39518       SDValue Idx =
39519           M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
39520       VPermIdx.push_back(Idx);
39521     }
39522     SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
39523     Res = CanonicalizeShuffleInput(MaskVT, V1);
39524     Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
39525     return DAG.getBitcast(RootVT, Res);
39526   }
39527 
39528   // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
39529   // to VPERMIL2PD/VPERMIL2PS.
39530   if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
39531       (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
39532        MaskVT == MVT::v8f32)) {
39533     // VPERMIL2 Operation.
39534     // Bits[3] - Match Bit.
39535     // Bits[2:1] - (Per Lane) PD Shuffle Mask.
39536     // Bits[2:0] - (Per Lane) PS Shuffle Mask.
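          // e.g. for a v4f32 mask {4, 0, SM_SentinelZero, 3} the selector
          // vector built below is {4, 0, 8, 3} with M2ZImm = 2: selector
          // values 4-7 pick from V2's lane, and the value 8 marks a zeroable
          // element via the match bit.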
39537     unsigned NumLanes = MaskVT.getSizeInBits() / 128;
39538     unsigned NumEltsPerLane = NumMaskElts / NumLanes;
39539     SmallVector<int, 8> VPerm2Idx;
39540     unsigned M2ZImm = 0;
39541     for (int M : Mask) {
39542       if (M == SM_SentinelUndef) {
39543         VPerm2Idx.push_back(-1);
39544         continue;
39545       }
39546       if (M == SM_SentinelZero) {
39547         M2ZImm = 2;
39548         VPerm2Idx.push_back(8);
39549         continue;
39550       }
39551       int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
39552       Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
39553       VPerm2Idx.push_back(Index);
39554     }
39555     V1 = CanonicalizeShuffleInput(MaskVT, V1);
39556     V2 = CanonicalizeShuffleInput(MaskVT, V2);
39557     SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
39558     Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
39559                       DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
39560     return DAG.getBitcast(RootVT, Res);
39561   }
39562 
39563   // If we have 3 or more shuffle instructions or a chain involving a variable
39564   // mask, we can replace them with a single PSHUFB instruction profitably.
39565   // Intel's manuals suggest only using PSHUFB if doing so replaces 5
39566   // instructions, but in practice PSHUFB tends to be *very* fast, so we're
39567   // more aggressive.
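        // e.g. a v4i32 mask {1, 0, 3, 2} expands to the byte mask
        // {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}; zeroable elements map
        // to 0x80, which makes PSHUFB write a zero byte.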
39568   if (UnaryShuffle && AllowVariablePerLaneMask &&
39569       ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
39570        (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
39571        (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
39572     SmallVector<SDValue, 16> PSHUFBMask;
39573     int NumBytes = RootVT.getSizeInBits() / 8;
39574     int Ratio = NumBytes / NumMaskElts;
39575     for (int i = 0; i < NumBytes; ++i) {
39576       int M = Mask[i / Ratio];
39577       if (M == SM_SentinelUndef) {
39578         PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
39579         continue;
39580       }
39581       if (M == SM_SentinelZero) {
39582         PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
39583         continue;
39584       }
39585       M = Ratio * M + i % Ratio;
39586       assert((M / 16) == (i / 16) && "Lane crossing detected");
39587       PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
39588     }
39589     MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
39590     Res = CanonicalizeShuffleInput(ByteVT, V1);
39591     SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
39592     Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
39593     return DAG.getBitcast(RootVT, Res);
39594   }
39595 
39596   // With XOP, if we have a 128-bit binary input shuffle we can always combine
39597   // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
39598   // slower than PSHUFB on targets that support both.
39599   if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
39600       Subtarget.hasXOP()) {
39601     // VPPERM Mask Operation
39602     // Bits[4:0] - Byte Index (0 - 31)
39603     // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
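          // Byte indices 0-15 select from V1 and 16-31 from V2; the 0x80
          // constant used below for zeroable elements encodes operation 4
          // (ZERO) in bits[7:5].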
39604     SmallVector<SDValue, 16> VPPERMMask;
39605     int NumBytes = 16;
39606     int Ratio = NumBytes / NumMaskElts;
39607     for (int i = 0; i < NumBytes; ++i) {
39608       int M = Mask[i / Ratio];
39609       if (M == SM_SentinelUndef) {
39610         VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
39611         continue;
39612       }
39613       if (M == SM_SentinelZero) {
39614         VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
39615         continue;
39616       }
39617       M = Ratio * M + i % Ratio;
39618       VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
39619     }
39620     MVT ByteVT = MVT::v16i8;
39621     V1 = CanonicalizeShuffleInput(ByteVT, V1);
39622     V2 = CanonicalizeShuffleInput(ByteVT, V2);
39623     SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
39624     Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
39625     return DAG.getBitcast(RootVT, Res);
39626   }
39627 
39628   // If that failed and either input is extracted then try to combine as a
39629   // shuffle with the larger type.
39630   if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
39631           Inputs, Root, BaseMask, Depth, HasVariableMask,
39632           AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
39633     return WideShuffle;
39634 
39635   // If we have a dual input shuffle then lower to VPERMV3
39636   // (non-VLX will pad to 512-bit shuffles).
39637   if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
39638       ((Subtarget.hasAVX512() &&
39639         (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
39640          MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
39641          MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
39642          MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
39643          MaskVT == MVT::v16i32)) ||
39644        (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39645         (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
39646          MaskVT == MVT::v32i16)) ||
39647        (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39648         (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
39649          MaskVT == MVT::v64i8)))) {
39650     V1 = CanonicalizeShuffleInput(MaskVT, V1);
39651     V2 = CanonicalizeShuffleInput(MaskVT, V2);
39652     Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39653     return DAG.getBitcast(RootVT, Res);
39654   }
39655 
39656   // Failed to find any combines.
39657   return SDValue();
39658 }
39659 
39660 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
39661 // instruction if possible.
39662 //
39663 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
39664 // type size to attempt to combine:
39665 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
39666 // -->
39667 // extract_subvector(shuffle(x,y,m2),0)
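      // e.g. for v8f32 x and y,
      // shuffle(extract_subvector(x,4),extract_subvector(y,4),{0,4,1,5})
      // widens to extract_subvector(shuffle(x,y,{4,12,5,13,u,u,u,u}),0).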
39668 static SDValue combineX86ShuffleChainWithExtract(
39669     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
39670     bool HasVariableMask, bool AllowVariableCrossLaneMask,
39671     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
39672     const X86Subtarget &Subtarget) {
39673   unsigned NumMaskElts = BaseMask.size();
39674   unsigned NumInputs = Inputs.size();
39675   if (NumInputs == 0)
39676     return SDValue();
39677 
39678   EVT RootVT = Root.getValueType();
39679   unsigned RootSizeInBits = RootVT.getSizeInBits();
39680   assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
39681 
39682   // Bail if we have any smaller inputs.
39683   if (llvm::any_of(Inputs, [RootSizeInBits](SDValue Input) {
39684         return Input.getValueSizeInBits() < RootSizeInBits;
39685       }))
39686     return SDValue();
39687 
39688   SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
39689   SmallVector<unsigned, 4> Offsets(NumInputs, 0);
39690 
39691   // Peek through subvectors.
39692   // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
39693   unsigned WideSizeInBits = RootSizeInBits;
39694   for (unsigned i = 0; i != NumInputs; ++i) {
39695     SDValue &Src = WideInputs[i];
39696     unsigned &Offset = Offsets[i];
39697     Src = peekThroughBitcasts(Src);
39698     EVT BaseVT = Src.getValueType();
39699     while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
39700       Offset += Src.getConstantOperandVal(1);
39701       Src = Src.getOperand(0);
39702     }
39703     WideSizeInBits = std::max(WideSizeInBits,
39704                               (unsigned)Src.getValueSizeInBits());
39705     assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
39706            "Unexpected subvector extraction");
39707     Offset /= BaseVT.getVectorNumElements();
39708     Offset *= NumMaskElts;
39709   }
39710 
39711   // Bail if we're always extracting from the lowest subvectors;
39712   // combineX86ShuffleChain should match this for the current width.
39713   if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
39714     return SDValue();
39715 
39716   unsigned Scale = WideSizeInBits / RootSizeInBits;
39717   assert((WideSizeInBits % RootSizeInBits) == 0 &&
39718          "Unexpected subvector extraction");
39719 
39720   // If the src vector types aren't the same, see if we can extend
39721   // them to match each other.
39722   // TODO: Support different scalar types?
39723   EVT WideSVT = WideInputs[0].getValueType().getScalarType();
39724   if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
39725         return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
39726                Op.getValueType().getScalarType() != WideSVT;
39727       }))
39728     return SDValue();
39729 
39730   // Create new mask for larger type.
39731   for (unsigned i = 1; i != NumInputs; ++i)
39732     Offsets[i] += i * Scale * NumMaskElts;
39733 
39734   SmallVector<int, 64> WideMask(BaseMask);
39735   for (int &M : WideMask) {
39736     if (M < 0)
39737       continue;
39738     M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
39739   }
39740   WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
39741 
39742   // Remove unused/repeated shuffle source ops.
39743   resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
39744   assert(!WideInputs.empty() && "Shuffle with no inputs detected");
39745 
39746   if (WideInputs.size() > 2)
39747     return SDValue();
39748 
39749   // Increase depth for every upper subvector we've peeked through.
39750   Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
39751 
39752   // Attempt to combine wider chain.
39753   // TODO: Can we use a better Root?
39754   SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
39755                              WideInputs.back().getValueSizeInBits()
39756                          ? WideInputs.front()
39757                          : WideInputs.back();
39758   if (SDValue WideShuffle =
39759           combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
39760                                  HasVariableMask, AllowVariableCrossLaneMask,
39761                                  AllowVariablePerLaneMask, DAG, Subtarget)) {
39762     WideShuffle =
39763         extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
39764     return DAG.getBitcast(RootVT, WideShuffle);
39765   }
39766   return SDValue();
39767 }
39768 
39769 // Canonicalize the combined shuffle mask chain with horizontal ops.
39770 // NOTE: This may update the Ops and Mask.
39771 static SDValue canonicalizeShuffleMaskWithHorizOp(
39772     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
39773     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
39774     const X86Subtarget &Subtarget) {
39775   if (Mask.empty() || Ops.empty())
39776     return SDValue();
39777 
39778   SmallVector<SDValue> BC;
39779   for (SDValue Op : Ops)
39780     BC.push_back(peekThroughBitcasts(Op));
39781 
39782   // All ops must be the same horizop + type.
39783   SDValue BC0 = BC[0];
39784   EVT VT0 = BC0.getValueType();
39785   unsigned Opcode0 = BC0.getOpcode();
39786   if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
39787         return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
39788       }))
39789     return SDValue();
39790 
39791   bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
39792                   Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
39793   bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
39794   if (!isHoriz && !isPack)
39795     return SDValue();
39796 
39797   // Do all ops have a single use?
39798   bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
39799     return Op.hasOneUse() &&
39800            peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
39801   });
39802 
39803   int NumElts = VT0.getVectorNumElements();
39804   int NumLanes = VT0.getSizeInBits() / 128;
39805   int NumEltsPerLane = NumElts / NumLanes;
39806   int NumHalfEltsPerLane = NumEltsPerLane / 2;
39807   MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
39808   unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39809 
39810   if (NumEltsPerLane >= 4 &&
39811       (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
39812     SmallVector<int> LaneMask, ScaledMask;
39813     if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
39814         scaleShuffleElements(LaneMask, 4, ScaledMask)) {
39815       // See if we can remove the shuffle by reordering the HOP chain so that
39816       // the HOP args are pre-shuffled.
39817       // TODO: Generalize to any sized/depth chain.
39818       // TODO: Add support for PACKSS/PACKUS.
39819       if (isHoriz) {
39820         // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
39821         auto GetHOpSrc = [&](int M) {
39822           if (M == SM_SentinelUndef)
39823             return DAG.getUNDEF(VT0);
39824           if (M == SM_SentinelZero)
39825             return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
39826           SDValue Src0 = BC[M / 4];
39827           SDValue Src1 = Src0.getOperand((M % 4) >= 2);
39828           if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
39829             return Src1.getOperand(M % 2);
39830           return SDValue();
39831         };
39832         SDValue M0 = GetHOpSrc(ScaledMask[0]);
39833         SDValue M1 = GetHOpSrc(ScaledMask[1]);
39834         SDValue M2 = GetHOpSrc(ScaledMask[2]);
39835         SDValue M3 = GetHOpSrc(ScaledMask[3]);
39836         if (M0 && M1 && M2 && M3) {
39837           SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
39838           SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
39839           return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
39840         }
39841       }
39842       // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
39843       if (Ops.size() >= 2) {
39844         SDValue LHS, RHS;
39845         auto GetHOpSrc = [&](int M, int &OutM) {
39846           // TODO: Support SM_SentinelZero
39847           if (M < 0)
39848             return M == SM_SentinelUndef;
39849           SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
39850           if (!LHS || LHS == Src) {
39851             LHS = Src;
39852             OutM = (M % 2);
39853             return true;
39854           }
39855           if (!RHS || RHS == Src) {
39856             RHS = Src;
39857             OutM = (M % 2) + 2;
39858             return true;
39859           }
39860           return false;
39861         };
39862         int PostMask[4] = {-1, -1, -1, -1};
39863         if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
39864             GetHOpSrc(ScaledMask[1], PostMask[1]) &&
39865             GetHOpSrc(ScaledMask[2], PostMask[2]) &&
39866             GetHOpSrc(ScaledMask[3], PostMask[3])) {
39867           LHS = DAG.getBitcast(SrcVT, LHS);
39868           RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
39869           SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
39870           // Use SHUFPS for the permute so this will work on SSE3 targets,
39871           // shuffle combining and domain handling will simplify this later on.
39872           MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
39873           Res = DAG.getBitcast(ShuffleVT, Res);
39874           return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
39875                              getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
39876         }
39877       }
39878     }
39879   }
39880 
39881   if (2 < Ops.size())
39882     return SDValue();
39883 
39884   SDValue BC1 = BC[BC.size() - 1];
39885   if (Mask.size() == VT0.getVectorNumElements()) {
39886     // Canonicalize binary shuffles of horizontal ops that use the
39887     // same sources to a unary shuffle.
39888     // TODO: Try to perform this fold even if the shuffle remains.
39889     if (Ops.size() == 2) {
39890       auto ContainsOps = [](SDValue HOp, SDValue Op) {
39891         return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
39892       };
39893       // Commute if all BC0's ops are contained in BC1.
39894       if (ContainsOps(BC1, BC0.getOperand(0)) &&
39895           ContainsOps(BC1, BC0.getOperand(1))) {
39896         ShuffleVectorSDNode::commuteMask(Mask);
39897         std::swap(Ops[0], Ops[1]);
39898         std::swap(BC0, BC1);
39899       }
39900 
39901       // If BC1 can be represented by BC0, then convert to unary shuffle.
39902       if (ContainsOps(BC0, BC1.getOperand(0)) &&
39903           ContainsOps(BC0, BC1.getOperand(1))) {
39904         for (int &M : Mask) {
39905           if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
39906             continue;
39907           int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
39908           M -= NumElts + (SubLane * NumHalfEltsPerLane);
39909           if (BC1.getOperand(SubLane) != BC0.getOperand(0))
39910             M += NumHalfEltsPerLane;
39911         }
39912       }
39913     }
39914 
39915     // Canonicalize unary horizontal ops to only refer to lower halves.
39916     for (int i = 0; i != NumElts; ++i) {
39917       int &M = Mask[i];
39918       if (isUndefOrZero(M))
39919         continue;
39920       if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
39921           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
39922         M -= NumHalfEltsPerLane;
39923       if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
39924           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
39925         M -= NumHalfEltsPerLane;
39926     }
39927   }
39928 
39929   // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
39930   // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
39931   // represents the LHS/RHS inputs for the lower/upper halves.
39932   SmallVector<int, 16> TargetMask128, WideMask128;
39933   if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
39934       scaleShuffleElements(TargetMask128, 2, WideMask128)) {
39935     assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
39936     bool SingleOp = (Ops.size() == 1);
39937     if (isPack || OneUseOps ||
39938         shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
39939       SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
39940       SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
39941       Lo = Lo.getOperand(WideMask128[0] & 1);
39942       Hi = Hi.getOperand(WideMask128[1] & 1);
39943       if (SingleOp) {
39944         SDValue Undef = DAG.getUNDEF(SrcVT);
39945         SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
39946         Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
39947         Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
39948         Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
39949         Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
39950       }
39951       return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
39952     }
39953   }
39954 
39955   return SDValue();
39956 }
39957 
39958 // Attempt to constant fold all of the constant source ops.
39959 // Returns true if the entire shuffle is folded to a constant.
39960 // TODO: Extend this to merge multiple constant Ops and update the mask.
39961 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
39962                                            ArrayRef<int> Mask, SDValue Root,
39963                                            bool HasVariableMask,
39964                                            SelectionDAG &DAG,
39965                                            const X86Subtarget &Subtarget) {
39966   MVT VT = Root.getSimpleValueType();
39967 
39968   unsigned SizeInBits = VT.getSizeInBits();
39969   unsigned NumMaskElts = Mask.size();
39970   unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
39971   unsigned NumOps = Ops.size();
39972 
39973   // Extract constant bits from each source op.
39974   bool OneUseConstantOp = false;
39975   SmallVector<APInt, 16> UndefEltsOps(NumOps);
39976   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
39977   for (unsigned i = 0; i != NumOps; ++i) {
39978     SDValue SrcOp = Ops[i];
39979     OneUseConstantOp |= SrcOp.hasOneUse();
39980     if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
39981                                        RawBitsOps[i]))
39982       return SDValue();
39983   }
39984 
39985   // If we're optimizing for size, only fold if at least one of the constants is
39986   // only used once or the combined shuffle has included a variable mask
39987   // shuffle; this is to avoid constant pool bloat.
39988   bool IsOptimizingSize = DAG.shouldOptForSize();
39989   if (IsOptimizingSize && !OneUseConstantOp && !HasVariableMask)
39990     return SDValue();
39991 
39992   // Shuffle the constant bits according to the mask.
39993   SDLoc DL(Root);
39994   APInt UndefElts(NumMaskElts, 0);
39995   APInt ZeroElts(NumMaskElts, 0);
39996   APInt ConstantElts(NumMaskElts, 0);
39997   SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
39998                                         APInt::getZero(MaskSizeInBits));
39999   for (unsigned i = 0; i != NumMaskElts; ++i) {
40000     int M = Mask[i];
40001     if (M == SM_SentinelUndef) {
40002       UndefElts.setBit(i);
40003       continue;
40004     } else if (M == SM_SentinelZero) {
40005       ZeroElts.setBit(i);
40006       continue;
40007     }
40008     assert(0 <= M && M < (int)(NumMaskElts * NumOps));
40009 
40010     unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
40011     unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
40012 
40013     auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
40014     if (SrcUndefElts[SrcMaskIdx]) {
40015       UndefElts.setBit(i);
40016       continue;
40017     }
40018 
40019     auto &SrcEltBits = RawBitsOps[SrcOpIdx];
40020     APInt &Bits = SrcEltBits[SrcMaskIdx];
40021     if (!Bits) {
40022       ZeroElts.setBit(i);
40023       continue;
40024     }
40025 
40026     ConstantElts.setBit(i);
40027     ConstantBitData[i] = Bits;
40028   }
40029   assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
40030 
40031   // Attempt to create a zero vector.
40032   if ((UndefElts | ZeroElts).isAllOnes())
40033     return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
40034 
40035   // Create the constant data.
40036   MVT MaskSVT;
40037   if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
40038     MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
40039   else
40040     MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
40041 
40042   MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
40043   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
40044     return SDValue();
40045 
40046   SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
40047   return DAG.getBitcast(VT, CstOp);
40048 }
40049 
40050 namespace llvm {
40051   namespace X86 {
40052     enum {
40053       MaxShuffleCombineDepth = 8
40054     };
40055   }
40056 } // namespace llvm
40057 
40058 /// Fully generic combining of x86 shuffle instructions.
40059 ///
40060 /// This should be the last combine run over the x86 shuffle instructions. Once
40061 /// they have been fully optimized, this will recursively consider all chains
40062 /// of single-use shuffle instructions, build a generic model of the cumulative
40063 /// shuffle operation, and check for simpler instructions which implement this
40064 /// operation. We use this primarily for two purposes:
40065 ///
40066 /// 1) Collapse generic shuffles to specialized single instructions when
40067 ///    equivalent. In most cases, this is just an encoding size win, but
40068 ///    sometimes we will collapse multiple generic shuffles into a single
40069 ///    special-purpose shuffle.
40070 /// 2) Look for sequences of shuffle instructions with 3 or more total
40071 ///    instructions, and replace them with the slightly more expensive SSSE3
40072 ///    PSHUFB instruction if available. We do this as the last combining step
40073 ///    to ensure we avoid using PSHUFB if we can implement the shuffle with
40074 ///    a suitable short sequence of other instructions. The PSHUFB will either
40075 ///    use a register or have to read from memory and so is slightly (but only
40076 ///    slightly) more expensive than the other shuffle instructions.
40077 ///
40078 /// Because this is inherently a quadratic operation (for each shuffle in
40079 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
40080 /// This should never be an issue in practice as the shuffle lowering doesn't
40081 /// produce sequences of more than 8 instructions.
40082 ///
40083 /// FIXME: We will currently miss some cases where the redundant shuffling
40084 /// would simplify under the threshold for PSHUFB formation because of
40085 /// combine-ordering. To fix this, we should do the redundant instruction
40086 /// combining in this recursive walk.
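      /// For example, two chained PSHUFDs such as pshufd(pshufd(x, 0xB1), 0x4E)
      /// compose to the single mask {3,2,1,0} and collapse to pshufd(x, 0x1B).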
40087 static SDValue combineX86ShufflesRecursively(
40088     ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
40089     ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
40090     unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
40091     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
40092     const X86Subtarget &Subtarget) {
40093   assert(RootMask.size() > 0 &&
40094          (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
40095          "Illegal shuffle root mask");
40096   MVT RootVT = Root.getSimpleValueType();
40097   assert(RootVT.isVector() && "Shuffles operate on vector types!");
40098   unsigned RootSizeInBits = RootVT.getSizeInBits();
40099 
40100   // Bound the depth of our recursive combine because this is ultimately
40101   // quadratic in nature.
40102   if (Depth >= MaxDepth)
40103     return SDValue();
40104 
40105   // Directly rip through bitcasts to find the underlying operand.
40106   SDValue Op = SrcOps[SrcOpIndex];
40107   Op = peekThroughOneUseBitcasts(Op);
40108 
40109   EVT VT = Op.getValueType();
40110   if (!VT.isVector() || !VT.isSimple())
40111     return SDValue(); // Bail if we hit a non-simple non-vector.
40112 
40113   // FIXME: Just bail on f16 for now.
40114   if (VT.getVectorElementType() == MVT::f16)
40115     return SDValue();
40116 
40117   assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
40118          "Can only combine shuffles upto size of the root op.");
40119 
40120   // Create a demanded elts mask from the referenced elements of Op.
40121   APInt OpDemandedElts = APInt::getZero(RootMask.size());
40122   for (int M : RootMask) {
40123     int BaseIdx = RootMask.size() * SrcOpIndex;
40124     if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
40125       OpDemandedElts.setBit(M - BaseIdx);
40126   }
40127   if (RootSizeInBits != VT.getSizeInBits()) {
40128     // Op is smaller than Root - extract the demanded elts for the subvector.
40129     unsigned Scale = RootSizeInBits / VT.getSizeInBits();
40130     unsigned NumOpMaskElts = RootMask.size() / Scale;
40131     assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
40132     assert(OpDemandedElts
40133                .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
40134                .isZero() &&
40135            "Out of range elements referenced in root mask");
40136     OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
40137   }
40138   OpDemandedElts =
40139       APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
40140 
40141   // Extract target shuffle mask and resolve sentinels and inputs.
40142   SmallVector<int, 64> OpMask;
40143   SmallVector<SDValue, 2> OpInputs;
40144   APInt OpUndef, OpZero;
40145   bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
40146   if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
40147                              OpZero, DAG, Depth, false)) {
40148     // Shuffle inputs must not be larger than the shuffle result.
40149     // TODO: Relax this for single input faux shuffles (e.g. trunc).
40150     if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
40151           return OpInput.getValueSizeInBits() > VT.getSizeInBits();
40152         }))
40153       return SDValue();
40154   } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40155              (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
40156              !isNullConstant(Op.getOperand(1))) {
40157     SDValue SrcVec = Op.getOperand(0);
40158     int ExtractIdx = Op.getConstantOperandVal(1);
40159     unsigned NumElts = VT.getVectorNumElements();
40160     OpInputs.assign({SrcVec});
40161     OpMask.assign(NumElts, SM_SentinelUndef);
40162     std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
40163     OpZero = OpUndef = APInt::getZero(NumElts);
40164   } else {
40165     return SDValue();
40166   }
40167 
40168   // If the shuffle result was smaller than the root, we need to adjust the
40169   // mask indices and pad the mask with undefs.
40170   if (RootSizeInBits > VT.getSizeInBits()) {
40171     unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
40172     unsigned OpMaskSize = OpMask.size();
40173     if (OpInputs.size() > 1) {
40174       unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
40175       for (int &M : OpMask) {
40176         if (M < 0)
40177           continue;
40178         int EltIdx = M % OpMaskSize;
40179         int OpIdx = M / OpMaskSize;
40180         M = (PaddedMaskSize * OpIdx) + EltIdx;
40181       }
40182     }
40183     OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
40184     OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
40185     OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
40186   }
40187 
40188   SmallVector<int, 64> Mask;
40189   SmallVector<SDValue, 16> Ops;
40190 
40191   // We don't need to merge masks if the root is empty.
40192   bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
40193   if (EmptyRoot) {
40194     // Only resolve zeros if it will remove an input, otherwise we might end
40195     // up in an infinite loop.
40196     bool ResolveKnownZeros = true;
40197     if (!OpZero.isZero()) {
40198       APInt UsedInputs = APInt::getZero(OpInputs.size());
40199       for (int i = 0, e = OpMask.size(); i != e; ++i) {
40200         int M = OpMask[i];
40201         if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
40202           continue;
40203         UsedInputs.setBit(M / OpMask.size());
40204         if (UsedInputs.isAllOnes()) {
40205           ResolveKnownZeros = false;
40206           break;
40207         }
40208       }
40209     }
40210     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
40211                                       ResolveKnownZeros);
40212 
40213     Mask = OpMask;
40214     Ops.append(OpInputs.begin(), OpInputs.end());
40215   } else {
40216     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
40217 
40218     // Add the inputs to the Ops list, avoiding duplicates.
40219     Ops.append(SrcOps.begin(), SrcOps.end());
40220 
40221     auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
40222       // Attempt to find an existing match.
40223       SDValue InputBC = peekThroughBitcasts(Input);
40224       for (int i = 0, e = Ops.size(); i < e; ++i)
40225         if (InputBC == peekThroughBitcasts(Ops[i]))
40226           return i;
40227       // Match failed - should we replace an existing Op?
40228       if (InsertionPoint >= 0) {
40229         Ops[InsertionPoint] = Input;
40230         return InsertionPoint;
40231       }
40232       // Add to the end of the Ops list.
40233       Ops.push_back(Input);
40234       return Ops.size() - 1;
40235     };
40236 
40237     SmallVector<int, 2> OpInputIdx;
40238     for (SDValue OpInput : OpInputs)
40239       OpInputIdx.push_back(
40240           AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
40241 
40242     assert(((RootMask.size() > OpMask.size() &&
40243              RootMask.size() % OpMask.size() == 0) ||
40244             (OpMask.size() > RootMask.size() &&
40245              OpMask.size() % RootMask.size() == 0) ||
40246             OpMask.size() == RootMask.size()) &&
40247            "The smaller number of elements must divide the larger.");
40248 
40249     // This function can be performance-critical, so we rely on the power-of-2
40250     // knowledge that we have about the mask sizes to replace div/rem ops with
40251     // bit-masks and shifts.
40252     assert(isPowerOf2_32(RootMask.size()) &&
40253            "Non-power-of-2 shuffle mask sizes");
40254     assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
40255     unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
40256     unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
40257 
40258     unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
40259     unsigned RootRatio =
40260         std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
40261     unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
40262     assert((RootRatio == 1 || OpRatio == 1) &&
40263            "Must not have a ratio for both incoming and op masks!");
40264 
40265     assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
40266     assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
40267     assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
40268     unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
40269     unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
40270 
40271     Mask.resize(MaskWidth, SM_SentinelUndef);
40272 
40273     // Merge this shuffle operation's mask into our accumulated mask. Note that
40274     // this shuffle's mask will be the first applied to the input, followed by
40275     // the root mask to get us all the way to the root value arrangement. The
40276     // reason for this order is that we are recursing up the operation chain.
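          // e.g. a v2i64 root mask {1,0} over an op with a v4i32 mask
          // {3,2,1,0} merges to the v4i32 mask {1,0,3,2}: the op mask is
          // applied first, then the root mask swaps the two 64-bit halves.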
40277     for (unsigned i = 0; i < MaskWidth; ++i) {
40278       unsigned RootIdx = i >> RootRatioLog2;
40279       if (RootMask[RootIdx] < 0) {
40280         // This is a zero or undef lane, we're done.
40281         Mask[i] = RootMask[RootIdx];
40282         continue;
40283       }
40284 
40285       unsigned RootMaskedIdx =
40286           RootRatio == 1
40287               ? RootMask[RootIdx]
40288               : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
40289 
40290       // Just insert the scaled root mask value if it references an input other
40291       // than the SrcOp we're currently inserting.
40292       if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
40293           (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
40294         Mask[i] = RootMaskedIdx;
40295         continue;
40296       }
40297 
40298       RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
40299       unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
40300       if (OpMask[OpIdx] < 0) {
40301         // The incoming lanes are zero or undef, it doesn't matter which ones we
40302         // are using.
40303         Mask[i] = OpMask[OpIdx];
40304         continue;
40305       }
40306 
40307       // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
40308       unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
40309                                           : (OpMask[OpIdx] << OpRatioLog2) +
40310                                                 (RootMaskedIdx & (OpRatio - 1));
40311 
40312       OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
40313       int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
40314       assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
40315       OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
40316 
40317       Mask[i] = OpMaskedIdx;
40318     }
40319   }
40320 
40321   // Remove unused/repeated shuffle source ops.
40322   resolveTargetShuffleInputsAndMask(Ops, Mask);
40323 
40324   // Handle the all undef/zero/ones cases early.
40325   if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
40326     return DAG.getUNDEF(RootVT);
40327   if (all_of(Mask, [](int Idx) { return Idx < 0; }))
40328     return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
40329   if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
40330       !llvm::is_contained(Mask, SM_SentinelZero))
40331     return getOnesVector(RootVT, DAG, SDLoc(Root));
40332 
40333   assert(!Ops.empty() && "Shuffle with no inputs detected");
40334   HasVariableMask |= IsOpVariableMask;
40335 
40336   // Update the list of shuffle nodes that have been combined so far.
40337   SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
40338                                                 SrcNodes.end());
40339   CombinedNodes.push_back(Op.getNode());
40340 
40341   // See if we can recurse into each shuffle source op (if it's a target
40342   // shuffle). The source op should generally only be combined if it either has
40343   // a single use (i.e. the current Op) or all its users have already been
40344   // combined; if not, we can still combine but should prevent generation of
40345   // variable shuffles to avoid constant pool bloat.
40346   // Don't recurse if we already have more source ops than we can combine in
40347   // the remaining recursion depth.
40348   if (Ops.size() < (MaxDepth - Depth)) {
40349     for (int i = 0, e = Ops.size(); i < e; ++i) {
40350       // For empty roots, we need to resolve zeroable elements before combining
40351       // them with other shuffles.
40352       SmallVector<int, 64> ResolvedMask = Mask;
40353       if (EmptyRoot)
40354         resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
40355       bool AllowCrossLaneVar = false;
40356       bool AllowPerLaneVar = false;
40357       if (Ops[i].getNode()->hasOneUse() ||
40358           SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
40359         AllowCrossLaneVar = AllowVariableCrossLaneMask;
40360         AllowPerLaneVar = AllowVariablePerLaneMask;
40361       }
40362       if (SDValue Res = combineX86ShufflesRecursively(
40363               Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
40364               HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
40365               Subtarget))
40366         return Res;
40367     }
40368   }
40369 
40370   // Attempt to constant fold all of the constant source ops.
40371   if (SDValue Cst = combineX86ShufflesConstants(
40372           Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
40373     return Cst;
40374 
40375   // If constant fold failed and we only have constants - then we have
40376   // multiple uses by a single non-variable shuffle - just bail.
40377   if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
40378         APInt UndefElts;
40379         SmallVector<APInt> RawBits;
40380         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
40381         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
40382                                              RawBits);
40383       })) {
40384     return SDValue();
40385   }
40386 
40387   // Canonicalize the combined shuffle mask chain with horizontal ops.
40388   // NOTE: This will update the Ops and Mask.
40389   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
40390           Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
40391     return DAG.getBitcast(RootVT, HOp);
40392 
40393   // Try to refine our inputs given our knowledge of target shuffle mask.
40394   for (auto I : enumerate(Ops)) {
40395     int OpIdx = I.index();
40396     SDValue &Op = I.value();
40397 
40398     // What range of shuffle mask element values results in picking from Op?
40399     int Lo = OpIdx * Mask.size();
40400     int Hi = Lo + Mask.size();
40401 
40402     // Which elements of Op do we demand, given the mask's granularity?
40403     APInt OpDemandedElts(Mask.size(), 0);
40404     for (int MaskElt : Mask) {
40405       if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
40406         int OpEltIdx = MaskElt - Lo;
40407         OpDemandedElts.setBit(OpEltIdx);
40408       }
40409     }
40410 
40411     // Is the shuffle result smaller than the root?
40412     if (Op.getValueSizeInBits() < RootSizeInBits) {
40413       // We padded the mask with undefs. But we now need to undo that.
40414       unsigned NumExpectedVectorElts = Mask.size();
40415       unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
40416       unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
40417       assert(!OpDemandedElts.extractBits(
40418                  NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
40419              "Demanding the virtual undef widening padding?");
40420       OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
40421     }
40422 
40423     // The Op itself may be of different VT, so we need to scale the mask.
40424     unsigned NumOpElts = Op.getValueType().getVectorNumElements();
40425     APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
40426 
40427     // Can this operand be simplified any further, given its demanded elements?
40428     if (SDValue NewOp =
40429             DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
40430                 Op, OpScaledDemandedElts, DAG))
40431       Op = NewOp;
40432   }
40433   // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
40434 
40435   // Widen any subvector shuffle inputs we've collected.
40436   // TODO: Remove this to avoid generating temporary nodes; we should only
40437   // widen once combineX86ShuffleChain has found a match.
40438   if (any_of(Ops, [RootSizeInBits](SDValue Op) {
40439         return Op.getValueSizeInBits() < RootSizeInBits;
40440       })) {
40441     for (SDValue &Op : Ops)
40442       if (Op.getValueSizeInBits() < RootSizeInBits)
40443         Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
40444                             RootSizeInBits);
40445     // Reresolve - we might have repeated subvector sources.
40446     resolveTargetShuffleInputsAndMask(Ops, Mask);
40447   }
40448 
40449   // We can only combine unary and binary shuffle mask cases.
40450   if (Ops.size() <= 2) {
40451     // Minor canonicalization of the accumulated shuffle mask to make it easier
40452     // to match below. All this does is detect masks with sequential pairs of
40453     // elements, and shrink them to the half-width mask. It does this in a loop
40454     // so it will reduce the size of the mask to the minimal width mask which
40455     // performs an equivalent shuffle.
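    // Illustrative example: the 8-element mask {2,3,0,1,6,7,4,5} widens to
    // the 4-element mask {1,0,3,2}; a second iteration then stops because no
    // remaining pair of elements is sequential.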
40456     while (Mask.size() > 1) {
40457       SmallVector<int, 64> WidenedMask;
40458       if (!canWidenShuffleElements(Mask, WidenedMask))
40459         break;
40460       Mask = std::move(WidenedMask);
40461     }
40462 
40463     // Canonicalization of binary shuffle masks to improve pattern matching by
40464     // commuting the inputs.
40465     if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
40466       ShuffleVectorSDNode::commuteMask(Mask);
40467       std::swap(Ops[0], Ops[1]);
40468     }
40469 
40470     // Try to combine into a single shuffle instruction.
40471     if (SDValue Shuffle = combineX86ShuffleChain(
40472             Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
40473             AllowVariablePerLaneMask, DAG, Subtarget))
40474       return Shuffle;
40475 
40476     // If all the operands come from the same larger vector, fall through and try
40477     // to use combineX86ShuffleChainWithExtract.
40478     SDValue LHS = peekThroughBitcasts(Ops.front());
40479     SDValue RHS = peekThroughBitcasts(Ops.back());
40480     if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
40481         (RootSizeInBits / Mask.size()) != 64 ||
40482         LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
40483         RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
40484         LHS.getOperand(0) != RHS.getOperand(0))
40485       return SDValue();
40486   }
40487 
40488   // If that failed and any input is extracted then try to combine as a
40489   // shuffle with the larger type.
40490   return combineX86ShuffleChainWithExtract(
40491       Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
40492       AllowVariablePerLaneMask, DAG, Subtarget);
40493 }
40494 
40495 /// Helper entry wrapper to combineX86ShufflesRecursively.
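///
/// It starts the recursion with a single source operand, the trivial
/// one-element identity root mask {0}, depth 0, and both variable-mask
/// forms allowed, so any target shuffle chain rooted at Op can be peeled.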
40496 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
40497                                              const X86Subtarget &Subtarget) {
40498   return combineX86ShufflesRecursively(
40499       {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
40500       /*HasVarMask*/ false,
40501       /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
40502       Subtarget);
40503 }
40504 
40505 /// Get the PSHUF-style mask from PSHUF node.
40506 ///
40507 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
40508 /// PSHUF-style masks that can be reused with such instructions.
40509 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
40510   MVT VT = N.getSimpleValueType();
40511   SmallVector<int, 4> Mask;
40512   SmallVector<SDValue, 2> Ops;
40513   bool HaveMask =
40514       getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
40515   (void)HaveMask;
40516   assert(HaveMask);
40517 
40518   // If we have more than 128-bits, only the low 128-bits of shuffle mask
40519   // matter. Check that the upper masks are repeats and remove them.
40520   if (VT.getSizeInBits() > 128) {
40521     int LaneElts = 128 / VT.getScalarSizeInBits();
40522 #ifndef NDEBUG
40523     for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
40524       for (int j = 0; j < LaneElts; ++j)
40525         assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
40526                "Mask doesn't repeat in high 128-bit lanes!");
40527 #endif
40528     Mask.resize(LaneElts);
40529   }
40530 
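  // Illustrative example: a v8i16 PSHUFHW whose full mask is
  // {0,1,2,3,6,7,4,5} yields {2,3,0,1} below - the identity low half is
  // dropped and the remaining indices are rebased into the 0..3 range.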
40531   switch (N.getOpcode()) {
40532   case X86ISD::PSHUFD:
40533     return Mask;
40534   case X86ISD::PSHUFLW:
40535     Mask.resize(4);
40536     return Mask;
40537   case X86ISD::PSHUFHW:
40538     Mask.erase(Mask.begin(), Mask.begin() + 4);
40539     for (int &M : Mask)
40540       M -= 4;
40541     return Mask;
40542   default:
40543     llvm_unreachable("No valid shuffle instruction found!");
40544   }
40545 }
40546 
40547 /// Search for a combinable shuffle across a chain ending in pshufd.
40548 ///
40549 /// We walk up the chain and look for a combinable shuffle, skipping over
40550 /// shuffles that we could hoist this shuffle's transformation past without
40551 /// altering anything.
40552 static SDValue
40553 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
40554                              SelectionDAG &DAG) {
40555   assert(N.getOpcode() == X86ISD::PSHUFD &&
40556          "Called with something other than an x86 128-bit half shuffle!");
40557   SDLoc DL(N);
40558 
40559   // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
40560   // of the shuffles in the chain so that we can form a fresh chain to replace
40561   // this one.
40562   SmallVector<SDValue, 8> Chain;
40563   SDValue V = N.getOperand(0);
40564   for (; V.hasOneUse(); V = V.getOperand(0)) {
40565     switch (V.getOpcode()) {
40566     default:
40567       return SDValue(); // Nothing combined!
40568 
40569     case ISD::BITCAST:
40570       // Skip bitcasts as we always know the type for the target specific
40571       // instructions.
40572       continue;
40573 
40574     case X86ISD::PSHUFD:
40575       // Found another dword shuffle.
40576       break;
40577 
40578     case X86ISD::PSHUFLW:
40579       // Check that the low words (being shuffled) are the identity in the
40580       // dword shuffle, and the high words are self-contained.
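      // In other words, dwords 0 and 1 (which hold the words PSHUFLW
      // permutes) stay in place and dwords 2 and 3 only pick from the high
      // half, so this PSHUFD never moves PSHUFLW's output across the
      // low/high word boundary.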
40581       if (Mask[0] != 0 || Mask[1] != 1 ||
40582           !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
40583         return SDValue();
40584 
40585       Chain.push_back(V);
40586       continue;
40587 
40588     case X86ISD::PSHUFHW:
40589       // Check that the high words (being shuffled) are the identity in the
40590       // dword shuffle, and the low words are self-contained.
40591       if (Mask[2] != 2 || Mask[3] != 3 ||
40592           !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
40593         return SDValue();
40594 
40595       Chain.push_back(V);
40596       continue;
40597 
40598     case X86ISD::UNPCKL:
40599     case X86ISD::UNPCKH:
40600       // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
40601       // shuffle into a preceding word shuffle.
40602       if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
40603           V.getSimpleValueType().getVectorElementType() != MVT::i16)
40604         return SDValue();
40605 
40606       // Search for a half-shuffle which we can combine with.
40607       unsigned CombineOp =
40608           V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
40609       if (V.getOperand(0) != V.getOperand(1) ||
40610           !V->isOnlyUserOf(V.getOperand(0).getNode()))
40611         return SDValue();
40612       Chain.push_back(V);
40613       V = V.getOperand(0);
40614       do {
40615         switch (V.getOpcode()) {
40616         default:
40617           return SDValue(); // Nothing to combine.
40618 
40619         case X86ISD::PSHUFLW:
40620         case X86ISD::PSHUFHW:
40621           if (V.getOpcode() == CombineOp)
40622             break;
40623 
40624           Chain.push_back(V);
40625 
40626           [[fallthrough]];
40627         case ISD::BITCAST:
40628           V = V.getOperand(0);
40629           continue;
40630         }
40631         break;
40632       } while (V.hasOneUse());
40633       break;
40634     }
40635     // Break out of the loop if we break out of the switch.
40636     break;
40637   }
40638 
40639   if (!V.hasOneUse())
40640     // We fell out of the loop without finding a viable combining instruction.
40641     return SDValue();
40642 
40643   // Merge this node's mask and our incoming mask.
40644   SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
40645   for (int &M : Mask)
40646     M = VMask[M];
40647   V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
40648                   getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
40649 
40650   // Rebuild the chain around this new shuffle.
40651   while (!Chain.empty()) {
40652     SDValue W = Chain.pop_back_val();
40653 
40654     if (V.getValueType() != W.getOperand(0).getValueType())
40655       V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
40656 
40657     switch (W.getOpcode()) {
40658     default:
40659       llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
40660 
40661     case X86ISD::UNPCKL:
40662     case X86ISD::UNPCKH:
40663       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
40664       break;
40665 
40666     case X86ISD::PSHUFD:
40667     case X86ISD::PSHUFLW:
40668     case X86ISD::PSHUFHW:
40669       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
40670       break;
40671     }
40672   }
40673   if (V.getValueType() != N.getValueType())
40674     V = DAG.getBitcast(N.getValueType(), V);
40675 
40676   // Return the new chain to replace N.
40677   return V;
40678 }
40679 
40680 // Attempt to commute shufps LHS loads:
40681 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
40682 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
40683                                       SelectionDAG &DAG) {
40684   // TODO: Add vXf64 support.
40685   if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
40686     return SDValue();
40687 
40688   // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
40689   auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
40690     if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
40691       return SDValue();
40692     SDValue N0 = V.getOperand(0);
40693     SDValue N1 = V.getOperand(1);
40694     unsigned Imm = V.getConstantOperandVal(2);
40695     const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
40696     if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
40697         X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
40698       return SDValue();
40699     Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
40700     return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
40701                        DAG.getTargetConstant(Imm, DL, MVT::i8));
40702   };
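  // Immediate bookkeeping: the commuted SHUFP above swaps the low and high
  // nibbles of its immediate, which swaps the two 64-bit halves of its
  // result within each 128-bit lane. Every 2-bit selection field of the
  // outer shuffle that reads the commuted value must therefore be XORed
  // with 2: 0xAA adjusts all four fields, 0x0A only the two fields reading
  // the first operand, 0xA0 only the two fields reading the second operand.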
40703 
40704   switch (N.getOpcode()) {
40705   case X86ISD::VPERMILPI:
40706     if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
40707       unsigned Imm = N.getConstantOperandVal(1);
40708       return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
40709                          DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
40710     }
40711     break;
40712   case X86ISD::SHUFP: {
40713     SDValue N0 = N.getOperand(0);
40714     SDValue N1 = N.getOperand(1);
40715     unsigned Imm = N.getConstantOperandVal(2);
40716     if (N0 == N1) {
40717       if (SDValue NewSHUFP = commuteSHUFP(N, N0))
40718         return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
40719                            DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
40720     } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
40721       return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
40722                          DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
40723     } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
40724       return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
40725                          DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
40726     }
40727     break;
40728   }
40729   }
40730 
40731   return SDValue();
40732 }
40733 
40734 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
40735 static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
40736                                              const SDLoc &DL) {
40737   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40738   EVT ShuffleVT = N.getValueType();
40739 
40740   auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
40741     // AllZeros/AllOnes constants are freely shuffled and will peek through
40742     // bitcasts. Other constant build vectors do not peek through bitcasts. Only
40743     // merge with target shuffles if it has one use so shuffle combining is
40744     // merge with target shuffles if they have one use so shuffle combining is
40745     return ISD::isBuildVectorAllOnes(Op.getNode()) ||
40746            ISD::isBuildVectorAllZeros(Op.getNode()) ||
40747            ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
40748            ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
40749            (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
40750            (FoldLoad && isShuffleFoldableLoad(Op)) ||
40751            DAG.isSplatValue(Op, /*AllowUndefs*/ false);
40752   };
40753   auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
40754     // Ensure we only shuffle whole vector src elements, unless it's a logical
40755     // binop where we can more aggressively move shuffles from dst to src.
40756     return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
40757            BinOp == X86ISD::ANDNP ||
40758            (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
40759   };
40760 
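  // Illustrative example: PSHUFD(ADD(X, splat)) is rewritten below as
  // ADD(PSHUFD(X), PSHUFD(splat)); the shuffle of the splat is expected to
  // fold away, and the remaining shuffle of X may combine further.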
40761   unsigned Opc = N.getOpcode();
40762   switch (Opc) {
40763   // Unary and Unary+Permute Shuffles.
40764   case X86ISD::PSHUFB: {
40765     // Don't merge PSHUFB if it contains zeroed elements.
40766     SmallVector<int> Mask;
40767     SmallVector<SDValue> Ops;
40768     if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
40769                               Mask))
40770       break;
40771     [[fallthrough]];
40772   }
40773   case X86ISD::VBROADCAST:
40774   case X86ISD::MOVDDUP:
40775   case X86ISD::PSHUFD:
40776   case X86ISD::PSHUFHW:
40777   case X86ISD::PSHUFLW:
40778   case X86ISD::VPERMI:
40779   case X86ISD::VPERMILPI: {
40780     if (N.getOperand(0).getValueType() == ShuffleVT &&
40781         N->isOnlyUserOf(N.getOperand(0).getNode())) {
40782       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
40783       unsigned SrcOpcode = N0.getOpcode();
40784       if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
40785         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
40786         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
40787         if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
40788             IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
40789           SDValue LHS, RHS;
40790           Op00 = DAG.getBitcast(ShuffleVT, Op00);
40791           Op01 = DAG.getBitcast(ShuffleVT, Op01);
40792           if (N.getNumOperands() == 2) {
40793             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
40794             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
40795           } else {
40796             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
40797             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
40798           }
40799           EVT OpVT = N0.getValueType();
40800           return DAG.getBitcast(ShuffleVT,
40801                                 DAG.getNode(SrcOpcode, DL, OpVT,
40802                                             DAG.getBitcast(OpVT, LHS),
40803                                             DAG.getBitcast(OpVT, RHS)));
40804         }
40805       }
40806     }
40807     break;
40808   }
40809   // Binary and Binary+Permute Shuffles.
40810   case X86ISD::INSERTPS: {
40811     // Don't merge INSERTPS if it contains zeroed elements.
40812     unsigned InsertPSMask = N.getConstantOperandVal(2);
40813     unsigned ZeroMask = InsertPSMask & 0xF;
40814     if (ZeroMask != 0)
40815       break;
40816     [[fallthrough]];
40817   }
40818   case X86ISD::MOVSD:
40819   case X86ISD::MOVSS:
40820   case X86ISD::BLENDI:
40821   case X86ISD::SHUFP:
40822   case X86ISD::UNPCKH:
40823   case X86ISD::UNPCKL: {
40824     if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
40825         N->isOnlyUserOf(N.getOperand(1).getNode())) {
40826       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
40827       SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
40828       unsigned SrcOpcode = N0.getOpcode();
40829       if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
40830           IsSafeToMoveShuffle(N0, SrcOpcode) &&
40831           IsSafeToMoveShuffle(N1, SrcOpcode)) {
40832         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
40833         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
40834         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
40835         SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
40836         // Ensure the total number of shuffles doesn't increase by folding this
40837         // shuffle through to the source ops.
40838         if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
40839              (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
40840             ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
40841              (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
40842           SDValue LHS, RHS;
40843           Op00 = DAG.getBitcast(ShuffleVT, Op00);
40844           Op10 = DAG.getBitcast(ShuffleVT, Op10);
40845           Op01 = DAG.getBitcast(ShuffleVT, Op01);
40846           Op11 = DAG.getBitcast(ShuffleVT, Op11);
40847           if (N.getNumOperands() == 3) {
40848             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
40849             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
40850           } else {
40851             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
40852             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
40853           }
40854           EVT OpVT = N0.getValueType();
40855           return DAG.getBitcast(ShuffleVT,
40856                                 DAG.getNode(SrcOpcode, DL, OpVT,
40857                                             DAG.getBitcast(OpVT, LHS),
40858                                             DAG.getBitcast(OpVT, RHS)));
40859         }
40860       }
40861     }
40862     break;
40863   }
40864   }
40865   return SDValue();
40866 }
40867 
40868 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
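/// For example, vpermf128(vpermilpi(x,m),vpermilpi(y,m),sel) becomes
/// vpermilpi(vpermf128(x,y,sel),m) when both sources use the same immediate
/// (or the second source is undef).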
40869 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
40870                                                       SelectionDAG &DAG,
40871                                                       const SDLoc &DL) {
40872   assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
40873 
40874   MVT VT = V.getSimpleValueType();
40875   SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
40876   SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
40877   unsigned SrcOpc0 = Src0.getOpcode();
40878   unsigned SrcOpc1 = Src1.getOpcode();
40879   EVT SrcVT0 = Src0.getValueType();
40880   EVT SrcVT1 = Src1.getValueType();
40881 
40882   if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
40883     return SDValue();
40884 
40885   switch (SrcOpc0) {
40886   case X86ISD::MOVDDUP: {
40887     SDValue LHS = Src0.getOperand(0);
40888     SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
40889     SDValue Res =
40890         DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
40891     Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
40892     return DAG.getBitcast(VT, Res);
40893   }
40894   case X86ISD::VPERMILPI:
40895     // TODO: Handle v4f64 permutes with different low/high lane masks.
40896     if (SrcVT0 == MVT::v4f64) {
40897       uint64_t Mask = Src0.getConstantOperandVal(1);
40898       if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
40899         break;
40900     }
40901     [[fallthrough]];
40902   case X86ISD::VSHLI:
40903   case X86ISD::VSRLI:
40904   case X86ISD::VSRAI:
40905   case X86ISD::PSHUFD:
40906     if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
40907       SDValue LHS = Src0.getOperand(0);
40908       SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
40909       SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
40910                                 V.getOperand(2));
40911       Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
40912       return DAG.getBitcast(VT, Res);
40913     }
40914     break;
40915   }
40916 
40917   return SDValue();
40918 }
40919 
40920 /// Try to combine x86 target specific shuffles.
40921 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
40922                                     TargetLowering::DAGCombinerInfo &DCI,
40923                                     const X86Subtarget &Subtarget) {
40924   SDLoc DL(N);
40925   MVT VT = N.getSimpleValueType();
40926   SmallVector<int, 4> Mask;
40927   unsigned Opcode = N.getOpcode();
40928 
40929   if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
40930     return R;
40931 
40932   // Handle specific target shuffles.
40933   switch (Opcode) {
40934   case X86ISD::MOVDDUP: {
40935     SDValue Src = N.getOperand(0);
40936     // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
40937     if (VT == MVT::v2f64 && Src.hasOneUse() &&
40938         ISD::isNormalLoad(Src.getNode())) {
40939       LoadSDNode *LN = cast<LoadSDNode>(Src);
40940       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
40941         SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
40942         DCI.CombineTo(N.getNode(), Movddup);
40943         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40944         DCI.recursivelyDeleteUnusedNodes(LN);
40945         return N; // Return N so it doesn't get rechecked!
40946       }
40947     }
40948 
40949     return SDValue();
40950   }
40951   case X86ISD::VBROADCAST: {
40952     SDValue Src = N.getOperand(0);
40953     SDValue BC = peekThroughBitcasts(Src);
40954     EVT SrcVT = Src.getValueType();
40955     EVT BCVT = BC.getValueType();
40956 
40957     // If broadcasting from another shuffle, attempt to simplify it.
40958     // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
40959     if (isTargetShuffle(BC.getOpcode()) &&
40960         VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
40961       unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
40962       SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
40963                                         SM_SentinelUndef);
40964       for (unsigned i = 0; i != Scale; ++i)
40965         DemandedMask[i] = i;
40966       if (SDValue Res = combineX86ShufflesRecursively(
40967               {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
40968               X86::MaxShuffleCombineDepth,
40969               /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
40970               /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
40971         return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
40972                            DAG.getBitcast(SrcVT, Res));
40973     }
40974 
40975     // broadcast(bitcast(src)) -> bitcast(broadcast(src))
40976     // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
40977     if (Src.getOpcode() == ISD::BITCAST &&
40978         SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
40979         DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
40980         FixedVectorType::isValidElementType(
40981             BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
40982       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
40983                                    VT.getVectorNumElements());
40984       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
40985     }
40986 
40987     // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
40988     // If we're re-broadcasting a smaller type then broadcast with that type and
40989     // bitcast.
40990     // TODO: Do this for any splat?
40991     if (Src.getOpcode() == ISD::BITCAST &&
40992         (BC.getOpcode() == X86ISD::VBROADCAST ||
40993          BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
40994         (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
40995         (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
40996       MVT NewVT =
40997           MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
40998                            VT.getSizeInBits() / BCVT.getScalarSizeInBits());
40999       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
41000     }
41001 
41002     // Reduce broadcast source vector to lowest 128-bits.
41003     if (SrcVT.getSizeInBits() > 128)
41004       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
41005                          extract128BitVector(Src, 0, DAG, DL));
41006 
41007     // broadcast(scalar_to_vector(x)) -> broadcast(x).
41008     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
41009       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
41010 
41011     // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
41012     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
41013         isNullConstant(Src.getOperand(1)) &&
41014         DAG.getTargetLoweringInfo().isTypeLegal(
41015             Src.getOperand(0).getValueType()))
41016       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
41017 
41018     // Share broadcast with the longest vector and extract low subvector (free).
41019     // Ensure the same SDValue from the SDNode use is being used.
41020     for (SDNode *User : Src->uses())
41021       if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
41022           Src == User->getOperand(0) &&
41023           User->getValueSizeInBits(0).getFixedValue() >
41024               VT.getFixedSizeInBits()) {
41025         return extractSubVector(SDValue(User, 0), 0, DAG, DL,
41026                                 VT.getSizeInBits());
41027       }
41028 
41029     // vbroadcast(scalarload X) -> vbroadcast_load X
41030     // For float loads, extract other uses of the scalar from the broadcast.
41031     if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
41032         ISD::isNormalLoad(Src.getNode())) {
41033       LoadSDNode *LN = cast<LoadSDNode>(Src);
41034       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41035       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41036       SDValue BcastLd =
41037           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41038                                   LN->getMemoryVT(), LN->getMemOperand());
41039       // If the load value is used only by N, replace it via CombineTo N.
41040       bool NoReplaceExtract = Src.hasOneUse();
41041       DCI.CombineTo(N.getNode(), BcastLd);
41042       if (NoReplaceExtract) {
41043         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41044         DCI.recursivelyDeleteUnusedNodes(LN);
41045       } else {
41046         SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
41047                                   DAG.getIntPtrConstant(0, DL));
41048         DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
41049       }
41050       return N; // Return N so it doesn't get rechecked!
41051     }
41052 
41053     // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
41054     // i16. So shrink it ourselves if we can make a broadcast_load.
41055     if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
41056         Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
41057       assert(Subtarget.hasAVX2() && "Expected AVX2");
41058       SDValue TruncIn = Src.getOperand(0);
41059 
41060       // If this is a truncate of a non-extending load we can just narrow it to
41061       // use a broadcast_load.
41062       if (ISD::isNormalLoad(TruncIn.getNode())) {
41063         LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
41064         // Unless it's volatile or atomic.
41065         if (LN->isSimple()) {
41066           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41067           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41068           SDValue BcastLd = DAG.getMemIntrinsicNode(
41069               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
41070               LN->getPointerInfo(), LN->getOriginalAlign(),
41071               LN->getMemOperand()->getFlags());
41072           DCI.CombineTo(N.getNode(), BcastLd);
41073           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41074           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41075           return N; // Return N so it doesn't get rechecked!
41076         }
41077       }
41078 
41079       // If this is a truncate of an i16 extload, we can directly replace it.
41080       if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
41081           ISD::isEXTLoad(Src.getOperand(0).getNode())) {
41082         LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
41083         if (LN->getMemoryVT().getSizeInBits() == 16) {
41084           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41085           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41086           SDValue BcastLd =
41087               DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41088                                       LN->getMemoryVT(), LN->getMemOperand());
41089           DCI.CombineTo(N.getNode(), BcastLd);
41090           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41091           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41092           return N; // Return N so it doesn't get rechecked!
41093         }
41094       }
41095 
41096       // If this is a truncate of a load that has been shifted right, we can
41097       // offset the pointer and use a narrower load.
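      // Illustrative example (little-endian): broadcasting
      // (i16 (trunc (srl (i32 (load p)), 16))) becomes a 16-bit
      // broadcast_load from p+2 - the shift simply advances the load
      // address by ShiftAmt/8 bytes.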
41098       if (TruncIn.getOpcode() == ISD::SRL &&
41099           TruncIn.getOperand(0).hasOneUse() &&
41100           isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
41101           ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
41102         LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
41103         unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
41104         // Make sure the shift amount and the load size are divisible by 16.
41105         // Don't do this if the load is volatile or atomic.
41106         if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
41107             LN->isSimple()) {
41108           unsigned Offset = ShiftAmt / 8;
41109           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41110           SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(),
41111                                                  TypeSize::Fixed(Offset), DL);
41112           SDValue Ops[] = { LN->getChain(), Ptr };
41113           SDValue BcastLd = DAG.getMemIntrinsicNode(
41114               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
41115               LN->getPointerInfo().getWithOffset(Offset),
41116               LN->getOriginalAlign(),
41117               LN->getMemOperand()->getFlags());
41118           DCI.CombineTo(N.getNode(), BcastLd);
41119           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41120           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41121           return N; // Return N so it doesn't get rechecked!
41122         }
41123       }
41124     }
41125 
41126     // vbroadcast(vzload X) -> vbroadcast_load X
41127     if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
41128       MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
41129       if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
41130         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41131         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41132         SDValue BcastLd =
41133             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41134                                     LN->getMemoryVT(), LN->getMemOperand());
41135         DCI.CombineTo(N.getNode(), BcastLd);
41136         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41137         DCI.recursivelyDeleteUnusedNodes(LN);
41138         return N; // Return N so it doesn't get rechecked!
41139       }
41140     }
41141 
41142     // vbroadcast(vector load X) -> vbroadcast_load
41143     if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
41144          SrcVT == MVT::v4i32) &&
41145         Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
41146       LoadSDNode *LN = cast<LoadSDNode>(Src);
41147       // Unless the load is volatile or atomic.
41148       if (LN->isSimple()) {
41149         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41150         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
41151         SDValue BcastLd = DAG.getMemIntrinsicNode(
41152             X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
41153             LN->getPointerInfo(), LN->getOriginalAlign(),
41154             LN->getMemOperand()->getFlags());
41155         DCI.CombineTo(N.getNode(), BcastLd);
41156         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41157         DCI.recursivelyDeleteUnusedNodes(LN);
41158         return N; // Return N so it doesn't get rechecked!
41159       }
41160     }
41161 
41162     return SDValue();
41163   }
41164   case X86ISD::VZEXT_MOVL: {
41165     SDValue N0 = N.getOperand(0);
41166 
41167     // If this is a vzmovl of a full vector load, replace it with a vzload, unless
41168     // the load is volatile.
41169     if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
41170       auto *LN = cast<LoadSDNode>(N0);
41171       if (SDValue VZLoad =
41172               narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
41173         DCI.CombineTo(N.getNode(), VZLoad);
41174         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41175         DCI.recursivelyDeleteUnusedNodes(LN);
41176         return N;
41177       }
41178     }
41179 
41180     // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
41181     // and can just use a VZEXT_LOAD.
41182     // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
41183     if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
41184       auto *LN = cast<MemSDNode>(N0);
41185       if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
41186         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41187         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
41188         SDValue VZLoad =
41189             DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
41190                                     LN->getMemoryVT(), LN->getMemOperand());
41191         DCI.CombineTo(N.getNode(), VZLoad);
41192         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41193         DCI.recursivelyDeleteUnusedNodes(LN);
41194         return N;
41195       }
41196     }
41197 
41198     // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
41199     // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
41200     // if the upper bits of the i64 are zero.
41201     if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
41202         N0.getOperand(0).hasOneUse() &&
41203         N0.getOperand(0).getValueType() == MVT::i64) {
41204       SDValue In = N0.getOperand(0);
41205       APInt Mask = APInt::getHighBitsSet(64, 32);
41206       if (DAG.MaskedValueIsZero(In, Mask)) {
41207         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
41208         MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
41209         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
41210         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
41211         return DAG.getBitcast(VT, Movl);
41212       }
41213     }
41214 
41215     // Load a scalar integer constant directly to XMM instead of transferring an
41216     // immediate value from GPR.
41217     // vzext_movl (scalar_to_vector C) --> load [C,0...]
41218     if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
41219       if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
41220         // Create a vector constant - scalar constant followed by zeros.
41221         EVT ScalarVT = N0.getOperand(0).getValueType();
41222         Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
41223         unsigned NumElts = VT.getVectorNumElements();
41224         Constant *Zero = ConstantInt::getNullValue(ScalarTy);
41225         SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
41226         ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
41227 
41228         // Load the vector constant from constant pool.
41229         MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
41230         SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
41231         MachinePointerInfo MPI =
41232             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
41233         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
41234         return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
41235                            MachineMemOperand::MOLoad);
41236       }
41237     }
41238 
41239     // Pull subvector inserts into undef through VZEXT_MOVL by making it an
41240     // insert into a zero vector. This helps get VZEXT_MOVL closer to
41241     // scalar_to_vectors where 256/512 are canonicalized to an insert and a
41242     // 128-bit scalar_to_vector. This reduces the number of isel patterns.
41243     if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
41244       SDValue V = peekThroughOneUseBitcasts(N0);
41245 
41246       if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
41247           isNullConstant(V.getOperand(2))) {
41248         SDValue In = V.getOperand(1);
41249         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
41250                                      In.getValueSizeInBits() /
41251                                          VT.getScalarSizeInBits());
41252         In = DAG.getBitcast(SubVT, In);
41253         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
41254         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
41255                            getZeroVector(VT, Subtarget, DAG, DL), Movl,
41256                            V.getOperand(2));
41257       }
41258     }
41259 
41260     return SDValue();
41261   }
41262   case X86ISD::BLENDI: {
41263     SDValue N0 = N.getOperand(0);
41264     SDValue N1 = N.getOperand(1);
41265 
41266     // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
41267     // TODO: Handle MVT::v16i16 repeated blend mask.
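    // Illustrative example: a v4f64 BLENDI with mask 0b0101 over v8f32
    // sources becomes a v8f32 BLENDI with mask 0b00110011 - each original
    // mask bit is repeated for the Scale (here 2) narrower elements it
    // covers.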
41268     if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
41269         N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
41270       MVT SrcVT = N0.getOperand(0).getSimpleValueType();
41271       if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
41272           SrcVT.getScalarSizeInBits() >= 32) {
41273         unsigned BlendMask = N.getConstantOperandVal(2);
41274         unsigned Size = VT.getVectorNumElements();
41275         unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
41276         BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
41277         return DAG.getBitcast(
41278             VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
41279                             N1.getOperand(0),
41280                             DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
41281       }
41282     }
41283     return SDValue();
41284   }
41285   case X86ISD::SHUFP: {
41286     // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
41287     // This is a more relaxed shuffle combiner that can ignore oneuse limits.
41288     // TODO: Support types other than v4f32.
41289     if (VT == MVT::v4f32) {
41290       bool Updated = false;
41291       SmallVector<int> Mask;
41292       SmallVector<SDValue> Ops;
41293       if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
41294           Ops.size() == 2) {
41295         for (int i = 0; i != 2; ++i) {
41296           SmallVector<SDValue> SubOps;
41297           SmallVector<int> SubMask, SubScaledMask;
41298           SDValue Sub = peekThroughBitcasts(Ops[i]);
41299           // TODO: Scaling might be easier if we specify the demanded elts.
41300           if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
41301               scaleShuffleElements(SubMask, 4, SubScaledMask) &&
41302               SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
41303             int Ofs = i * 2;
41304             Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
41305             Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
41306             Ops[i] = DAG.getBitcast(VT, SubOps[0]);
41307             Updated = true;
41308           }
41309         }
41310       }
41311       if (Updated) {
41312         for (int &M : Mask)
41313           M %= 4;
41314         Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
41315         return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
41316       }
41317     }
41318     return SDValue();
41319   }
41320   case X86ISD::VPERMI: {
41321     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
41322     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
41323     SDValue N0 = N.getOperand(0);
41324     SDValue N1 = N.getOperand(1);
41325     unsigned EltSizeInBits = VT.getScalarSizeInBits();
41326     if (N0.getOpcode() == ISD::BITCAST &&
41327         N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
41328       SDValue Src = N0.getOperand(0);
41329       EVT SrcVT = Src.getValueType();
41330       SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
41331       return DAG.getBitcast(VT, Res);
41332     }
41333     return SDValue();
41334   }
41335   case X86ISD::VPERM2X128: {
41336     // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
41337     SDValue LHS = N->getOperand(0);
41338     SDValue RHS = N->getOperand(1);
41339     if (LHS.getOpcode() == ISD::BITCAST &&
41340         (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
41341       EVT SrcVT = LHS.getOperand(0).getValueType();
41342       if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
41343         return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
41344                                               DAG.getBitcast(SrcVT, LHS),
41345                                               DAG.getBitcast(SrcVT, RHS),
41346                                               N->getOperand(2)));
41347       }
41348     }
41349 
41350     // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
41351     if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
41352       return Res;
41353 
41354     // Fold vperm2x128 subvector shuffle with an inner concat pattern.
41355     // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
41356     auto FindSubVector128 = [&](unsigned Idx) {
41357       if (Idx > 3)
41358         return SDValue();
41359       SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
41360       SmallVector<SDValue> SubOps;
41361       if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
41362         return SubOps[Idx & 1];
41363       unsigned NumElts = Src.getValueType().getVectorNumElements();
41364       if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
41365           Src.getOperand(1).getValueSizeInBits() == 128 &&
41366           Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
41367         return Src.getOperand(1);
41368       }
41369       return SDValue();
41370     };
41371     unsigned Imm = N.getConstantOperandVal(2);
41372     if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
41373       if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
41374         MVT SubVT = VT.getHalfNumVectorElementsVT();
41375         SubLo = DAG.getBitcast(SubVT, SubLo);
41376         SubHi = DAG.getBitcast(SubVT, SubHi);
41377         return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
41378       }
41379     }
41380     return SDValue();
41381   }
41382   case X86ISD::PSHUFD:
41383   case X86ISD::PSHUFLW:
41384   case X86ISD::PSHUFHW:
41385     Mask = getPSHUFShuffleMask(N);
41386     assert(Mask.size() == 4);
41387     break;
41388   case X86ISD::MOVSD:
41389   case X86ISD::MOVSH:
41390   case X86ISD::MOVSS: {
41391     SDValue N0 = N.getOperand(0);
41392     SDValue N1 = N.getOperand(1);
41393 
41394     // Canonicalize scalar FPOps:
41395     // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
41396     // If commutable, allow OP(N1[0], N0[0]).
41397     unsigned Opcode1 = N1.getOpcode();
41398     if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
41399         Opcode1 == ISD::FDIV) {
41400       SDValue N10 = N1.getOperand(0);
41401       SDValue N11 = N1.getOperand(1);
41402       if (N10 == N0 ||
41403           (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
41404         if (N10 != N0)
41405           std::swap(N10, N11);
41406         MVT SVT = VT.getVectorElementType();
41407         SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
41408         N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
41409         N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
41410         SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
41411         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
41412         return DAG.getNode(Opcode, DL, VT, N0, SclVec);
41413       }
41414     }
41415 
41416     return SDValue();
41417   }
41418   case X86ISD::INSERTPS: {
41419     assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
41420     SDValue Op0 = N.getOperand(0);
41421     SDValue Op1 = N.getOperand(1);
41422     unsigned InsertPSMask = N.getConstantOperandVal(2);
41423     unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
41424     unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
41425     unsigned ZeroMask = InsertPSMask & 0xF;
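    // For example, InsertPSMask == 0x98 (SrcIdx=2, DstIdx=1, ZeroMask=0b1000)
    // takes element 2 of Op1, inserts it into element 1 of Op0, and zeroes
    // element 3 of the result.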
41426 
41427     // If we zero out all elements from Op0 then we don't need to reference it.
41428     if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
41429       return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
41430                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41431 
41432     // If we zero out the element from Op1 then we don't need to reference it.
41433     if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
41434       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
41435                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41436 
41437     // Attempt to merge insertps Op1 with an inner target shuffle node.
41438     SmallVector<int, 8> TargetMask1;
41439     SmallVector<SDValue, 2> Ops1;
41440     APInt KnownUndef1, KnownZero1;
41441     if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
41442                                      KnownZero1)) {
41443       if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
41444         // Zero/UNDEF insertion - zero out element and remove dependency.
41445         InsertPSMask |= (1u << DstIdx);
41446         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
41447                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41448       }
41449       // Update insertps mask srcidx and reference the source input directly.
41450       int M = TargetMask1[SrcIdx];
41451       assert(0 <= M && M < 8 && "Shuffle index out of range");
41452       InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
41453       Op1 = Ops1[M < 4 ? 0 : 1];
41454       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
41455                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41456     }
41457 
41458     // Attempt to merge insertps Op0 with an inner target shuffle node.
41459     SmallVector<int, 8> TargetMask0;
41460     SmallVector<SDValue, 2> Ops0;
41461     APInt KnownUndef0, KnownZero0;
41462     if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
41463                                      KnownZero0)) {
41464       bool Updated = false;
41465       bool UseInput00 = false;
41466       bool UseInput01 = false;
41467       for (int i = 0; i != 4; ++i) {
41468         if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
41469           // No change if element is already zero or the inserted element.
41470           continue;
41471         }
41472 
41473         if (KnownUndef0[i] || KnownZero0[i]) {
41474           // If the target mask is undef/zero then we must zero the element.
41475           InsertPSMask |= (1u << i);
41476           Updated = true;
41477           continue;
41478         }
41479 
41480         // The input vector element must be inline.
41481         int M = TargetMask0[i];
41482         if (M != i && M != (i + 4))
41483           return SDValue();
41484 
41485         // Determine which inputs of the target shuffle we're using.
41486         UseInput00 |= (0 <= M && M < 4);
41487         UseInput01 |= (4 <= M);
41488       }
41489 
41490       // If we're not using both inputs of the target shuffle then use the
41491       // referenced input directly.
41492       if (UseInput00 && !UseInput01) {
41493         Updated = true;
41494         Op0 = Ops0[0];
41495       } else if (!UseInput00 && UseInput01) {
41496         Updated = true;
41497         Op0 = Ops0[1];
41498       }
41499 
41500       if (Updated)
41501         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
41502                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41503     }
41504 
41505     // If we're inserting an element from a vbroadcast load, fold the
41506     // load into the X86insertps instruction. We need to convert the scalar
41507     // load to a vector and clear the source lane of the INSERTPS control.
41508     if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
41509       auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
41510       if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
41511         SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
41512                                    MemIntr->getBasePtr(),
41513                                    MemIntr->getMemOperand());
41514         SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
41515                            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
41516                                        Load),
41517                            DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
41518         DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
41519         return Insert;
41520       }
41521     }
41522 
41523     return SDValue();
41524   }
41525   default:
41526     return SDValue();
41527   }
41528 
41529   // Nuke no-op shuffles that show up after combining.
41530   if (isNoopShuffleMask(Mask))
41531     return N.getOperand(0);
41532 
41533   // Look for simplifications involving one or two shuffle instructions.
41534   SDValue V = N.getOperand(0);
41535   switch (N.getOpcode()) {
41536   default:
41537     break;
41538   case X86ISD::PSHUFLW:
41539   case X86ISD::PSHUFHW:
41540     assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
41541 
41542     // See if this reduces to a PSHUFD which is no more expensive and can
41543     // combine with more operations. Note that it has to at least flip the
41544     // dwords as otherwise it would have been removed as a no-op.
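    // For example, pshuflw <2,3,0,1> swaps words {0,1} with words {2,3},
    // which is exactly a swap of dwords 0 and 1, i.e. pshufd <1,0,2,3> on
    // the value bitcast to v4i32.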
41545     if (ArrayRef(Mask).equals({2, 3, 0, 1})) {
41546       int DMask[] = {0, 1, 2, 3};
41547       int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
41548       DMask[DOffset + 0] = DOffset + 1;
41549       DMask[DOffset + 1] = DOffset + 0;
41550       MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
41551       V = DAG.getBitcast(DVT, V);
41552       V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
41553                       getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
41554       return DAG.getBitcast(VT, V);
41555     }
41556 
41557     // Look for shuffle patterns which can be implemented as a single unpack.
41558     // FIXME: This doesn't handle the location of the PSHUFD generically, and
41559     // only works when we have a PSHUFD followed by two half-shuffles.
41560     if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
41561         (V.getOpcode() == X86ISD::PSHUFLW ||
41562          V.getOpcode() == X86ISD::PSHUFHW) &&
41563         V.getOpcode() != N.getOpcode() &&
41564         V.hasOneUse() && V.getOperand(0).hasOneUse()) {
41565       SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
41566       if (D.getOpcode() == X86ISD::PSHUFD) {
41567         SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
41568         SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
41569         int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
41570         int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
41571         int WordMask[8];
41572         for (int i = 0; i < 4; ++i) {
41573           WordMask[i + NOffset] = Mask[i] + NOffset;
41574           WordMask[i + VOffset] = VMask[i] + VOffset;
41575         }
41576         // Map the word mask through the DWord mask.
41577         int MappedMask[8];
41578         for (int i = 0; i < 8; ++i)
41579           MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
41580         if (ArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
41581             ArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
41582           // We can replace all three shuffles with an unpack.
41583           V = DAG.getBitcast(VT, D.getOperand(0));
41584           return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
41585                                                 : X86ISD::UNPCKH,
41586                              DL, VT, V, V);
41587         }
41588       }
41589     }
41590 
41591     break;
41592 
41593   case X86ISD::PSHUFD:
41594     if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
41595       return NewN;
41596 
41597     break;
41598   }
41599 
41600   return SDValue();
41601 }
41602 
41603 /// Checks if the shuffle mask takes subsequent elements
41604 /// alternately from two vectors.
41605 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
41606 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
41607 
41608   int ParitySrc[2] = {-1, -1};
41609   unsigned Size = Mask.size();
41610   for (unsigned i = 0; i != Size; ++i) {
41611     int M = Mask[i];
41612     if (M < 0)
41613       continue;
41614 
41615     // Make sure we are using the matching element from the input.
41616     if ((M % Size) != i)
41617       return false;
41618 
41619     // Make sure we use the same input for all elements of the same parity.
41620     int Src = M / Size;
41621     if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
41622       return false;
41623     ParitySrc[i % 2] = Src;
41624   }
41625 
41626   // Make sure each input is used.
41627   if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
41628     return false;
41629 
41630   Op0Even = ParitySrc[0] == 0;
41631   return true;
41632 }
41633 
41634 /// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
41635 /// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
41636 /// are written to the parameters \p Opnd0 and \p Opnd1.
41637 ///
41638 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
41639 /// shuffle nodes so they are easier to match generically. We also insert
41640 /// dummy vector shuffle nodes for the operands which explicitly discard the
41641 /// lanes unused by this operation, so that the fact that they're unused can
41642 /// flow through the rest of the combiner.
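/// For example, for v4f32:
///   shuffle (fsub A, B), (fadd A, B), <0, 5, 2, 7>  -->  ADDSUB A, B
/// while the mirrored pattern (even lanes taken from the FADD) is the SUBADD
/// form, which is only emitted when it can be folded into FMSUBADD.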
41643 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
41644                              SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
41645                              bool &IsSubAdd) {
41646 
41647   EVT VT = N->getValueType(0);
41648   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41649   if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
41650       !VT.getSimpleVT().isFloatingPoint())
41651     return false;
41652 
41653   // We only handle target-independent shuffles.
41654   // FIXME: It would be easy and harmless to use the target shuffle mask
41655   // extraction tool to support more.
41656   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
41657     return false;
41658 
41659   SDValue V1 = N->getOperand(0);
41660   SDValue V2 = N->getOperand(1);
41661 
41662   // Make sure we have an FADD and an FSUB.
41663   if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
41664       (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
41665       V1.getOpcode() == V2.getOpcode())
41666     return false;
41667 
41668   // If there are other uses of these operations we can't fold them.
41669   if (!V1->hasOneUse() || !V2->hasOneUse())
41670     return false;
41671 
41672   // Ensure that both operations have the same operands. Note that we can
41673   // commute the FADD operands.
41674   SDValue LHS, RHS;
41675   if (V1.getOpcode() == ISD::FSUB) {
41676     LHS = V1->getOperand(0); RHS = V1->getOperand(1);
41677     if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
41678         (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
41679       return false;
41680   } else {
41681     assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
41682     LHS = V2->getOperand(0); RHS = V2->getOperand(1);
41683     if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
41684         (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
41685       return false;
41686   }
41687 
41688   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
41689   bool Op0Even;
41690   if (!isAddSubOrSubAddMask(Mask, Op0Even))
41691     return false;
41692 
41693   // It's a subadd if the vector in the even parity is an FADD.
41694   IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
41695                      : V2->getOpcode() == ISD::FADD;
41696 
41697   Opnd0 = LHS;
41698   Opnd1 = RHS;
41699   return true;
41700 }
41701 
41702 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
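/// For example, for v4f32:
///   shuffle (X86Fmsub A, B, C), (fma A, B, C), <0, 5, 2, 7>  -->  FMADDSUB A, B, C
/// and with the two shuffle sources swapped the same mask yields FMSUBADD.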
41703 static SDValue combineShuffleToFMAddSub(SDNode *N,
41704                                         const X86Subtarget &Subtarget,
41705                                         SelectionDAG &DAG) {
41706   // We only handle target-independent shuffles.
41707   // FIXME: It would be easy and harmless to use the target shuffle mask
41708   // extraction tool to support more.
41709   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
41710     return SDValue();
41711 
41712   MVT VT = N->getSimpleValueType(0);
41713   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41714   if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
41715     return SDValue();
41716 
41717   // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
41718   SDValue Op0 = N->getOperand(0);
41719   SDValue Op1 = N->getOperand(1);
41720   SDValue FMAdd = Op0, FMSub = Op1;
41721   if (FMSub.getOpcode() != X86ISD::FMSUB)
41722     std::swap(FMAdd, FMSub);
41723 
41724   if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
41725       FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
41726       FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
41727       FMAdd.getOperand(2) != FMSub.getOperand(2))
41728     return SDValue();
41729 
41730   // Check for correct shuffle mask.
41731   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
41732   bool Op0Even;
41733   if (!isAddSubOrSubAddMask(Mask, Op0Even))
41734     return SDValue();
41735 
41736   // FMAddSub takes zeroth operand from FMSub node.
41737   SDLoc DL(N);
41738   bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
41739   unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
41740   return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
41741                      FMAdd.getOperand(2));
41742 }
41743 
41744 /// Try to combine a shuffle into a target-specific add-sub or
41745 /// mul-add-sub node.
41746 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
41747                                                 const X86Subtarget &Subtarget,
41748                                                 SelectionDAG &DAG) {
41749   if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
41750     return V;
41751 
41752   SDValue Opnd0, Opnd1;
41753   bool IsSubAdd;
41754   if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
41755     return SDValue();
41756 
41757   MVT VT = N->getSimpleValueType(0);
41758   SDLoc DL(N);
41759 
41760   // Try to generate X86ISD::FMADDSUB node here.
41761   SDValue Opnd2;
41762   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
41763     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
41764     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
41765   }
41766 
41767   if (IsSubAdd)
41768     return SDValue();
41769 
41770   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
41771   // the ADDSUB idiom has been successfully recognized. There are no known
41772   // X86 targets with 512-bit ADDSUB instructions!
41773   if (VT.is512BitVector())
41774     return SDValue();
41775 
41776   // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
41777   // the ADDSUB idiom has been successfully recognized. There are no known
41778   // X86 targets with FP16 ADDSUB instructions!
41779   if (VT.getVectorElementType() == MVT::f16)
41780     return SDValue();
41781 
41782   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
41783 }
41784 
41785 // We are looking for a shuffle where both sources are concatenated with undef
41786 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
41787 // if we can express this as a single-source shuffle, that's preferable.
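// For example, with v4i32 output and v2i32 inputs t1/t2:
//   (vector_shuffle <0,1,4,5> (concat_vectors t1, undef)
//                             (concat_vectors t2, undef))
// becomes
//   (vector_shuffle <0,1,2,3> (concat_vectors t1, t2), undef)
// since t2's elements now start at index 2 rather than index 4.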
41788 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
41789                                            const X86Subtarget &Subtarget) {
41790   if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
41791     return SDValue();
41792 
41793   EVT VT = N->getValueType(0);
41794 
41795   // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
41796   if (!VT.is128BitVector() && !VT.is256BitVector())
41797     return SDValue();
41798 
41799   if (VT.getVectorElementType() != MVT::i32 &&
41800       VT.getVectorElementType() != MVT::i64 &&
41801       VT.getVectorElementType() != MVT::f32 &&
41802       VT.getVectorElementType() != MVT::f64)
41803     return SDValue();
41804 
41805   SDValue N0 = N->getOperand(0);
41806   SDValue N1 = N->getOperand(1);
41807 
41808   // Check that both sources are concats with undef.
41809   if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
41810       N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
41811       N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
41812       !N1.getOperand(1).isUndef())
41813     return SDValue();
41814 
41815   // Construct the new shuffle mask. Elements from the first source retain their
41816   // index, but elements from the second source no longer need to skip an undef.
41817   SmallVector<int, 8> Mask;
41818   int NumElts = VT.getVectorNumElements();
41819 
41820   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
41821   for (int Elt : SVOp->getMask())
41822     Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
41823 
41824   SDLoc DL(N);
41825   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
41826                                N1.getOperand(0));
41827   return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
41828 }
41829 
41830 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
41831 /// low half of each source vector and does not set any high half elements in
41832 /// the destination vector, narrow the shuffle to half its original size.
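/// For example, a v8f32 shuffle with mask <0, 8, 1, 9, u, u, u, u> only reads
/// the low xmm halves of both ymm sources and leaves the high half of the
/// result undef, so it can be done as a v4f32 shuffle of the two low halves.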
41833 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
41834   if (!Shuf->getValueType(0).isSimple())
41835     return SDValue();
41836   MVT VT = Shuf->getSimpleValueType(0);
41837   if (!VT.is256BitVector() && !VT.is512BitVector())
41838     return SDValue();
41839 
41840   // See if we can ignore all of the high elements of the shuffle.
41841   ArrayRef<int> Mask = Shuf->getMask();
41842   if (!isUndefUpperHalf(Mask))
41843     return SDValue();
41844 
41845   // Check if the shuffle mask accesses only the low half of each input vector
41846   // (half-index output is 0 or 2).
41847   int HalfIdx1, HalfIdx2;
41848   SmallVector<int, 8> HalfMask(Mask.size() / 2);
41849   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
41850       (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
41851     return SDValue();
41852 
41853   // Create a half-width shuffle to replace the unnecessarily wide shuffle.
41854   // The trick is knowing that all of the insert/extract are actually free
41855   // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
41856   // of narrow inputs into a narrow output, and that is always cheaper than
41857   // the wide shuffle that we started with.
41858   return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
41859                                Shuf->getOperand(1), HalfMask, HalfIdx1,
41860                                HalfIdx2, false, DAG, /*UseConcat*/true);
41861 }
41862 
41863 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
41864                               TargetLowering::DAGCombinerInfo &DCI,
41865                               const X86Subtarget &Subtarget) {
41866   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
41867     if (SDValue V = narrowShuffle(Shuf, DAG))
41868       return V;
41869 
41870   // If we have legalized the vector types, look for blends of FADD and FSUB
41871   // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
41872   SDLoc dl(N);
41873   EVT VT = N->getValueType(0);
41874   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41875   if (TLI.isTypeLegal(VT))
41876     if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
41877       return AddSub;
41878 
41879   // Attempt to combine into a vector load/broadcast.
41880   if (SDValue LD = combineToConsecutiveLoads(
41881           VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
41882     return LD;
41883 
41884   // For AVX2, we sometimes want to combine
41885   // (vector_shuffle <mask> (concat_vectors t1, undef)
41886   //                        (concat_vectors t2, undef))
41887   // Into:
41888   // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
41889   // Since the latter can be efficiently lowered with VPERMD/VPERMQ
41890   if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
41891     return ShufConcat;
41892 
41893   if (isTargetShuffle(N->getOpcode())) {
41894     SDValue Op(N, 0);
41895     if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
41896       return Shuffle;
41897 
41898     // Try recursively combining arbitrary sequences of x86 shuffle
41899     // instructions into higher-order shuffles. We do this after combining
41900     // specific PSHUF instruction sequences into their minimal form so that we
41901     // can evaluate how many specialized shuffle instructions are involved in
41902     // a particular chain.
41903     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
41904       return Res;
41905 
41906     // Simplify source operands based on shuffle mask.
41907     // TODO - merge this into combineX86ShufflesRecursively.
41908     APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
41909     if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
41910       return SDValue(N, 0);
41911 
41912     // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
41913     // Perform this after other shuffle combines to allow inner shuffles to be
41914     // combined away first.
41915     if (SDValue BinOp = canonicalizeShuffleWithBinOps(Op, DAG, dl))
41916       return BinOp;
41917   }
41918 
41919   return SDValue();
41920 }
41921 
41922 // Simplify variable target shuffle masks based on the demanded elements.
41923 // TODO: Handle DemandedBits in mask indices as well?
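// For example, if only the low half of a PSHUFB result is demanded and its
// mask is a constant-pool load, the mask constants feeding the undemanded
// upper lanes can be rewritten to undef by the code below.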
41924 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
41925     SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
41926     TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
41927   // If we're demanding all elements don't bother trying to simplify the mask.
41928   unsigned NumElts = DemandedElts.getBitWidth();
41929   if (DemandedElts.isAllOnes())
41930     return false;
41931 
41932   SDValue Mask = Op.getOperand(MaskIndex);
41933   if (!Mask.hasOneUse())
41934     return false;
41935 
41936   // Attempt to generically simplify the variable shuffle mask.
41937   APInt MaskUndef, MaskZero;
41938   if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
41939                                  Depth + 1))
41940     return true;
41941 
41942   // Attempt to extract+simplify a (constant pool load) shuffle mask.
41943   // TODO: Support other types from getTargetShuffleMaskIndices?
41944   SDValue BC = peekThroughOneUseBitcasts(Mask);
41945   EVT BCVT = BC.getValueType();
41946   auto *Load = dyn_cast<LoadSDNode>(BC);
41947   if (!Load)
41948     return false;
41949 
41950   const Constant *C = getTargetConstantFromNode(Load);
41951   if (!C)
41952     return false;
41953 
41954   Type *CTy = C->getType();
41955   if (!CTy->isVectorTy() ||
41956       CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
41957     return false;
41958 
41959   // Handle scaling for i64 elements on 32-bit targets.
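  // For example, on a 32-bit target the mask for a v2i64 shuffle may be built
  // as a v4i32 constant, giving Scale == 2; constant element i then maps to
  // demanded shuffle element i / 2.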
41960   unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
41961   if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
41962     return false;
41963   unsigned Scale = NumCstElts / NumElts;
41964 
41965   // Simplify mask if we have an undemanded element that is not undef.
41966   bool Simplified = false;
41967   SmallVector<Constant *, 32> ConstVecOps;
41968   for (unsigned i = 0; i != NumCstElts; ++i) {
41969     Constant *Elt = C->getAggregateElement(i);
41970     if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
41971       ConstVecOps.push_back(UndefValue::get(Elt->getType()));
41972       Simplified = true;
41973       continue;
41974     }
41975     ConstVecOps.push_back(Elt);
41976   }
41977   if (!Simplified)
41978     return false;
41979 
41980   // Generate new constant pool entry + legalize immediately for the load.
41981   SDLoc DL(Op);
41982   SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
41983   SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
41984   SDValue NewMask = TLO.DAG.getLoad(
41985       BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
41986       MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
41987       Load->getAlign());
41988   return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
41989 }
41990 
41991 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
41992     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
41993     TargetLoweringOpt &TLO, unsigned Depth) const {
41994   int NumElts = DemandedElts.getBitWidth();
41995   unsigned Opc = Op.getOpcode();
41996   EVT VT = Op.getValueType();
41997 
41998   // Handle special case opcodes.
41999   switch (Opc) {
42000   case X86ISD::PMULDQ:
42001   case X86ISD::PMULUDQ: {
42002     APInt LHSUndef, LHSZero;
42003     APInt RHSUndef, RHSZero;
42004     SDValue LHS = Op.getOperand(0);
42005     SDValue RHS = Op.getOperand(1);
42006     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
42007                                    Depth + 1))
42008       return true;
42009     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
42010                                    Depth + 1))
42011       return true;
42012     // Multiply by zero.
42013     KnownZero = LHSZero | RHSZero;
42014     break;
42015   }
42016   case X86ISD::VPMADDWD: {
42017     APInt LHSUndef, LHSZero;
42018     APInt RHSUndef, RHSZero;
42019     SDValue LHS = Op.getOperand(0);
42020     SDValue RHS = Op.getOperand(1);
42021     APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
42022 
42023     if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
42024                                    Depth + 1))
42025       return true;
42026     if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
42027                                    Depth + 1))
42028       return true;
42029 
42030     // TODO: Multiply by zero.
42031 
42032     // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
42033     APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
42034     if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
42035                                    Depth + 1))
42036       return true;
42037     APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
42038     if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
42039                                    Depth + 1))
42040       return true;
42041     break;
42042   }
42043   case X86ISD::PSADBW: {
42044     SDValue LHS = Op.getOperand(0);
42045     SDValue RHS = Op.getOperand(1);
42046     assert(VT.getScalarType() == MVT::i64 &&
42047            LHS.getValueType() == RHS.getValueType() &&
42048            LHS.getValueType().getScalarType() == MVT::i8 &&
42049            "Unexpected PSADBW types");
42050 
42051     // Aggressively peek through ops to get at the demanded elts.
42052     if (!DemandedElts.isAllOnes()) {
42053       unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
42054       APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
42055       SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
42056           LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
42057       SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
42058           RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
42059       if (NewLHS || NewRHS) {
42060         NewLHS = NewLHS ? NewLHS : LHS;
42061         NewRHS = NewRHS ? NewRHS : RHS;
42062         return TLO.CombineTo(
42063             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
42064       }
42065     }
42066     break;
42067   }
42068   case X86ISD::VSHL:
42069   case X86ISD::VSRL:
42070   case X86ISD::VSRA: {
42071     // We only need the bottom 64-bits of the (128-bit) shift amount.
42072     SDValue Amt = Op.getOperand(1);
42073     MVT AmtVT = Amt.getSimpleValueType();
42074     assert(AmtVT.is128BitVector() && "Unexpected value type");
42075 
42076     // If the shift amount is only reused as an SSE shift amount then we know
42077     // that only the bottom 64-bits are ever used.
42078     bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
42079       unsigned UseOpc = Use->getOpcode();
42080       return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
42081               UseOpc == X86ISD::VSRA) &&
42082              Use->getOperand(0) != Amt;
42083     });
42084 
42085     APInt AmtUndef, AmtZero;
42086     unsigned NumAmtElts = AmtVT.getVectorNumElements();
42087     APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
42088     if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
42089                                    Depth + 1, AssumeSingleUse))
42090       return true;
42091     [[fallthrough]];
42092   }
42093   case X86ISD::VSHLI:
42094   case X86ISD::VSRLI:
42095   case X86ISD::VSRAI: {
42096     SDValue Src = Op.getOperand(0);
42097     APInt SrcUndef;
42098     if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
42099                                    Depth + 1))
42100       return true;
42101 
42102     // Fold shift(0,x) -> 0
42103     if (DemandedElts.isSubsetOf(KnownZero))
42104       return TLO.CombineTo(
42105           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42106 
42107     // Aggressively peek through ops to get at the demanded elts.
42108     if (!DemandedElts.isAllOnes())
42109       if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
42110               Src, DemandedElts, TLO.DAG, Depth + 1))
42111         return TLO.CombineTo(
42112             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
42113     break;
42114   }
42115   case X86ISD::VPSHA:
42116   case X86ISD::VPSHL:
42117   case X86ISD::VSHLV:
42118   case X86ISD::VSRLV:
42119   case X86ISD::VSRAV: {
42120     APInt LHSUndef, LHSZero;
42121     APInt RHSUndef, RHSZero;
42122     SDValue LHS = Op.getOperand(0);
42123     SDValue RHS = Op.getOperand(1);
42124     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
42125                                    Depth + 1))
42126       return true;
42127 
42128     // Fold shift(0,x) -> 0
42129     if (DemandedElts.isSubsetOf(LHSZero))
42130       return TLO.CombineTo(
42131           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42132 
42133     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
42134                                    Depth + 1))
42135       return true;
42136 
42137     KnownZero = LHSZero;
42138     break;
42139   }
42140   case X86ISD::KSHIFTL: {
42141     SDValue Src = Op.getOperand(0);
42142     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
42143     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42144     unsigned ShiftAmt = Amt->getZExtValue();
42145 
42146     if (ShiftAmt == 0)
42147       return TLO.CombineTo(Op, Src);
42148 
42149     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
42150     // single shift.  We can do this if the bottom bits (which are shifted
42151     // out) are never demanded.
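    // For example, with none of the low 6 result elements demanded:
    //   kshiftl (kshiftr X, 4), 6  -->  kshiftl X, 2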
42152     if (Src.getOpcode() == X86ISD::KSHIFTR) {
42153       if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
42154         unsigned C1 = Src.getConstantOperandVal(1);
42155         unsigned NewOpc = X86ISD::KSHIFTL;
42156         int Diff = ShiftAmt - C1;
42157         if (Diff < 0) {
42158           Diff = -Diff;
42159           NewOpc = X86ISD::KSHIFTR;
42160         }
42161 
42162         SDLoc dl(Op);
42163         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
42164         return TLO.CombineTo(
42165             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
42166       }
42167     }
42168 
42169     APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
42170     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
42171                                    Depth + 1))
42172       return true;
42173 
42174     KnownUndef <<= ShiftAmt;
42175     KnownZero <<= ShiftAmt;
42176     KnownZero.setLowBits(ShiftAmt);
42177     break;
42178   }
42179   case X86ISD::KSHIFTR: {
42180     SDValue Src = Op.getOperand(0);
42181     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
42182     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42183     unsigned ShiftAmt = Amt->getZExtValue();
42184 
42185     if (ShiftAmt == 0)
42186       return TLO.CombineTo(Op, Src);
42187 
42188     // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
42189     // single shift.  We can do this if the top bits (which are shifted
42190     // out) are never demanded.
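    // For example, with none of the top 6 result elements demanded:
    //   kshiftr (kshiftl X, 4), 6  -->  kshiftr X, 2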
42191     if (Src.getOpcode() == X86ISD::KSHIFTL) {
42192       if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
42193         unsigned C1 = Src.getConstantOperandVal(1);
42194         unsigned NewOpc = X86ISD::KSHIFTR;
42195         int Diff = ShiftAmt - C1;
42196         if (Diff < 0) {
42197           Diff = -Diff;
42198           NewOpc = X86ISD::KSHIFTL;
42199         }
42200 
42201         SDLoc dl(Op);
42202         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
42203         return TLO.CombineTo(
42204             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
42205       }
42206     }
42207 
42208     APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
42209     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
42210                                    Depth + 1))
42211       return true;
42212 
42213     KnownUndef.lshrInPlace(ShiftAmt);
42214     KnownZero.lshrInPlace(ShiftAmt);
42215     KnownZero.setHighBits(ShiftAmt);
42216     break;
42217   }
42218   case X86ISD::ANDNP: {
42219     // ANDNP = (~LHS & RHS);
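    // For example, in ANDNP(X, C) with a constant C, X only matters in the bit
    // positions (and lanes) where C is nonzero, while in ANDNP(C, X), X only
    // matters where C is not all-ones - the lambda below computes exactly
    // these demanded bits/elts masks for each operand.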
42220     SDValue LHS = Op.getOperand(0);
42221     SDValue RHS = Op.getOperand(1);
42222 
42223     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
42224       APInt UndefElts;
42225       SmallVector<APInt> EltBits;
42226       int NumElts = VT.getVectorNumElements();
42227       int EltSizeInBits = VT.getScalarSizeInBits();
42228       APInt OpBits = APInt::getAllOnes(EltSizeInBits);
42229       APInt OpElts = DemandedElts;
42230       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
42231                                         EltBits)) {
42232         OpBits.clearAllBits();
42233         OpElts.clearAllBits();
42234         for (int I = 0; I != NumElts; ++I) {
42235           if (!DemandedElts[I])
42236             continue;
42237           if (UndefElts[I]) {
42238             // We can't assume an undef src element gives an undef dst - the
42239             // other src might be zero.
42240             OpBits.setAllBits();
42241             OpElts.setBit(I);
42242           } else if ((Invert && !EltBits[I].isAllOnes()) ||
42243                      (!Invert && !EltBits[I].isZero())) {
42244             OpBits |= Invert ? ~EltBits[I] : EltBits[I];
42245             OpElts.setBit(I);
42246           }
42247         }
42248       }
42249       return std::make_pair(OpBits, OpElts);
42250     };
42251     APInt BitsLHS, EltsLHS;
42252     APInt BitsRHS, EltsRHS;
42253     std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
42254     std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
42255 
42256     APInt LHSUndef, LHSZero;
42257     APInt RHSUndef, RHSZero;
42258     if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
42259                                    Depth + 1))
42260       return true;
42261     if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
42262                                    Depth + 1))
42263       return true;
42264 
42265     if (!DemandedElts.isAllOnes()) {
42266       SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
42267                                                        TLO.DAG, Depth + 1);
42268       SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
42269                                                        TLO.DAG, Depth + 1);
42270       if (NewLHS || NewRHS) {
42271         NewLHS = NewLHS ? NewLHS : LHS;
42272         NewRHS = NewRHS ? NewRHS : RHS;
42273         return TLO.CombineTo(
42274             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
42275       }
42276     }
42277     break;
42278   }
42279   case X86ISD::CVTSI2P:
42280   case X86ISD::CVTUI2P: {
42281     SDValue Src = Op.getOperand(0);
42282     MVT SrcVT = Src.getSimpleValueType();
42283     APInt SrcUndef, SrcZero;
42284     APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
42285     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
42286                                    Depth + 1))
42287       return true;
42288     break;
42289   }
42290   case X86ISD::PACKSS:
42291   case X86ISD::PACKUS: {
42292     SDValue N0 = Op.getOperand(0);
42293     SDValue N1 = Op.getOperand(1);
42294 
42295     APInt DemandedLHS, DemandedRHS;
42296     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
42297 
42298     APInt LHSUndef, LHSZero;
42299     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
42300                                    Depth + 1))
42301       return true;
42302     APInt RHSUndef, RHSZero;
42303     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
42304                                    Depth + 1))
42305       return true;
42306 
42307     // TODO - pass on known zero/undef.
42308 
42309     // Aggressively peek through ops to get at the demanded elts.
42310     // TODO - we should do this for all target/faux shuffles ops.
42311     if (!DemandedElts.isAllOnes()) {
42312       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
42313                                                             TLO.DAG, Depth + 1);
42314       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
42315                                                             TLO.DAG, Depth + 1);
42316       if (NewN0 || NewN1) {
42317         NewN0 = NewN0 ? NewN0 : N0;
42318         NewN1 = NewN1 ? NewN1 : N1;
42319         return TLO.CombineTo(Op,
42320                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
42321       }
42322     }
42323     break;
42324   }
42325   case X86ISD::HADD:
42326   case X86ISD::HSUB:
42327   case X86ISD::FHADD:
42328   case X86ISD::FHSUB: {
42329     SDValue N0 = Op.getOperand(0);
42330     SDValue N1 = Op.getOperand(1);
42331 
42332     APInt DemandedLHS, DemandedRHS;
42333     getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
42334 
42335     APInt LHSUndef, LHSZero;
42336     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
42337                                    Depth + 1))
42338       return true;
42339     APInt RHSUndef, RHSZero;
42340     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
42341                                    Depth + 1))
42342       return true;
42343 
42344     // TODO - pass on known zero/undef.
42345 
42346     // Aggressively peek through ops to get at the demanded elts.
42347     // TODO: Handle repeated operands.
42348     if (N0 != N1 && !DemandedElts.isAllOnes()) {
42349       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
42350                                                             TLO.DAG, Depth + 1);
42351       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
42352                                                             TLO.DAG, Depth + 1);
42353       if (NewN0 || NewN1) {
42354         NewN0 = NewN0 ? NewN0 : N0;
42355         NewN1 = NewN1 ? NewN1 : N1;
42356         return TLO.CombineTo(Op,
42357                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
42358       }
42359     }
42360     break;
42361   }
42362   case X86ISD::VTRUNC:
42363   case X86ISD::VTRUNCS:
42364   case X86ISD::VTRUNCUS: {
42365     SDValue Src = Op.getOperand(0);
42366     MVT SrcVT = Src.getSimpleValueType();
42367     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
42368     APInt SrcUndef, SrcZero;
42369     if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
42370                                    Depth + 1))
42371       return true;
42372     KnownZero = SrcZero.zextOrTrunc(NumElts);
42373     KnownUndef = SrcUndef.zextOrTrunc(NumElts);
42374     break;
42375   }
42376   case X86ISD::BLENDV: {
42377     APInt SelUndef, SelZero;
42378     if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
42379                                    SelZero, TLO, Depth + 1))
42380       return true;
42381 
42382     // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
42383     APInt LHSUndef, LHSZero;
42384     if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
42385                                    LHSZero, TLO, Depth + 1))
42386       return true;
42387 
42388     APInt RHSUndef, RHSZero;
42389     if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
42390                                    RHSZero, TLO, Depth + 1))
42391       return true;
42392 
42393     KnownZero = LHSZero & RHSZero;
42394     KnownUndef = LHSUndef & RHSUndef;
42395     break;
42396   }
42397   case X86ISD::VZEXT_MOVL: {
42398     // If upper demanded elements are already zero then we have nothing to do.
42399     SDValue Src = Op.getOperand(0);
42400     APInt DemandedUpperElts = DemandedElts;
42401     DemandedUpperElts.clearLowBits(1);
42402     if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
42403       return TLO.CombineTo(Op, Src);
42404     break;
42405   }
42406   case X86ISD::VBROADCAST: {
42407     SDValue Src = Op.getOperand(0);
42408     MVT SrcVT = Src.getSimpleValueType();
42409     if (!SrcVT.isVector())
42410       break;
42411     // Don't bother broadcasting if we just need the 0'th element.
42412     if (DemandedElts == 1) {
42413       if (Src.getValueType() != VT)
42414         Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
42415                              SDLoc(Op));
42416       return TLO.CombineTo(Op, Src);
42417     }
42418     APInt SrcUndef, SrcZero;
42419     APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
42420     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
42421                                    Depth + 1))
42422       return true;
42423     // Aggressively peek through src to get at the demanded elt.
42424     // TODO - we should do this for all target/faux shuffles ops.
42425     if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
42426             Src, SrcElts, TLO.DAG, Depth + 1))
42427       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42428     break;
42429   }
42430   case X86ISD::VPERMV:
42431     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
42432                                                    Depth))
42433       return true;
42434     break;
42435   case X86ISD::PSHUFB:
42436   case X86ISD::VPERMV3:
42437   case X86ISD::VPERMILPV:
42438     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
42439                                                    Depth))
42440       return true;
42441     break;
42442   case X86ISD::VPPERM:
42443   case X86ISD::VPERMIL2:
42444     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
42445                                                    Depth))
42446       return true;
42447     break;
42448   }
42449 
42450   // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
42451   // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
42452   // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
42453   if ((VT.is256BitVector() || VT.is512BitVector()) &&
42454       DemandedElts.lshr(NumElts / 2) == 0) {
42455     unsigned SizeInBits = VT.getSizeInBits();
42456     unsigned ExtSizeInBits = SizeInBits / 2;
42457 
42458     // See if 512-bit ops only use the bottom 128-bits.
42459     if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
42460       ExtSizeInBits = SizeInBits / 4;
42461 
42462     switch (Opc) {
42463       // Scalar broadcast.
42464     case X86ISD::VBROADCAST: {
42465       SDLoc DL(Op);
42466       SDValue Src = Op.getOperand(0);
42467       if (Src.getValueSizeInBits() > ExtSizeInBits)
42468         Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
42469       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42470                                     ExtSizeInBits / VT.getScalarSizeInBits());
42471       SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
42472       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
42473                                                TLO.DAG, DL, ExtSizeInBits));
42474     }
42475     case X86ISD::VBROADCAST_LOAD: {
42476       SDLoc DL(Op);
42477       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
42478       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42479                                     ExtSizeInBits / VT.getScalarSizeInBits());
42480       SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
42481       SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
42482       SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
42483           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
42484           MemIntr->getMemOperand());
42485       TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
42486                                            Bcst.getValue(1));
42487       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
42488                                                TLO.DAG, DL, ExtSizeInBits));
42489     }
42490       // Subvector broadcast.
42491     case X86ISD::SUBV_BROADCAST_LOAD: {
42492       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
42493       EVT MemVT = MemIntr->getMemoryVT();
42494       if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
42495         SDLoc DL(Op);
42496         SDValue Ld =
42497             TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
42498                             MemIntr->getBasePtr(), MemIntr->getMemOperand());
42499         TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
42500                                              Ld.getValue(1));
42501         return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
42502                                                  TLO.DAG, DL, ExtSizeInBits));
42503       } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
42504         SDLoc DL(Op);
42505         EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42506                                       ExtSizeInBits / VT.getScalarSizeInBits());
42507         if (SDValue BcstLd =
42508                 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
42509           return TLO.CombineTo(Op,
42510                                insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
42511                                                TLO.DAG, DL, ExtSizeInBits));
42512       }
42513       break;
42514     }
42515       // Byte shifts by immediate.
42516     case X86ISD::VSHLDQ:
42517     case X86ISD::VSRLDQ:
42518       // Shift by uniform.
42519     case X86ISD::VSHL:
42520     case X86ISD::VSRL:
42521     case X86ISD::VSRA:
42522       // Shift by immediate.
42523     case X86ISD::VSHLI:
42524     case X86ISD::VSRLI:
42525     case X86ISD::VSRAI: {
42526       SDLoc DL(Op);
42527       SDValue Ext0 =
42528           extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
42529       SDValue ExtOp =
42530           TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
42531       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42532       SDValue Insert =
42533           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42534       return TLO.CombineTo(Op, Insert);
42535     }
42536     case X86ISD::VPERMI: {
42537       // Simplify PERMPD/PERMQ to extract_subvector.
42538       // TODO: This should be done in shuffle combining.
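      // For example, a v4i64 VPERMI whose low two mask elements are <2, 3>
      // (and whose high half is not demanded) is just an extract of the
      // source's upper 128 bits, reinserted into an undef vector at index 0.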
42539       if (VT == MVT::v4f64 || VT == MVT::v4i64) {
42540         SmallVector<int, 4> Mask;
42541         DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
42542         if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
42543           SDLoc DL(Op);
42544           SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
42545           SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42546           SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
42547           return TLO.CombineTo(Op, Insert);
42548         }
42549       }
42550       break;
42551     }
42552     case X86ISD::VPERM2X128: {
42553       // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
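      // The low 4 bits of the immediate describe the low result half: bit 3
      // zeroes it, bit 1 picks the source operand and bit 0 picks which
      // 128-bit half of that source to extract.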
42554       SDLoc DL(Op);
42555       unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
42556       if (LoMask & 0x8)
42557         return TLO.CombineTo(
42558             Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
42559       unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
42560       unsigned SrcIdx = (LoMask & 0x2) >> 1;
42561       SDValue ExtOp =
42562           extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
42563       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42564       SDValue Insert =
42565           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42566       return TLO.CombineTo(Op, Insert);
42567     }
42568       // Zero upper elements.
42569     case X86ISD::VZEXT_MOVL:
42570       // Target unary shuffles by immediate:
42571     case X86ISD::PSHUFD:
42572     case X86ISD::PSHUFLW:
42573     case X86ISD::PSHUFHW:
42574     case X86ISD::VPERMILPI:
42575       // (Non-Lane Crossing) Target Shuffles.
42576     case X86ISD::VPERMILPV:
42577     case X86ISD::VPERMIL2:
42578     case X86ISD::PSHUFB:
42579     case X86ISD::UNPCKL:
42580     case X86ISD::UNPCKH:
42581     case X86ISD::BLENDI:
42582       // Integer ops.
42583     case X86ISD::PACKSS:
42584     case X86ISD::PACKUS:
42585       // Horizontal Ops.
42586     case X86ISD::HADD:
42587     case X86ISD::HSUB:
42588     case X86ISD::FHADD:
42589     case X86ISD::FHSUB: {
42590       SDLoc DL(Op);
42591       SmallVector<SDValue, 4> Ops;
42592       for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
42593         SDValue SrcOp = Op.getOperand(i);
42594         EVT SrcVT = SrcOp.getValueType();
42595         assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
42596                "Unsupported vector size");
42597         Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
42598                                                           ExtSizeInBits)
42599                                        : SrcOp);
42600       }
42601       MVT ExtVT = VT.getSimpleVT();
42602       ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
42603                                ExtSizeInBits / ExtVT.getScalarSizeInBits());
42604       SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
42605       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42606       SDValue Insert =
42607           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42608       return TLO.CombineTo(Op, Insert);
42609     }
42610     }
42611   }
42612 
42613   // For splats, unless we *only* demand the 0'th element,
42614   // stop attempts at simplification here; we aren't going to improve things,
42615   // and this is better than any potential shuffle.
42616   if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
42617     return false;
42618 
42619   // Get target/faux shuffle mask.
42620   APInt OpUndef, OpZero;
42621   SmallVector<int, 64> OpMask;
42622   SmallVector<SDValue, 2> OpInputs;
42623   if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
42624                               OpZero, TLO.DAG, Depth, false))
42625     return false;
42626 
42627   // Shuffle inputs must be the same size as the result.
42628   if (OpMask.size() != (unsigned)NumElts ||
42629       llvm::any_of(OpInputs, [VT](SDValue V) {
42630         return VT.getSizeInBits() != V.getValueSizeInBits() ||
42631                !V.getValueType().isVector();
42632       }))
42633     return false;
42634 
42635   KnownZero = OpZero;
42636   KnownUndef = OpUndef;
42637 
42638   // Check if shuffle mask can be simplified to undef/zero/identity.
42639   int NumSrcs = OpInputs.size();
42640   for (int i = 0; i != NumElts; ++i)
42641     if (!DemandedElts[i])
42642       OpMask[i] = SM_SentinelUndef;
42643 
42644   if (isUndefInRange(OpMask, 0, NumElts)) {
42645     KnownUndef.setAllBits();
42646     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
42647   }
42648   if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
42649     KnownZero.setAllBits();
42650     return TLO.CombineTo(
42651         Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42652   }
42653   for (int Src = 0; Src != NumSrcs; ++Src)
42654     if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
42655       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
42656 
42657   // Attempt to simplify inputs.
42658   for (int Src = 0; Src != NumSrcs; ++Src) {
42659     // TODO: Support inputs of different types.
42660     if (OpInputs[Src].getValueType() != VT)
42661       continue;
42662 
42663     int Lo = Src * NumElts;
42664     APInt SrcElts = APInt::getZero(NumElts);
42665     for (int i = 0; i != NumElts; ++i)
42666       if (DemandedElts[i]) {
42667         int M = OpMask[i] - Lo;
42668         if (0 <= M && M < NumElts)
42669           SrcElts.setBit(M);
42670       }
42671 
42672     // TODO - Propagate input undef/zero elts.
42673     APInt SrcUndef, SrcZero;
42674     if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
42675                                    TLO, Depth + 1))
42676       return true;
42677   }
42678 
42679   // If we don't demand all elements, then attempt to combine to a simpler
42680   // shuffle.
42681   // We need to convert the depth to something combineX86ShufflesRecursively
42682   // can handle - so pretend it's Depth == 0 again, and reduce the max depth
42683   // to match. This prevents combineX86ShuffleChain from returning a
42684   // combined shuffle that's the same as the original root, causing an
42685   // infinite loop.
42686   if (!DemandedElts.isAllOnes()) {
42687     assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
42688 
42689     SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
42690     for (int i = 0; i != NumElts; ++i)
42691       if (DemandedElts[i])
42692         DemandedMask[i] = i;
42693 
42694     SDValue NewShuffle = combineX86ShufflesRecursively(
42695         {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
42696         /*HasVarMask*/ false,
42697         /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
42698         Subtarget);
42699     if (NewShuffle)
42700       return TLO.CombineTo(Op, NewShuffle);
42701   }
42702 
42703   return false;
42704 }
42705 
42706 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
42707     SDValue Op, const APInt &OriginalDemandedBits,
42708     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
42709     unsigned Depth) const {
42710   EVT VT = Op.getValueType();
42711   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
42712   unsigned Opc = Op.getOpcode();
42713   switch(Opc) {
42714   case X86ISD::VTRUNC: {
42715     KnownBits KnownOp;
42716     SDValue Src = Op.getOperand(0);
42717     MVT SrcVT = Src.getSimpleValueType();
42718 
42719     // Simplify the input, using demanded bit information.
42720     APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
42721     APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
42722     if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
42723       return true;
42724     break;
42725   }
42726   case X86ISD::PMULDQ:
42727   case X86ISD::PMULUDQ: {
42728     // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
42729     KnownBits KnownLHS, KnownRHS;
42730     SDValue LHS = Op.getOperand(0);
42731     SDValue RHS = Op.getOperand(1);
42732 
42733     // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
42734     // FIXME: Can we bound this better?
42735     APInt DemandedMask = APInt::getLowBitsSet(64, 32);
42736     APInt DemandedMaskLHS = APInt::getAllOnes(64);
42737     APInt DemandedMaskRHS = APInt::getAllOnes(64);
42738 
42739     bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
42740     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
42741       DemandedMaskLHS = DemandedMask;
42742     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
42743       DemandedMaskRHS = DemandedMask;
42744 
42745     if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
42746                              KnownLHS, TLO, Depth + 1))
42747       return true;
42748     if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
42749                              KnownRHS, TLO, Depth + 1))
42750       return true;
42751 
42752     // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
42753     KnownRHS = KnownRHS.trunc(32);
42754     if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
42755         KnownRHS.getConstant().isOne()) {
42756       SDLoc DL(Op);
42757       SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
42758       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
42759     }
42760 
42761     // Aggressively peek through ops to get at the demanded low bits.
42762     SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
42763         LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
42764     SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
42765         RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
42766     if (DemandedLHS || DemandedRHS) {
42767       DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
42768       DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
42769       return TLO.CombineTo(
42770           Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
42771     }
42772     break;
42773   }
42774   case X86ISD::VSHLI: {
42775     SDValue Op0 = Op.getOperand(0);
42776 
42777     unsigned ShAmt = Op.getConstantOperandVal(1);
42778     if (ShAmt >= BitWidth)
42779       break;
42780 
42781     APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
42782 
42783     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
42784     // single shift.  We can do this if the bottom bits (which are shifted
42785     // out) are never demanded.
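    // For example, if the low 6 bits are not demanded:
    //   (X >>u 4) << 6  -->  X << 2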
42786     if (Op0.getOpcode() == X86ISD::VSRLI &&
42787         OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
42788       unsigned Shift2Amt = Op0.getConstantOperandVal(1);
42789       if (Shift2Amt < BitWidth) {
42790         int Diff = ShAmt - Shift2Amt;
42791         if (Diff == 0)
42792           return TLO.CombineTo(Op, Op0.getOperand(0));
42793 
42794         unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
42795         SDValue NewShift = TLO.DAG.getNode(
42796             NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
42797             TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
42798         return TLO.CombineTo(Op, NewShift);
42799       }
42800     }
42801 
42802     // If we are only demanding sign bits then we can use the shift source directly.
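    // For example, a 32-bit element with 24 sign bits shifted left by 8, with
    // only its top 16 bits demanded, has the same demanded bits as the
    // unshifted source: both ranges consist purely of sign-extension bits.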
42803     unsigned NumSignBits =
42804         TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
42805     unsigned UpperDemandedBits =
42806         BitWidth - OriginalDemandedBits.countTrailingZeros();
42807     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
42808       return TLO.CombineTo(Op, Op0);
42809 
42810     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
42811                              TLO, Depth + 1))
42812       return true;
42813 
42814     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42815     Known.Zero <<= ShAmt;
42816     Known.One <<= ShAmt;
42817 
42818     // Low bits known zero.
42819     Known.Zero.setLowBits(ShAmt);
42820     return false;
42821   }
42822   case X86ISD::VSRLI: {
42823     unsigned ShAmt = Op.getConstantOperandVal(1);
42824     if (ShAmt >= BitWidth)
42825       break;
42826 
42827     APInt DemandedMask = OriginalDemandedBits << ShAmt;
42828 
42829     if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
42830                              OriginalDemandedElts, Known, TLO, Depth + 1))
42831       return true;
42832 
42833     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42834     Known.Zero.lshrInPlace(ShAmt);
42835     Known.One.lshrInPlace(ShAmt);
42836 
42837     // High bits known zero.
42838     Known.Zero.setHighBits(ShAmt);
42839     return false;
42840   }
42841   case X86ISD::VSRAI: {
42842     SDValue Op0 = Op.getOperand(0);
42843     SDValue Op1 = Op.getOperand(1);
42844 
42845     unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
42846     if (ShAmt >= BitWidth)
42847       break;
42848 
42849     APInt DemandedMask = OriginalDemandedBits << ShAmt;
42850 
42851     // If we just want the sign bit then we don't need to shift it.
42852     if (OriginalDemandedBits.isSignMask())
42853       return TLO.CombineTo(Op, Op0);
42854 
42855     // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
42856     if (Op0.getOpcode() == X86ISD::VSHLI &&
42857         Op.getOperand(1) == Op0.getOperand(1)) {
42858       SDValue Op00 = Op0.getOperand(0);
42859       unsigned NumSignBits =
42860           TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
42861       if (ShAmt < NumSignBits)
42862         return TLO.CombineTo(Op, Op00);
42863     }
42864 
42865     // If any of the demanded bits are produced by the sign extension, we also
42866     // demand the input sign bit.
42867     if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
42868       DemandedMask.setSignBit();
42869 
42870     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
42871                              TLO, Depth + 1))
42872       return true;
42873 
42874     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42875     Known.Zero.lshrInPlace(ShAmt);
42876     Known.One.lshrInPlace(ShAmt);
42877 
42878     // If the input sign bit is known to be zero, or if none of the top bits
42879     // are demanded, turn this into an unsigned shift right.
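    // (Arithmetic and logical right shifts differ only in the top ShAmt result
    // bits: ashr fills them with copies of the original sign bit, lshr with
    // zeroes.)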
42880     if (Known.Zero[BitWidth - ShAmt - 1] ||
42881         OriginalDemandedBits.countLeadingZeros() >= ShAmt)
42882       return TLO.CombineTo(
42883           Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
42884 
42885     // High bits are known one.
42886     if (Known.One[BitWidth - ShAmt - 1])
42887       Known.One.setHighBits(ShAmt);
42888     return false;
42889   }
42890   case X86ISD::BLENDV: {
42891     SDValue Sel = Op.getOperand(0);
42892     SDValue LHS = Op.getOperand(1);
42893     SDValue RHS = Op.getOperand(2);
42894 
42895     APInt SignMask = APInt::getSignMask(BitWidth);
42896     SDValue NewSel = SimplifyMultipleUseDemandedBits(
42897         Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
42898     SDValue NewLHS = SimplifyMultipleUseDemandedBits(
42899         LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
42900     SDValue NewRHS = SimplifyMultipleUseDemandedBits(
42901         RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
42902 
42903     if (NewSel || NewLHS || NewRHS) {
42904       NewSel = NewSel ? NewSel : Sel;
42905       NewLHS = NewLHS ? NewLHS : LHS;
42906       NewRHS = NewRHS ? NewRHS : RHS;
42907       return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
42908                                                NewSel, NewLHS, NewRHS));
42909     }
42910     break;
42911   }
42912   case X86ISD::PEXTRB:
42913   case X86ISD::PEXTRW: {
42914     SDValue Vec = Op.getOperand(0);
42915     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
42916     MVT VecVT = Vec.getSimpleValueType();
42917     unsigned NumVecElts = VecVT.getVectorNumElements();
42918 
42919     if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
42920       unsigned Idx = CIdx->getZExtValue();
42921       unsigned VecBitWidth = VecVT.getScalarSizeInBits();
42922 
42923       // If we demand no bits from the vector then we must have demanded
42924       // bits from the implicit zext - simplify to zero.
42925       APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
42926       if (DemandedVecBits == 0)
42927         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42928 
42929       APInt KnownUndef, KnownZero;
42930       APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
42931       if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
42932                                      KnownZero, TLO, Depth + 1))
42933         return true;
42934 
42935       KnownBits KnownVec;
42936       if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
42937                                KnownVec, TLO, Depth + 1))
42938         return true;
42939 
42940       if (SDValue V = SimplifyMultipleUseDemandedBits(
42941               Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
42942         return TLO.CombineTo(
42943             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
42944 
42945       Known = KnownVec.zext(BitWidth);
42946       return false;
42947     }
42948     break;
42949   }
42950   case X86ISD::PINSRB:
42951   case X86ISD::PINSRW: {
42952     SDValue Vec = Op.getOperand(0);
42953     SDValue Scl = Op.getOperand(1);
42954     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42955     MVT VecVT = Vec.getSimpleValueType();
42956 
42957     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
42958       unsigned Idx = CIdx->getZExtValue();
42959       if (!OriginalDemandedElts[Idx])
42960         return TLO.CombineTo(Op, Vec);
42961 
42962       KnownBits KnownVec;
42963       APInt DemandedVecElts(OriginalDemandedElts);
42964       DemandedVecElts.clearBit(Idx);
42965       if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
42966                                KnownVec, TLO, Depth + 1))
42967         return true;
42968 
42969       KnownBits KnownScl;
42970       unsigned NumSclBits = Scl.getScalarValueSizeInBits();
42971       APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
42972       if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
42973         return true;
42974 
42975       KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
42976       Known = KnownBits::commonBits(KnownVec, KnownScl);
42977       return false;
42978     }
42979     break;
42980   }
42981   case X86ISD::PACKSS:
42982     // PACKSS saturates to MIN/MAX integer values. So if we just want the
42983     // sign bit then we can just ask for the source operands' sign bit.
42984     // TODO - add known bits handling.
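    // (Signed saturation preserves the sign: e.g. packing i16 -> i8 clamps to
    // [-128, 127], so each result sign bit matches its wider source element.)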
42985     if (OriginalDemandedBits.isSignMask()) {
42986       APInt DemandedLHS, DemandedRHS;
42987       getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
42988 
42989       KnownBits KnownLHS, KnownRHS;
42990       APInt SignMask = APInt::getSignMask(BitWidth * 2);
42991       if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
42992                                KnownLHS, TLO, Depth + 1))
42993         return true;
42994       if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
42995                                KnownRHS, TLO, Depth + 1))
42996         return true;
42997 
42998       // Attempt to avoid multi-use ops if we don't need anything from them.
42999       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
43000           Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
43001       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
43002           Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
43003       if (DemandedOp0 || DemandedOp1) {
43004         SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
43005         SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
43006         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
43007       }
43008     }
43009     // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
43010     break;
43011   case X86ISD::VBROADCAST: {
43012     SDValue Src = Op.getOperand(0);
43013     MVT SrcVT = Src.getSimpleValueType();
43014     APInt DemandedElts = APInt::getOneBitSet(
43015         SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
43016     if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
43017                              TLO, Depth + 1))
43018       return true;
43019     // If we don't need the upper bits, attempt to narrow the broadcast source.
43020     // Don't attempt this on AVX512 as it might affect broadcast folding.
43021     // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
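    // For example, a v2i64 broadcast of i64 X where only the low 32 bits of
    // each element are demanded becomes a v4i32 broadcast of (trunc X to i32),
    // bitcast back to v2i64.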
43022     if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
43023         OriginalDemandedBits.countLeadingZeros() >= (BitWidth / 2) &&
43024         Src->hasOneUse()) {
43025       MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
43026       SDValue NewSrc =
43027           TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
43028       MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
43029       SDValue NewBcst =
43030           TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
43031       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
43032     }
43033     break;
43034   }
43035   case X86ISD::PCMPGT:
43036     // icmp sgt(0, R) == ashr(R, BitWidth-1).
43037     // iff we only need the sign bit then we can use R directly.
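    // ((0 > R) is all-ones exactly when R is negative, so the result's sign
    // bit always equals R's own sign bit.)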
43038     if (OriginalDemandedBits.isSignMask() &&
43039         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
43040       return TLO.CombineTo(Op, Op.getOperand(1));
43041     break;
43042   case X86ISD::MOVMSK: {
43043     SDValue Src = Op.getOperand(0);
43044     MVT SrcVT = Src.getSimpleValueType();
43045     unsigned SrcBits = SrcVT.getScalarSizeInBits();
43046     unsigned NumElts = SrcVT.getVectorNumElements();
43047 
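    // MOVMSK packs one sign bit per source element into the low NumElts bits
    // of the scalar result and zeroes all bits above them.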
43048     // If we don't need the sign bits at all just return zero.
43049     if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
43050       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
43051 
43052     // See if we only demand bits from the lower 128-bit vector.
43053     if (SrcVT.is256BitVector() &&
43054         OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
43055       SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
43056       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
43057     }
43058 
43059     // Only demand the vector elements of the sign bits we need.
43060     APInt KnownUndef, KnownZero;
43061     APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
43062     if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
43063                                    TLO, Depth + 1))
43064       return true;
43065 
43066     Known.Zero = KnownZero.zext(BitWidth);
43067     Known.Zero.setHighBits(BitWidth - NumElts);
43068 
43069     // MOVMSK only uses the MSB from each vector element.
43070     KnownBits KnownSrc;
43071     APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
43072     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
43073                              Depth + 1))
43074       return true;
43075 
43076     if (KnownSrc.One[SrcBits - 1])
43077       Known.One.setLowBits(NumElts);
43078     else if (KnownSrc.Zero[SrcBits - 1])
43079       Known.Zero.setLowBits(NumElts);
43080 
43081     // Attempt to avoid multi-use ops if we don't need anything from them.
43082     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
43083             Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
43084       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
43085     return false;
43086   }
43087   case X86ISD::BEXTR:
43088   case X86ISD::BEXTRI: {
43089     SDValue Op0 = Op.getOperand(0);
43090     SDValue Op1 = Op.getOperand(1);
43091 
43092     // Only bottom 16-bits of the control bits are required.
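    // (The BEXTR control encodes the start index in bits [7:0] and the extract
    // length in bits [15:8]; higher control bits are ignored.)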
43093     if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
43094       // NOTE: SimplifyDemandedBits won't do this for constants.
43095       uint64_t Val1 = Cst1->getZExtValue();
43096       uint64_t MaskedVal1 = Val1 & 0xFFFF;
43097       if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
43098         SDLoc DL(Op);
43099         return TLO.CombineTo(
43100             Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
43101                                 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
43102       }
43103 
43104       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
43105       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
43106 
43107       // If the length is 0, the result is 0.
43108       if (Length == 0) {
43109         Known.setAllZero();
43110         return false;
43111       }
43112 
43113       if ((Shift + Length) <= BitWidth) {
43114         APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
43115         if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
43116           return true;
43117 
43118         Known = Known.extractBits(Length, Shift);
43119         Known = Known.zextOrTrunc(BitWidth);
43120         return false;
43121       }
43122     } else {
43123       assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
43124       KnownBits Known1;
43125       APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
43126       if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
43127         return true;
43128 
43129       // If the length is 0, replace with 0.
43130       KnownBits LengthBits = Known1.extractBits(8, 8);
43131       if (LengthBits.isZero())
43132         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
43133     }
43134 
43135     break;
43136   }
43137   case X86ISD::PDEP: {
43138     SDValue Op0 = Op.getOperand(0);
43139     SDValue Op1 = Op.getOperand(1);
43140 
43141     unsigned DemandedBitsLZ = OriginalDemandedBits.countLeadingZeros();
43142     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
43143 
43144     // If the demanded bits have leading zeroes, we don't demand those from the
43145     // mask.
43146     if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
43147       return true;
43148 
43149     // The number of possible 1s in the mask determines the number of LSBs of
43150     // operand 0 used. Undemanded bits from the mask don't matter so filter
43151     // them before counting.
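    // For example, a mask of 0b01101000 has three set bits, so PDEP scatters
    // only the low three bits of operand 0 into those positions and the rest
    // of operand 0 is irrelevant.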
43152     KnownBits Known2;
43153     uint64_t Count = (~Known.Zero & LoMask).countPopulation();
43154     APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
43155     if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
43156       return true;
43157 
43158     // Zeroes are retained from the mask, but not ones.
43159     Known.One.clearAllBits();
43160     // The result will have at least as many trailing zeros as the non-mask
43161     // operand since bits can only map to the same or higher bit position.
43162     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
43163     return false;
43164   }
43165   }
43166 
43167   return TargetLowering::SimplifyDemandedBitsForTargetNode(
43168       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
43169 }
43170 
43171 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
43172     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
43173     SelectionDAG &DAG, unsigned Depth) const {
43174   int NumElts = DemandedElts.getBitWidth();
43175   unsigned Opc = Op.getOpcode();
43176   EVT VT = Op.getValueType();
43177 
43178   switch (Opc) {
43179   case X86ISD::PINSRB:
43180   case X86ISD::PINSRW: {
43181     // If we don't demand the inserted element, return the base vector.
43182     SDValue Vec = Op.getOperand(0);
43183     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
43184     MVT VecVT = Vec.getSimpleValueType();
43185     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
43186         !DemandedElts[CIdx->getZExtValue()])
43187       return Vec;
43188     break;
43189   }
43190   case X86ISD::VSHLI: {
43191     // If we are only demanding sign bits then we can use the shift source
43192     // directly.
43193     SDValue Op0 = Op.getOperand(0);
43194     unsigned ShAmt = Op.getConstantOperandVal(1);
43195     unsigned BitWidth = DemandedBits.getBitWidth();
43196     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
43197     unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
43198     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
43199       return Op0;
43200     break;
43201   }
43202   case X86ISD::VSRAI:
43203     // iff we only need the sign bit then we can use the source directly.
43204     // TODO: generalize where we only demand extended signbits.
43205     if (DemandedBits.isSignMask())
43206       return Op.getOperand(0);
43207     break;
43208   case X86ISD::PCMPGT:
43209     // icmp sgt(0, R) == ashr(R, BitWidth-1).
43210     // iff we only need the sign bit then we can use R directly.
43211     if (DemandedBits.isSignMask() &&
43212         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
43213       return Op.getOperand(1);
43214     break;
43215   case X86ISD::ANDNP: {
43216     // ANDNP = (~LHS & RHS);
43217     SDValue LHS = Op.getOperand(0);
43218     SDValue RHS = Op.getOperand(1);
43219 
43220     KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
43221     KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
43222 
43223     // If all of the demanded bits are known 0 on LHS and known 0 on RHS, then
43224     // the (inverted) LHS bits cannot contribute to the result of the 'andn' in
43225     // this context, so return RHS.
43226     if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
43227       return RHS;
43228     break;
43229   }
43230   }
43231 
43232   APInt ShuffleUndef, ShuffleZero;
43233   SmallVector<int, 16> ShuffleMask;
43234   SmallVector<SDValue, 2> ShuffleOps;
43235   if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
43236                              ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
43237     // If all the demanded elts are from one operand and are inline,
43238     // then we can use the operand directly.
43239     int NumOps = ShuffleOps.size();
43240     if (ShuffleMask.size() == (unsigned)NumElts &&
43241         llvm::all_of(ShuffleOps, [VT](SDValue V) {
43242           return VT.getSizeInBits() == V.getValueSizeInBits();
43243         })) {
43244 
43245       if (DemandedElts.isSubsetOf(ShuffleUndef))
43246         return DAG.getUNDEF(VT);
43247       if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
43248         return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
43249 
43250       // Bitmask that indicates which ops have only been accessed 'inline'.
43251       APInt IdentityOp = APInt::getAllOnes(NumOps);
43252       for (int i = 0; i != NumElts; ++i) {
43253         int M = ShuffleMask[i];
43254         if (!DemandedElts[i] || ShuffleUndef[i])
43255           continue;
43256         int OpIdx = M / NumElts;
43257         int EltIdx = M % NumElts;
43258         if (M < 0 || EltIdx != i) {
43259           IdentityOp.clearAllBits();
43260           break;
43261         }
43262         IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
43263         if (IdentityOp == 0)
43264           break;
43265       }
43266       assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
43267              "Multiple identity shuffles detected");
43268 
43269       if (IdentityOp != 0)
43270         return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
43271     }
43272   }
43273 
43274   return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
43275       Op, DemandedBits, DemandedElts, DAG, Depth);
43276 }
43277 
43278 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
43279     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
43280     bool PoisonOnly, unsigned Depth) const {
43281   unsigned EltsBits = Op.getScalarValueSizeInBits();
43282   unsigned NumElts = DemandedElts.getBitWidth();
43283 
43284   // TODO: Add more target shuffles.
43285   switch (Op.getOpcode()) {
43286   case X86ISD::PSHUFD:
43287   case X86ISD::VPERMILPI: {
43288     SmallVector<int, 8> Mask;
43289     DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
43290 
43291     APInt DemandedSrcElts = APInt::getZero(NumElts);
43292     for (unsigned I = 0; I != NumElts; ++I)
43293       if (DemandedElts[I])
43294         DemandedSrcElts.setBit(Mask[I]);
43295 
43296     return DAG.isGuaranteedNotToBeUndefOrPoison(
43297         Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
43298   }
43299   }
43300   return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
43301       Op, DemandedElts, DAG, PoisonOnly, Depth);
43302 }
43303 
43304 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
43305     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
43306     bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
43307 
43308   // TODO: Add more target shuffles.
43309   switch (Op.getOpcode()) {
43310   case X86ISD::PSHUFD:
43311   case X86ISD::VPERMILPI:
43312     return false;
43313   }
43314   return TargetLowering::canCreateUndefOrPoisonForTargetNode(
43315       Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
43316 }
43317 
43318 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
43319                                                   const APInt &DemandedElts,
43320                                                   APInt &UndefElts,
43321                                                   const SelectionDAG &DAG,
43322                                                   unsigned Depth) const {
43323   unsigned NumElts = DemandedElts.getBitWidth();
43324   unsigned Opc = Op.getOpcode();
43325 
43326   switch (Opc) {
43327   case X86ISD::VBROADCAST:
43328   case X86ISD::VBROADCAST_LOAD:
43329     UndefElts = APInt::getNullValue(NumElts);
43330     return true;
43331   }
43332 
43333   return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
43334                                                    DAG, Depth);
43335 }
43336 
43337 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
43338 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
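// For example, for Src == (v8i1 setcc (v8i32 a), (v8i32 b)) the compared
// vectors are 256 bits wide, so checkBitcastSrcVectorSize(Src, 256, ...)
// returns true.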
43339 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
43340                                       bool AllowTruncate) {
43341   switch (Src.getOpcode()) {
43342   case ISD::TRUNCATE:
43343     if (!AllowTruncate)
43344       return false;
43345     [[fallthrough]];
43346   case ISD::SETCC:
43347     return Src.getOperand(0).getValueSizeInBits() == Size;
43348   case ISD::AND:
43349   case ISD::XOR:
43350   case ISD::OR:
43351     return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
43352            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
43353   case ISD::VSELECT:
43354     return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
43355            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
43356            checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
43357   case ISD::BUILD_VECTOR:
43358     return ISD::isBuildVectorAllZeros(Src.getNode());
43359 
43360   }
43361   return false;
43362 }
43363 
43364 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
43365 static unsigned getAltBitOpcode(unsigned Opcode) {
43366   switch(Opcode) {
43367   case ISD::AND: return X86ISD::FAND;
43368   case ISD::OR: return X86ISD::FOR;
43369   case ISD::XOR: return X86ISD::FXOR;
43370   case X86ISD::ANDNP: return X86ISD::FANDN;
43371   }
43372   llvm_unreachable("Unknown bitwise opcode");
43373 }
43374 
43375 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
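// SSE1 provides MOVMSKPS but no 128-bit integer compares, so a v4i32 sign-bit
// test (setlt X, 0) can be performed on the v4f32 view of the same data:
// MOVMSK only reads the per-element sign bits, which sit in the same positions
// either way.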
43376 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
43377                                           const SDLoc &DL) {
43378   EVT SrcVT = Src.getValueType();
43379   if (SrcVT != MVT::v4i1)
43380     return SDValue();
43381 
43382   switch (Src.getOpcode()) {
43383   case ISD::SETCC:
43384     if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
43385         ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
43386         cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
43387       SDValue Op0 = Src.getOperand(0);
43388       if (ISD::isNormalLoad(Op0.getNode()))
43389         return DAG.getBitcast(MVT::v4f32, Op0);
43390       if (Op0.getOpcode() == ISD::BITCAST &&
43391           Op0.getOperand(0).getValueType() == MVT::v4f32)
43392         return Op0.getOperand(0);
43393     }
43394     break;
43395   case ISD::AND:
43396   case ISD::XOR:
43397   case ISD::OR: {
43398     SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
43399     SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
43400     if (Op0 && Op1)
43401       return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
43402                          Op1);
43403     break;
43404   }
43405   }
43406   return SDValue();
43407 }
43408 
43409 // Helper to push sign extension of vXi1 SETCC result through bitops.
43410 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
43411                                           SDValue Src, const SDLoc &DL) {
43412   switch (Src.getOpcode()) {
43413   case ISD::SETCC:
43414   case ISD::TRUNCATE:
43415   case ISD::BUILD_VECTOR:
43416     return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
43417   case ISD::AND:
43418   case ISD::XOR:
43419   case ISD::OR:
43420     return DAG.getNode(
43421         Src.getOpcode(), DL, SExtVT,
43422         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
43423         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
43424   case ISD::VSELECT:
43425     return DAG.getSelect(
43426         DL, SExtVT, Src.getOperand(0),
43427         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
43428         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
43429   }
43430   llvm_unreachable("Unexpected node type for vXi1 sign extension");
43431 }
43432 
43433 // Try to match patterns such as
43434 // (i16 bitcast (v16i1 x))
43435 // ->
43436 // (i16 movmsk (v16i8 sext (v16i1 x)))
43437 // before the illegal vector is scalarized on subtargets that don't have legal
43438 // vxi1 types.
43439 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
43440                                   const SDLoc &DL,
43441                                   const X86Subtarget &Subtarget) {
43442   EVT SrcVT = Src.getValueType();
43443   if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
43444     return SDValue();
43445 
43446   // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
43447   // legalization destroys the v4i32 type.
43448   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
43449     if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
43450       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
43451                       DAG.getBitcast(MVT::v4f32, V));
43452       return DAG.getZExtOrTrunc(V, DL, VT);
43453     }
43454   }
43455 
43456   // If the input is a truncate from v16i8 or v32i8 go ahead and use a
43457   // movmskb even with avx512. This will be better than truncating to vXi1 and
43458   // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
43459   // vpcmpeqb/vpcmpgtb.
43460   bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
43461                       (Src.getOperand(0).getValueType() == MVT::v16i8 ||
43462                        Src.getOperand(0).getValueType() == MVT::v32i8 ||
43463                        Src.getOperand(0).getValueType() == MVT::v64i8);
43464 
43465   // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
43466   // directly with vpmovmskb/vmovmskps/vmovmskpd.
43467   if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
43468       cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
43469       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
43470     EVT CmpVT = Src.getOperand(0).getValueType();
43471     EVT EltVT = CmpVT.getVectorElementType();
43472     if (CmpVT.getSizeInBits() <= 256 &&
43473         (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
43474       PreferMovMsk = true;
43475   }
43476 
43477   // With AVX512 vxi1 types are legal and we prefer using k-regs.
43478   // MOVMSK is supported in SSE2 or later.
43479   if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
43480     return SDValue();
43481 
43482   // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
43483   // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
43484   // v8i16 and v16i16.
43485   // For these two cases, we can shuffle the upper element bytes to a
43486   // consecutive sequence at the start of the vector and treat the results as
43487   // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
43488   // for v16i16 this is not the case, because the shuffle is expensive, so we
43489   // avoid sign-extending to this type entirely.
43490   // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
43491   // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
43492   MVT SExtVT;
43493   bool PropagateSExt = false;
43494   switch (SrcVT.getSimpleVT().SimpleTy) {
43495   default:
43496     return SDValue();
43497   case MVT::v2i1:
43498     SExtVT = MVT::v2i64;
43499     break;
43500   case MVT::v4i1:
43501     SExtVT = MVT::v4i32;
43502     // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
43503     // sign-extend to a 256-bit operation to avoid truncation.
43504     if (Subtarget.hasAVX() &&
43505         checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
43506       SExtVT = MVT::v4i64;
43507       PropagateSExt = true;
43508     }
43509     break;
43510   case MVT::v8i1:
43511     SExtVT = MVT::v8i16;
43512     // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
43513     // sign-extend to a 256-bit operation to match the compare.
43514     // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
43515     // 256-bit because the shuffle is cheaper than sign extending the result of
43516     // the compare.
43517     if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
43518                                checkBitcastSrcVectorSize(Src, 512, true))) {
43519       SExtVT = MVT::v8i32;
43520       PropagateSExt = true;
43521     }
43522     break;
43523   case MVT::v16i1:
43524     SExtVT = MVT::v16i8;
43525     // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
43526     // it is not profitable to sign-extend to 256-bit because this will
43527     // require an extra cross-lane shuffle which is more expensive than
43528     // truncating the result of the compare to 128-bits.
43529     break;
43530   case MVT::v32i1:
43531     SExtVT = MVT::v32i8;
43532     break;
43533   case MVT::v64i1:
43534     // If we have AVX512F, but not AVX512BW, and the input is truncated from
43535     // v64i8 (checked earlier), then split the input and make two pmovmskbs.
43536     if (Subtarget.hasAVX512()) {
43537       if (Subtarget.hasBWI())
43538         return SDValue();
43539       SExtVT = MVT::v64i8;
43540       break;
43541     }
43542     // Split if this is a <64 x i8> comparison result.
43543     if (checkBitcastSrcVectorSize(Src, 512, false)) {
43544       SExtVT = MVT::v64i8;
43545       break;
43546     }
43547     return SDValue();
43548   };
43549 
43550   SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
43551                             : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
43552 
43553   if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
43554     V = getPMOVMSKB(DL, V, DAG, Subtarget);
43555   } else {
43556     if (SExtVT == MVT::v8i16)
43557       V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
43558                       DAG.getUNDEF(MVT::v8i16));
43559     V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
43560   }
43561 
43562   EVT IntVT =
43563       EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
43564   V = DAG.getZExtOrTrunc(V, DL, IntVT);
43565   return DAG.getBitcast(VT, V);
43566 }
43567 
43568 // Convert a vXi1 constant build vector to the same width scalar integer.
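// For example, (v4i1 build_vector 1, 0, 1, 1) becomes the i4 constant 0b1101,
// with element N landing in bit N.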
43569 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
43570   EVT SrcVT = Op.getValueType();
43571   assert(SrcVT.getVectorElementType() == MVT::i1 &&
43572          "Expected a vXi1 vector");
43573   assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
43574          "Expected a constant build vector");
43575 
43576   APInt Imm(SrcVT.getVectorNumElements(), 0);
43577   for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
43578     SDValue In = Op.getOperand(Idx);
43579     if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
43580       Imm.setBit(Idx);
43581   }
43582   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
43583   return DAG.getConstant(Imm, SDLoc(Op), IntVT);
43584 }
43585 
43586 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
43587                                            TargetLowering::DAGCombinerInfo &DCI,
43588                                            const X86Subtarget &Subtarget) {
43589   assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
43590 
43591   if (!DCI.isBeforeLegalizeOps())
43592     return SDValue();
43593 
43594   // Only do this if we have k-registers.
43595   if (!Subtarget.hasAVX512())
43596     return SDValue();
43597 
43598   EVT DstVT = N->getValueType(0);
43599   SDValue Op = N->getOperand(0);
43600   EVT SrcVT = Op.getValueType();
43601 
43602   if (!Op.hasOneUse())
43603     return SDValue();
43604 
43605   // Look for logic ops.
43606   if (Op.getOpcode() != ISD::AND &&
43607       Op.getOpcode() != ISD::OR &&
43608       Op.getOpcode() != ISD::XOR)
43609     return SDValue();
43610 
43611   // Make sure we have a bitcast between mask registers and a scalar type.
43612   if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43613         DstVT.isScalarInteger()) &&
43614       !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
43615         SrcVT.isScalarInteger()))
43616     return SDValue();
43617 
43618   SDValue LHS = Op.getOperand(0);
43619   SDValue RHS = Op.getOperand(1);
43620 
43621   if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
43622       LHS.getOperand(0).getValueType() == DstVT)
43623     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
43624                        DAG.getBitcast(DstVT, RHS));
43625 
43626   if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
43627       RHS.getOperand(0).getValueType() == DstVT)
43628     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
43629                        DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
43630 
43631   // If the RHS is a vXi1 build vector, this is a good reason to flip too.
43632   // Most of these have to move a constant from the scalar domain anyway.
43633   if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
43634     RHS = combinevXi1ConstantToInteger(RHS, DAG);
43635     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
43636                        DAG.getBitcast(DstVT, LHS), RHS);
43637   }
43638 
43639   return SDValue();
43640 }
43641 
43642 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
43643                                     const X86Subtarget &Subtarget) {
43644   SDLoc DL(BV);
43645   unsigned NumElts = BV->getNumOperands();
43646   SDValue Splat = BV->getSplatValue();
43647 
43648   // Build MMX element from integer GPR or SSE float values.
43649   auto CreateMMXElement = [&](SDValue V) {
43650     if (V.isUndef())
43651       return DAG.getUNDEF(MVT::x86mmx);
43652     if (V.getValueType().isFloatingPoint()) {
43653       if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
43654         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
43655         V = DAG.getBitcast(MVT::v2i64, V);
43656         return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
43657       }
43658       V = DAG.getBitcast(MVT::i32, V);
43659     } else {
43660       V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
43661     }
43662     return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
43663   };
43664 
43665   // Convert build vector ops to MMX data in the bottom elements.
43666   SmallVector<SDValue, 8> Ops;
43667 
43668   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43669 
43670   // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
43671   if (Splat) {
43672     if (Splat.isUndef())
43673       return DAG.getUNDEF(MVT::x86mmx);
43674 
43675     Splat = CreateMMXElement(Splat);
43676 
43677     if (Subtarget.hasSSE1()) {
43678       // Unpack v8i8 to splat i8 elements to lowest 16-bits.
43679       if (NumElts == 8)
43680         Splat = DAG.getNode(
43681             ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
43682             DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
43683                                   TLI.getPointerTy(DAG.getDataLayout())),
43684             Splat, Splat);
43685 
43686       // Use PSHUFW to repeat 16-bit elements.
43687       unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
43688       return DAG.getNode(
43689           ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
43690           DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
43691                                 TLI.getPointerTy(DAG.getDataLayout())),
43692           Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
43693     }
43694     Ops.append(NumElts, Splat);
43695   } else {
43696     for (unsigned i = 0; i != NumElts; ++i)
43697       Ops.push_back(CreateMMXElement(BV->getOperand(i)));
43698   }
43699 
43700   // Use tree of PUNPCKLs to build up general MMX vector.
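  // For example, 8 byte elements combine pairwise: punpcklbw (8 -> 4), then
  // punpcklwd (4 -> 2), then punpckldq (2 -> 1).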
43701   while (Ops.size() > 1) {
43702     unsigned NumOps = Ops.size();
43703     unsigned IntrinOp =
43704         (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
43705                      : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
43706                                     : Intrinsic::x86_mmx_punpcklbw));
43707     SDValue Intrin = DAG.getTargetConstant(
43708         IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
43709     for (unsigned i = 0; i != NumOps; i += 2)
43710       Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
43711                                Ops[i], Ops[i + 1]);
43712     Ops.resize(NumOps / 2);
43713   }
43714 
43715   return Ops[0];
43716 }
43717 
43718 // Recursive function that attempts to find if a bool vector node was originally
43719 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
43720 // integer. If so, replace the scalar ops with bool vector equivalents back down
43721 // the chain.
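// For example, (v8i1 bitcast (or (i8 bitcast (v8i1 X)), (i8 bitcast (v8i1 Y))))
// can be rewritten as (or X, Y), keeping both values in mask registers.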
43722 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
43723                                           SelectionDAG &DAG,
43724                                           const X86Subtarget &Subtarget) {
43725   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43726   unsigned Opc = V.getOpcode();
43727   switch (Opc) {
43728   case ISD::BITCAST: {
43729     // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
43730     SDValue Src = V.getOperand(0);
43731     EVT SrcVT = Src.getValueType();
43732     if (SrcVT.isVector() || SrcVT.isFloatingPoint())
43733       return DAG.getBitcast(VT, Src);
43734     break;
43735   }
43736   case ISD::TRUNCATE: {
43737     // If we find a suitable source, a truncated scalar becomes a subvector.
43738     SDValue Src = V.getOperand(0);
43739     EVT NewSrcVT =
43740         EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
43741     if (TLI.isTypeLegal(NewSrcVT))
43742       if (SDValue N0 =
43743               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
43744         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
43745                            DAG.getIntPtrConstant(0, DL));
43746     break;
43747   }
43748   case ISD::ANY_EXTEND:
43749   case ISD::ZERO_EXTEND: {
43750     // If we find a suitable source, an extended scalar becomes a subvector.
43751     SDValue Src = V.getOperand(0);
43752     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
43753                                     Src.getScalarValueSizeInBits());
43754     if (TLI.isTypeLegal(NewSrcVT))
43755       if (SDValue N0 =
43756               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
43757         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
43758                            Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
43759                                                   : DAG.getConstant(0, DL, VT),
43760                            N0, DAG.getIntPtrConstant(0, DL));
43761     break;
43762   }
43763   case ISD::OR: {
43764     // If we find suitable sources, we can just move an OR to the vector domain.
43765     SDValue Src0 = V.getOperand(0);
43766     SDValue Src1 = V.getOperand(1);
43767     if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
43768       if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
43769         return DAG.getNode(Opc, DL, VT, N0, N1);
43770     break;
43771   }
43772   case ISD::SHL: {
43773     // If we find a suitable source, a SHL becomes a KSHIFTL.
43774     SDValue Src0 = V.getOperand(0);
43775     if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
43776         ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
43777       break;
43778 
43779     if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
43780       if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
43781         return DAG.getNode(
43782             X86ISD::KSHIFTL, DL, VT, N0,
43783             DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
43784     break;
43785   }
43786   }
43787   return SDValue();
43788 }
43789 
43790 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
43791                               TargetLowering::DAGCombinerInfo &DCI,
43792                               const X86Subtarget &Subtarget) {
43793   SDValue N0 = N->getOperand(0);
43794   EVT VT = N->getValueType(0);
43795   EVT SrcVT = N0.getValueType();
43796   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43797 
43798   // Try to match patterns such as
43799   // (i16 bitcast (v16i1 x))
43800   // ->
43801   // (i16 movmsk (v16i8 sext (v16i1 x)))
43802   // before the setcc result is scalarized on subtargets that don't have legal
43803   // vxi1 types.
43804   if (DCI.isBeforeLegalize()) {
43805     SDLoc dl(N);
43806     if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
43807       return V;
43808 
43809     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
43810     // type, widen both sides to avoid a trip through memory.
43811     if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
43812         Subtarget.hasAVX512()) {
43813       N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
43814       N0 = DAG.getBitcast(MVT::v8i1, N0);
43815       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
43816                          DAG.getIntPtrConstant(0, dl));
43817     }
43818 
43819     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
43820     // type, widen both sides to avoid a trip through memory.
43821     if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
43822         Subtarget.hasAVX512()) {
43823       // Use zeros for the widening if we already have some zeroes. This can
43824       // allow SimplifyDemandedBits to remove scalar ANDs that may be down
43825       // stream of this.
43826       // FIXME: It might make sense to detect a concat_vectors with a mix of
43827       // zeroes and undef and turn it into insert_subvector for i1 vectors as
43828       // a separate combine. What we can't do is canonicalize the operands of
43829       // such a concat or we'll get into a loop with SimplifyDemandedBits.
43830       if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
43831         SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
43832         if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
43833           SrcVT = LastOp.getValueType();
43834           unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
43835           SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
43836           Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
43837           N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
43838           N0 = DAG.getBitcast(MVT::i8, N0);
43839           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
43840         }
43841       }
43842 
43843       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
43844       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
43845       Ops[0] = N0;
43846       N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
43847       N0 = DAG.getBitcast(MVT::i8, N0);
43848       return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
43849     }
43850   } else {
43851     // If we're bitcasting from iX to vXi1, see if the integer originally
43852     // began as a vXi1 and whether we can remove the bitcast entirely.
43853     if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
43854         SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
43855       if (SDValue V =
43856               combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
43857         return V;
43858     }
43859   }
43860 
43861   // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
43862   // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
43863   // due to insert_subvector legalization on KNL. By promoting the copy to i16
43864   // we can help with known bits propagation from the vXi1 domain to the
43865   // scalar domain.
43866   if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
43867       !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
43868       N0.getOperand(0).getValueType() == MVT::v16i1 &&
43869       isNullConstant(N0.getOperand(1)))
43870     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
43871                        DAG.getBitcast(MVT::i16, N0.getOperand(0)));
43872 
43873   // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
43874   // and the vbroadcast_load are both integer or both fp. In some cases this
43875   // will remove the bitcast entirely.
43876   if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
43877        VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
43878     auto *BCast = cast<MemIntrinsicSDNode>(N0);
43879     unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
43880     unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
43881       // Don't swap i8/i16 since we don't have fp types of that size.
43882     if (MemSize >= 32) {
43883       MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
43884                                        : MVT::getIntegerVT(MemSize);
43885       MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
43886                                         : MVT::getIntegerVT(SrcVTSize);
43887       LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
43888 
43889       SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43890       SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
43891       SDValue ResNode =
43892           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
43893                                   MemVT, BCast->getMemOperand());
43894       DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
43895       return DAG.getBitcast(VT, ResNode);
43896     }
43897   }
43898 
43899   // Since MMX types are special and don't usually play with other vector types,
43900   // it's better to handle them early to be sure we emit efficient code by
43901   // avoiding store-load conversions.
43902   if (VT == MVT::x86mmx) {
43903     // Detect MMX constant vectors.
43904     APInt UndefElts;
43905     SmallVector<APInt, 1> EltBits;
43906     if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
43907       SDLoc DL(N0);
43908       // Handle zero-extension of i32 with MOVD.
43909       if (EltBits[0].countLeadingZeros() >= 32)
43910         return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
43911                            DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
43912       // Else, bitcast to a double.
43913       // TODO - investigate supporting sext 32-bit immediates on x86_64.
43914       APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
43915       return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
43916     }
43917 
43918     // Detect bitcasts to x86mmx low word.
43919     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43920         (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
43921         N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
43922       bool LowUndef = true, AllUndefOrZero = true;
43923       for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
43924         SDValue Op = N0.getOperand(i);
43925         LowUndef &= Op.isUndef() || (i >= e/2);
43926         AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
43927       }
43928       if (AllUndefOrZero) {
43929         SDValue N00 = N0.getOperand(0);
43930         SDLoc dl(N00);
43931         N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
43932                        : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
43933         return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
43934       }
43935     }
43936 
43937     // Detect bitcasts of 64-bit build vectors and convert to a
43938     // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
43939     // lowest element.
43940     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43941         (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
43942          SrcVT == MVT::v8i8))
43943       return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
43944 
43945     // Detect bitcasts between element or subvector extraction to x86mmx.
43946     if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
43947          N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
43948         isNullConstant(N0.getOperand(1))) {
43949       SDValue N00 = N0.getOperand(0);
43950       if (N00.getValueType().is128BitVector())
43951         return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
43952                            DAG.getBitcast(MVT::v2i64, N00));
43953     }
43954 
43955     // Detect bitcasts from FP_TO_SINT to x86mmx.
43956     if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
43957       SDLoc DL(N0);
43958       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
43959                                 DAG.getUNDEF(MVT::v2i32));
43960       return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
43961                          DAG.getBitcast(MVT::v2i64, Res));
43962     }
43963   }
43964 
43965   // Try to remove a bitcast of constant vXi1 vector. We have to legalize
43966   // most of these to scalar anyway.
43967   if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
43968       SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43969       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
43970     return combinevXi1ConstantToInteger(N0, DAG);
43971   }
43972 
43973   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43974       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43975       isa<ConstantSDNode>(N0)) {
43976     auto *C = cast<ConstantSDNode>(N0);
43977     if (C->isAllOnes())
43978       return DAG.getConstant(1, SDLoc(N0), VT);
43979     if (C->isZero())
43980       return DAG.getConstant(0, SDLoc(N0), VT);
43981   }
43982 
43983   // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
43984   // Turn it into a sign bit compare that produces a k-register. This avoids
43985   // a trip through a GPR.
43986   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43987       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43988       isPowerOf2_32(VT.getVectorNumElements())) {
43989     unsigned NumElts = VT.getVectorNumElements();
43990     SDValue Src = N0;
43991 
43992     // Peek through truncate.
43993     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
43994       Src = N0.getOperand(0);
43995 
43996     if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
43997       SDValue MovmskIn = Src.getOperand(0);
43998       MVT MovmskVT = MovmskIn.getSimpleValueType();
43999       unsigned MovMskElts = MovmskVT.getVectorNumElements();
44000 
44001       // We allow extra bits of the movmsk to be used since they are known zero.
44002       // We can't convert a VPMOVMSKB without avx512bw.
44003       if (MovMskElts <= NumElts &&
44004           (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
44005         EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
44006         MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
44007         SDLoc dl(N);
44008         MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
44009         SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
44010                                    DAG.getConstant(0, dl, IntVT), ISD::SETLT);
44011         if (EVT(CmpVT) == VT)
44012           return Cmp;
44013 
44014         // Pad with zeroes up to original VT to replace the zeroes that were
44015         // being used from the MOVMSK.
44016         unsigned NumConcats = NumElts / MovMskElts;
44017         SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
44018         Ops[0] = Cmp;
44019         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
44020       }
44021     }
44022   }
44023 
44024   // Try to remove bitcasts from input and output of mask arithmetic to
44025   // remove GPR<->K-register crossings.
44026   if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
44027     return V;
44028 
44029   // Convert a bitcasted integer logic operation that has one bitcasted
44030   // floating-point operand into a floating-point logic operation. This may
44031   // create a load of a constant, but that is cheaper than materializing the
44032   // constant in an integer register and transferring it to an SSE register or
44033   // transferring the SSE operand to integer register and back.
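  // For example, (f32 bitcast (and (i32 bitcast (f32 X)), C)) can become
  // (X86ISD::FAND X, (f32 bitcast C)).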
44034   unsigned FPOpcode;
44035   switch (N0.getOpcode()) {
44036     case ISD::AND: FPOpcode = X86ISD::FAND; break;
44037     case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
44038     case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
44039     default: return SDValue();
44040   }
44041 
44042   // Check if we have a bitcast from another integer type as well.
44043   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
44044         (Subtarget.hasSSE2() && VT == MVT::f64) ||
44045         (Subtarget.hasFP16() && VT == MVT::f16) ||
44046         (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
44047          TLI.isTypeLegal(VT))))
44048     return SDValue();
44049 
44050   SDValue LogicOp0 = N0.getOperand(0);
44051   SDValue LogicOp1 = N0.getOperand(1);
44052   SDLoc DL0(N0);
44053 
44054   // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
44055   if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
44056       LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
44057       LogicOp0.getOperand(0).getValueType() == VT &&
44058       !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
44059     SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
44060     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
44061     return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
44062   }
44063   // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
44064   if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
44065       LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
44066       LogicOp1.getOperand(0).getValueType() == VT &&
44067       !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
44068     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
44069     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
44070     return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
44071   }
44072 
44073   return SDValue();
44074 }
44075 
44076 // (mul (zext a), (sext b))
44077 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
44078                          SDValue &Op1) {
44079   Op0 = Mul.getOperand(0);
44080   Op1 = Mul.getOperand(1);
44081 
44082   // Operand 1 should be sign extended.
44083   if (Op0.getOpcode() == ISD::SIGN_EXTEND)
44084     std::swap(Op0, Op1);
44085 
44086   auto IsFreeTruncation = [](SDValue &Op) -> bool {
44087     if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
44088          Op.getOpcode() == ISD::SIGN_EXTEND) &&
44089         Op.getOperand(0).getScalarValueSizeInBits() <= 8)
44090       return true;
44091 
44092     auto *BV = dyn_cast<BuildVectorSDNode>(Op);
44093     return (BV && BV->isConstant());
44094   };
44095 
44096   // (dpbusd (zext a), (sext b)). Since the first operand must be an unsigned
44097   // value, check that Op0 is a zero-extended value. Op1 must be a signed
44098   // value, so we just check its significant bits.
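        // E.g. Op0 = (zext vXi8 A to vXi32) and Op1 = (sext vXi8 B to vXi32)
        // both qualify; constant build vectors also qualify as long as their
        // values still satisfy the 8-bit unsigned/signed checks below.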
44099   if ((IsFreeTruncation(Op0) &&
44100        DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
44101       (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
44102     return true;
44103 
44104   return false;
44105 }
44106 
44107 // Given an ABS node, detect the following pattern:
44108 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
44109 // This is useful as it is the input into a SAD pattern.
44110 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
44111   SDValue AbsOp1 = Abs->getOperand(0);
44112   if (AbsOp1.getOpcode() != ISD::SUB)
44113     return false;
44114 
44115   Op0 = AbsOp1.getOperand(0);
44116   Op1 = AbsOp1.getOperand(1);
44117 
44118   // Check if the operands of the sub are zero-extended from vectors of i8.
44119   if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
44120       Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
44121       Op1.getOpcode() != ISD::ZERO_EXTEND ||
44122       Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
44123     return false;
44124 
44125   return true;
44126 }
44127 
44128 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
44129                               unsigned &LogBias, const SDLoc &DL,
44130                               const X86Subtarget &Subtarget) {
44131   // Extend or truncate to MVT::i8 first.
44132   MVT Vi8VT =
44133       MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
44134   LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
44135   RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
44136 
44137   // VPDPBUSD(<16 x i32>C, <16 x i8>A, <16 x i8>B). For each dst element i:
44138   //   C[i] += A[4i]*B[4i] + A[4i+1]*B[4i+1] + A[4i+2]*B[4i+2] + A[4i+3]*B[4i+3].
44139   // The src A, B element type is i8, but the dst C element type is i32.
44140   // The reduction stage count is calculated from the vXi8 src vector type, so
44141   // we need a log-bias of 2 to avoid 2 extra stages.
44142   LogBias = 2;
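        // E.g. reducing the products of two v16i8 inputs takes Log2(16) = 4
        // halving stages, but VPDPBUSD already folds each group of 4 byte
        // products into one i32 lane (2 stages' worth), so the caller only
        // performs Stages - LogBias = 2 shuffle+add steps before extracting
        // element 0.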
44143 
44144   unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
44145   if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
44146     RegSize = std::max(512u, RegSize);
44147 
44148   // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
44149   // fill in the missing vector elements with 0.
44150   unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
44151   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
44152   Ops[0] = LHS;
44153   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
44154   SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44155   Ops[0] = RHS;
44156   SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44157 
44158   // Actually build the DotProduct, split as 256/512 bits for
44159   // AVXVNNI/AVX512VNNI.
44160   auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44161                        ArrayRef<SDValue> Ops) {
44162     MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
44163     return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
44164   };
44165   MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
44166   SDValue Zero = DAG.getConstant(0, DL, DpVT);
44167 
44168   return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
44169                           DpBuilder, false);
44170 }
44171 
44172 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
44173 // to these zexts.
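      // PSADBW computes |A[i] - B[i]| for the 8 unsigned byte pairs in each
      // 64-bit lane and sums them into that lane's i64 result (the sum always
      // fits in 16 bits), so a single instruction performs an 8-way horizontal
      // reduction per lane.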
44174 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
44175                             const SDValue &Zext1, const SDLoc &DL,
44176                             const X86Subtarget &Subtarget) {
44177   // Find the appropriate width for the PSADBW.
44178   EVT InVT = Zext0.getOperand(0).getValueType();
44179   unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
44180 
44181   // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
44182   // fill in the missing vector elements with 0.
44183   unsigned NumConcat = RegSize / InVT.getSizeInBits();
44184   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
44185   Ops[0] = Zext0.getOperand(0);
44186   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
44187   SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44188   Ops[0] = Zext1.getOperand(0);
44189   SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44190 
44191   // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44192   auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44193                           ArrayRef<SDValue> Ops) {
44194     MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44195     return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
44196   };
44197   MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
44198   return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
44199                           PSADBWBuilder);
44200 }
44201 
44202 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
44203 // PHMINPOSUW.
44204 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
44205                                       const X86Subtarget &Subtarget) {
44206   // Bail without SSE41.
44207   if (!Subtarget.hasSSE41())
44208     return SDValue();
44209 
44210   EVT ExtractVT = Extract->getValueType(0);
44211   if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
44212     return SDValue();
44213 
44214   // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
44215   ISD::NodeType BinOp;
44216   SDValue Src = DAG.matchBinOpReduction(
44217       Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
44218   if (!Src)
44219     return SDValue();
44220 
44221   EVT SrcVT = Src.getValueType();
44222   EVT SrcSVT = SrcVT.getScalarType();
44223   if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
44224     return SDValue();
44225 
44226   SDLoc DL(Extract);
44227   SDValue MinPos = Src;
44228 
44229   // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
44230   while (SrcVT.getSizeInBits() > 128) {
44231     SDValue Lo, Hi;
44232     std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
44233     SrcVT = Lo.getValueType();
44234     MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
44235   }
44236   assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
44237           (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
44238          "Unexpected value type");
44239 
44240   // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
44241   // to flip the value accordingly.
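        // E.g. UMAX(x) == NOT(UMIN(NOT(x))), so XOR with all-ones before and
        // after the UMIN; likewise XORing with the signed min/max constant
        // maps SMIN/SMAX ordering onto unsigned ordering and back.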
44242   SDValue Mask;
44243   unsigned MaskEltsBits = ExtractVT.getSizeInBits();
44244   if (BinOp == ISD::SMAX)
44245     Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
44246   else if (BinOp == ISD::SMIN)
44247     Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
44248   else if (BinOp == ISD::UMAX)
44249     Mask = DAG.getAllOnesConstant(DL, SrcVT);
44250 
44251   if (Mask)
44252     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
44253 
44254   // For v16i8 cases we need to perform UMIN on pairs of byte elements,
44255   // shuffling each upper element down and inserting zeros. This means that the
44256   // v16i8 UMIN will leave the upper element as zero, performing zero-extension
44257   // ready for the PHMINPOS.
44258   if (ExtractVT == MVT::i8) {
44259     SDValue Upper = DAG.getVectorShuffle(
44260         SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
44261         {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
44262     MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
44263   }
44264 
44265   // Perform the PHMINPOS on a v8i16 vector.
44266   MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
44267   MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
44268   MinPos = DAG.getBitcast(SrcVT, MinPos);
44269 
44270   if (Mask)
44271     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
44272 
44273   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
44274                      DAG.getIntPtrConstant(0, DL));
44275 }
44276 
44277 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
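      // E.g. for a v4i32 compare result (all sign bits), MOVMSKPS packs the 4
      // sign bits into a GPR: any_of is then MOVMSK != 0, all_of is
      // MOVMSK == 0xF, and parity is PARITY(MOVMSK).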
44278 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
44279                                          const X86Subtarget &Subtarget) {
44280   // Bail without SSE2.
44281   if (!Subtarget.hasSSE2())
44282     return SDValue();
44283 
44284   EVT ExtractVT = Extract->getValueType(0);
44285   unsigned BitWidth = ExtractVT.getSizeInBits();
44286   if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
44287       ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
44288     return SDValue();
44289 
44290   // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
44291   ISD::NodeType BinOp;
44292   SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
44293   if (!Match && ExtractVT == MVT::i1)
44294     Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
44295   if (!Match)
44296     return SDValue();
44297 
44298   // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
44299   // which we can't support here for now.
44300   if (Match.getScalarValueSizeInBits() != BitWidth)
44301     return SDValue();
44302 
44303   SDValue Movmsk;
44304   SDLoc DL(Extract);
44305   EVT MatchVT = Match.getValueType();
44306   unsigned NumElts = MatchVT.getVectorNumElements();
44307   unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
44308   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44309 
44310   if (ExtractVT == MVT::i1) {
44311     // Special case for (pre-legalization) vXi1 reductions.
44312     if (NumElts > 64 || !isPowerOf2_32(NumElts))
44313       return SDValue();
44314     if (TLI.isTypeLegal(MatchVT)) {
44315       // If this is a legal AVX512 predicate type then we can just bitcast.
44316       EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
44317       Movmsk = DAG.getBitcast(MovmskVT, Match);
44318     } else {
44319       // For all_of(setcc(x,y,eq)) - use PMOVMSKB(PCMPEQB()).
44320       if (BinOp == ISD::AND && Match.getOpcode() == ISD::SETCC &&
44321           cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
44322               ISD::CondCode::SETEQ) {
44323         EVT VecSVT = Match.getOperand(0).getValueType().getScalarType();
44324         if (VecSVT != MVT::i8 && (VecSVT.getSizeInBits() % 8) == 0) {
44325           NumElts *= VecSVT.getSizeInBits() / 8;
44326           EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, NumElts);
44327           MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
44328           Match = DAG.getSetCC(
44329               DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
44330               DAG.getBitcast(CmpVT, Match.getOperand(1)), ISD::CondCode::SETEQ);
44331         }
44332       }
44333 
44334       // Use combineBitcastvxi1 to create the MOVMSK.
44335       while (NumElts > MaxElts) {
44336         SDValue Lo, Hi;
44337         std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
44338         Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
44339         NumElts /= 2;
44340       }
44341       EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
44342       Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
44343     }
44344     if (!Movmsk)
44345       return SDValue();
44346     Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
44347   } else {
44348     // FIXME: Better handling of k-registers or 512-bit vectors?
44349     unsigned MatchSizeInBits = Match.getValueSizeInBits();
44350     if (!(MatchSizeInBits == 128 ||
44351           (MatchSizeInBits == 256 && Subtarget.hasAVX())))
44352       return SDValue();
44353 
44354     // Make sure this isn't a vector of 1 element. The perf win from using
44355   // MOVMSK diminishes with fewer elements in the reduction, but it is
44356     // generally better to get the comparison over to the GPRs as soon as
44357     // possible to reduce the number of vector ops.
44358     if (Match.getValueType().getVectorNumElements() < 2)
44359       return SDValue();
44360 
44361     // Check that we are extracting a reduction of all sign bits.
44362     if (DAG.ComputeNumSignBits(Match) != BitWidth)
44363       return SDValue();
44364 
44365     if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
44366       SDValue Lo, Hi;
44367       std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
44368       Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
44369       MatchSizeInBits = Match.getValueSizeInBits();
44370     }
44371 
44372     // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
44373     MVT MaskSrcVT;
44374     if (64 == BitWidth || 32 == BitWidth)
44375       MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
44376                                    MatchSizeInBits / BitWidth);
44377     else
44378       MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
44379 
44380     SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
44381     Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
44382     NumElts = MaskSrcVT.getVectorNumElements();
44383   }
44384   assert((NumElts <= 32 || NumElts == 64) &&
44385          "Not expecting more than 64 elements");
44386 
44387   MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
44388   if (BinOp == ISD::XOR) {
44389     // parity -> (PARITY(MOVMSK X))
44390     SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
44391     return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
44392   }
44393 
44394   SDValue CmpC;
44395   ISD::CondCode CondCode;
44396   if (BinOp == ISD::OR) {
44397     // any_of -> MOVMSK != 0
44398     CmpC = DAG.getConstant(0, DL, CmpVT);
44399     CondCode = ISD::CondCode::SETNE;
44400   } else {
44401     // all_of -> MOVMSK == ((1 << NumElts) - 1)
44402     CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
44403                            DL, CmpVT);
44404     CondCode = ISD::CondCode::SETEQ;
44405   }
44406 
44407   // The setcc produces an i8 of 0/1, so extend that to the result width and
44408   // negate to get the final 0/-1 mask value.
44409   EVT SetccVT =
44410       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
44411   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
44412   SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
44413   SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
44414   return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
44415 }
44416 
44417 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
44418                                       const X86Subtarget &Subtarget) {
44419   if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
44420     return SDValue();
44421 
44422   EVT ExtractVT = Extract->getValueType(0);
44423   // Verify the type we're extracting is i32, as the output element type of
44424   // vpdpbusd is i32.
44425   if (ExtractVT != MVT::i32)
44426     return SDValue();
44427 
44428   EVT VT = Extract->getOperand(0).getValueType();
44429   if (!isPowerOf2_32(VT.getVectorNumElements()))
44430     return SDValue();
44431 
44432   // Match shuffle + add pyramid.
44433   ISD::NodeType BinOp;
44434   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
44435 
44436   // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
44437   // done by vpdpbusd computes a signed 16-bit product that will be sign extended
44438   // before adding into the accumulator.
44439   // TODO:
44440   // We also need to verify that the multiply has at least 2x the number of bits
44441   // of the input. We shouldn't match
44442   // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y))))).
44443   // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
44444   //   Root = Root.getOperand(0);
44445 
44446   // If there was a match, we want Root to be a mul.
44447   if (!Root || Root.getOpcode() != ISD::MUL)
44448     return SDValue();
44449 
44450   // Check whether we have an extend and mul pattern
44451   SDValue LHS, RHS;
44452   if (!detectExtMul(DAG, Root, LHS, RHS))
44453     return SDValue();
44454 
44455   // Create the dot product instruction.
44456   SDLoc DL(Extract);
44457   unsigned StageBias;
44458   SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
44459 
44460   // If the original vector was wider than 4 elements, sum over the results
44461   // in the DP vector.
44462   unsigned Stages = Log2_32(VT.getVectorNumElements());
44463   EVT DpVT = DP.getValueType();
44464 
44465   if (Stages > StageBias) {
44466     unsigned DpElems = DpVT.getVectorNumElements();
44467 
44468     for (unsigned i = Stages - StageBias; i > 0; --i) {
44469       SmallVector<int, 16> Mask(DpElems, -1);
44470       for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
44471         Mask[j] = MaskEnd + j;
44472 
44473       SDValue Shuffle =
44474           DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
44475       DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
44476     }
44477   }
44478 
44479   // Return the lowest ExtractSizeInBits bits.
44480   EVT ResVT =
44481       EVT::getVectorVT(*DAG.getContext(), ExtractVT,
44482                        DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
44483   DP = DAG.getBitcast(ResVT, DP);
44484   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
44485                      Extract->getOperand(1));
44486 }
44487 
44488 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
44489                                       const X86Subtarget &Subtarget) {
44490   // PSADBW is only supported on SSE2 and up.
44491   if (!Subtarget.hasSSE2())
44492     return SDValue();
44493 
44494   EVT ExtractVT = Extract->getValueType(0);
44495   // Verify the type we're extracting is either i32 or i64.
44496   // FIXME: Could support other types, but this is what we have coverage for.
44497   if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
44498     return SDValue();
44499 
44500   EVT VT = Extract->getOperand(0).getValueType();
44501   if (!isPowerOf2_32(VT.getVectorNumElements()))
44502     return SDValue();
44503 
44504   // Match shuffle + add pyramid.
44505   ISD::NodeType BinOp;
44506   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
44507 
44508   // The operand is expected to be zero extended from i8
44509   // (verified in detectZextAbsDiff).
44510   // In order to convert to i64 and above, an additional any/zero/sign
44511   // extend is expected.
44512   // The zero extend from 32 bits has no mathematical effect on the result.
44513   // The sign extend is also effectively a zero extend
44514   // (it extends the sign bit, which is zero).
44515   // So it is correct to skip the sign/zero extend instruction.
44516   if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
44517                Root.getOpcode() == ISD::ZERO_EXTEND ||
44518                Root.getOpcode() == ISD::ANY_EXTEND))
44519     Root = Root.getOperand(0);
44520 
44521   // If there was a match, we want Root to be the ABS node at the root of an
44522   // abs-diff pattern.
44523   if (!Root || Root.getOpcode() != ISD::ABS)
44524     return SDValue();
44525 
44526   // Check whether we have an abs-diff pattern feeding into the ABS node.
44527   SDValue Zext0, Zext1;
44528   if (!detectZextAbsDiff(Root, Zext0, Zext1))
44529     return SDValue();
44530 
44531   // Create the SAD instruction.
44532   SDLoc DL(Extract);
44533   SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
44534 
44535   // If the original vector was wider than 8 elements, sum over the results
44536   // in the SAD vector.
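        // E.g. when the abs-diffs come from v32i8 inputs, the PSADBW result is
        // v4i64 and Stages = Log2(32) = 5, so the loop below runs twice: mask
        // {2,3,-1,-1} adds the high i64 pair onto the low pair, then mask
        // {1,-1,-1,-1} adds element 1 onto element 0, leaving the total sum in
        // element 0.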
44537   unsigned Stages = Log2_32(VT.getVectorNumElements());
44538   EVT SadVT = SAD.getValueType();
44539   if (Stages > 3) {
44540     unsigned SadElems = SadVT.getVectorNumElements();
44541 
44542     for(unsigned i = Stages - 3; i > 0; --i) {
44543       SmallVector<int, 16> Mask(SadElems, -1);
44544       for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
44545         Mask[j] = MaskEnd + j;
44546 
44547       SDValue Shuffle =
44548           DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
44549       SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
44550     }
44551   }
44552 
44553   unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
44554   // Return the lowest ExtractSizeInBits bits.
44555   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
44556                                SadVT.getSizeInBits() / ExtractSizeInBits);
44557   SAD = DAG.getBitcast(ResVT, SAD);
44558   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
44559                      Extract->getOperand(1));
44560 }
44561 
44562 // Attempt to peek through a target shuffle and extract the scalar from the
44563 // source.
44564 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
44565                                          TargetLowering::DAGCombinerInfo &DCI,
44566                                          const X86Subtarget &Subtarget) {
44567   if (DCI.isBeforeLegalizeOps())
44568     return SDValue();
44569 
44570   SDLoc dl(N);
44571   SDValue Src = N->getOperand(0);
44572   SDValue Idx = N->getOperand(1);
44573 
44574   EVT VT = N->getValueType(0);
44575   EVT SrcVT = Src.getValueType();
44576   EVT SrcSVT = SrcVT.getVectorElementType();
44577   unsigned SrcEltBits = SrcSVT.getSizeInBits();
44578   unsigned NumSrcElts = SrcVT.getVectorNumElements();
44579 
44580   // Don't attempt this for boolean mask vectors or unknown extraction indices.
44581   if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
44582     return SDValue();
44583 
44584   const APInt &IdxC = N->getConstantOperandAPInt(1);
44585   if (IdxC.uge(NumSrcElts))
44586     return SDValue();
44587 
44588   SDValue SrcBC = peekThroughBitcasts(Src);
44589 
44590   // Handle extract(bitcast(broadcast(scalar_value))).
44591   if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
44592     SDValue SrcOp = SrcBC.getOperand(0);
44593     EVT SrcOpVT = SrcOp.getValueType();
44594     if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
44595         (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
44596       unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
44597       unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
44598       // TODO support non-zero offsets.
44599       if (Offset == 0) {
44600         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
44601         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
44602         return SrcOp;
44603       }
44604     }
44605   }
44606 
44607   // If we're extracting a single element from a broadcast load and there are
44608   // no other users, just create a single load.
44609   if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
44610     auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
44611     unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
44612     if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
44613         VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
44614       SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
44615                                  MemIntr->getBasePtr(),
44616                                  MemIntr->getPointerInfo(),
44617                                  MemIntr->getOriginalAlign(),
44618                                  MemIntr->getMemOperand()->getFlags());
44619       DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
44620       return Load;
44621     }
44622   }
44623 
44624   // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
44625   // TODO: Move to DAGCombine?
44626   if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
44627       SrcBC.getValueType().isInteger() &&
44628       (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
44629       SrcBC.getScalarValueSizeInBits() ==
44630           SrcBC.getOperand(0).getValueSizeInBits()) {
44631     unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
44632     if (IdxC.ult(Scale)) {
44633       unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
44634       SDValue Scl = SrcBC.getOperand(0);
44635       EVT SclVT = Scl.getValueType();
44636       if (Offset) {
44637         Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
44638                           DAG.getShiftAmountConstant(Offset, SclVT, dl));
44639       }
44640       Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
44641       Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
44642       return Scl;
44643     }
44644   }
44645 
44646   // Handle extract(truncate(x)) for 0'th index.
44647   // TODO: Treat this as a faux shuffle?
44648   // TODO: When can we use this for general indices?
44649   if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
44650       (SrcVT.getSizeInBits() % 128) == 0) {
44651     Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
44652     MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
44653     return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
44654                        Idx);
44655   }
44656 
44657   // We can only legally extract other elements from 128-bit vectors and in
44658   // certain circumstances, depending on SSE-level.
44659   // TODO: Investigate float/double extraction if it will be just stored.
44660   auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
44661                                                  unsigned Idx) {
44662     EVT VecSVT = VecVT.getScalarType();
44663     if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
44664         (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
44665          VecSVT == MVT::i64)) {
44666       unsigned EltSizeInBits = VecSVT.getSizeInBits();
44667       unsigned NumEltsPerLane = 128 / EltSizeInBits;
44668       unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
44669       unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
44670       VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
44671       Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
44672       Idx &= (NumEltsPerLane - 1);
44673     }
44674     if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
44675         ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
44676       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
44677                          DAG.getBitcast(VecVT, Vec),
44678                          DAG.getIntPtrConstant(Idx, dl));
44679     }
44680     if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
44681         (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
44682       unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
44683       return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
44684                          DAG.getTargetConstant(Idx, dl, MVT::i8));
44685     }
44686     return SDValue();
44687   };
44688 
44689   // Resolve the target shuffle inputs and mask.
44690   SmallVector<int, 16> Mask;
44691   SmallVector<SDValue, 2> Ops;
44692   if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
44693     return SDValue();
44694 
44695   // Shuffle inputs must be the same size as the result.
44696   if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
44697         return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
44698       }))
44699     return SDValue();
44700 
44701   // Attempt to narrow/widen the shuffle mask to the correct size.
44702   if (Mask.size() != NumSrcElts) {
44703     if ((NumSrcElts % Mask.size()) == 0) {
44704       SmallVector<int, 16> ScaledMask;
44705       int Scale = NumSrcElts / Mask.size();
44706       narrowShuffleMaskElts(Scale, Mask, ScaledMask);
44707       Mask = std::move(ScaledMask);
44708     } else if ((Mask.size() % NumSrcElts) == 0) {
44709       // Simplify Mask based on demanded element.
44710       int ExtractIdx = (int)IdxC.getZExtValue();
44711       int Scale = Mask.size() / NumSrcElts;
44712       int Lo = Scale * ExtractIdx;
44713       int Hi = Scale * (ExtractIdx + 1);
44714       for (int i = 0, e = (int)Mask.size(); i != e; ++i)
44715         if (i < Lo || Hi <= i)
44716           Mask[i] = SM_SentinelUndef;
44717 
44718       SmallVector<int, 16> WidenedMask;
44719       while (Mask.size() > NumSrcElts &&
44720              canWidenShuffleElements(Mask, WidenedMask))
44721         Mask = std::move(WidenedMask);
44722     }
44723   }
44724 
44725   // If narrowing/widening failed, see if we can extract+zero-extend.
44726   int ExtractIdx;
44727   EVT ExtractVT;
44728   if (Mask.size() == NumSrcElts) {
44729     ExtractIdx = Mask[IdxC.getZExtValue()];
44730     ExtractVT = SrcVT;
44731   } else {
44732     unsigned Scale = Mask.size() / NumSrcElts;
44733     if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
44734       return SDValue();
44735     unsigned ScaledIdx = Scale * IdxC.getZExtValue();
44736     if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
44737       return SDValue();
44738     ExtractIdx = Mask[ScaledIdx];
44739     EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
44740     ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
44741     assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
44742            "Failed to widen vector type");
44743   }
44744 
44745   // If the shuffle source element is undef/zero then we can just accept it.
44746   if (ExtractIdx == SM_SentinelUndef)
44747     return DAG.getUNDEF(VT);
44748 
44749   if (ExtractIdx == SM_SentinelZero)
44750     return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
44751                                 : DAG.getConstant(0, dl, VT);
44752 
44753   SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
44754   ExtractIdx = ExtractIdx % Mask.size();
44755   if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
44756     return DAG.getZExtOrTrunc(V, dl, VT);
44757 
44758   return SDValue();
44759 }
44760 
44761 /// Extracting a scalar FP value from vector element 0 is free, so extract each
44762 /// operand first, then perform the math as a scalar op.
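      /// For example, (extractelt (fadd v4f32 X, Y), 0) becomes
      /// (fadd (extractelt X, 0), (extractelt Y, 0)), which can select to a
      /// single scalar ADDSS instead of a full-width ADDPS plus an extract.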
44763 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
44764                                  const X86Subtarget &Subtarget) {
44765   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
44766   SDValue Vec = ExtElt->getOperand(0);
44767   SDValue Index = ExtElt->getOperand(1);
44768   EVT VT = ExtElt->getValueType(0);
44769   EVT VecVT = Vec.getValueType();
44770 
44771   // TODO: If this is a unary/expensive/expand op, allow extraction from a
44772   // non-zero element because the shuffle+scalar op will be cheaper?
44773   if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
44774     return SDValue();
44775 
44776   // Vector FP compares don't fit the pattern of FP math ops (propagate, not
44777   // extract, the condition code), so deal with those as a special-case.
44778   if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
44779     EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
44780     if (OpVT != MVT::f32 && OpVT != MVT::f64)
44781       return SDValue();
44782 
44783     // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
44784     SDLoc DL(ExtElt);
44785     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
44786                                Vec.getOperand(0), Index);
44787     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
44788                                Vec.getOperand(1), Index);
44789     return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
44790   }
44791 
44792   if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
44793       VT != MVT::f64)
44794     return SDValue();
44795 
44796   // Vector FP selects don't fit the pattern of FP math ops (because the
44797   // condition has a different type and we have to change the opcode), so deal
44798   // with those here.
44799   // FIXME: This is restricted to pre type legalization by ensuring the setcc
44800   // has i1 elements. If we loosen this we need to convert vector bool to a
44801   // scalar bool.
44802   if (Vec.getOpcode() == ISD::VSELECT &&
44803       Vec.getOperand(0).getOpcode() == ISD::SETCC &&
44804       Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
44805       Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
44806     // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
44807     SDLoc DL(ExtElt);
44808     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
44809                                Vec.getOperand(0).getValueType().getScalarType(),
44810                                Vec.getOperand(0), Index);
44811     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
44812                                Vec.getOperand(1), Index);
44813     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
44814                                Vec.getOperand(2), Index);
44815     return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
44816   }
44817 
44818   // TODO: This switch could include FNEG and the x86-specific FP logic ops
44819   // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
44820   // missed load folding and fma+fneg combining.
44821   switch (Vec.getOpcode()) {
44822   case ISD::FMA: // Begin 3 operands
44823   case ISD::FMAD:
44824   case ISD::FADD: // Begin 2 operands
44825   case ISD::FSUB:
44826   case ISD::FMUL:
44827   case ISD::FDIV:
44828   case ISD::FREM:
44829   case ISD::FCOPYSIGN:
44830   case ISD::FMINNUM:
44831   case ISD::FMAXNUM:
44832   case ISD::FMINNUM_IEEE:
44833   case ISD::FMAXNUM_IEEE:
44834   case ISD::FMAXIMUM:
44835   case ISD::FMINIMUM:
44836   case X86ISD::FMAX:
44837   case X86ISD::FMIN:
44838   case ISD::FABS: // Begin 1 operand
44839   case ISD::FSQRT:
44840   case ISD::FRINT:
44841   case ISD::FCEIL:
44842   case ISD::FTRUNC:
44843   case ISD::FNEARBYINT:
44844   case ISD::FROUND:
44845   case ISD::FFLOOR:
44846   case X86ISD::FRCP:
44847   case X86ISD::FRSQRT: {
44848     // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
44849     SDLoc DL(ExtElt);
44850     SmallVector<SDValue, 4> ExtOps;
44851     for (SDValue Op : Vec->ops())
44852       ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
44853     return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
44854   }
44855   default:
44856     return SDValue();
44857   }
44858   llvm_unreachable("All opcodes should return within switch");
44859 }
44860 
44861 /// Try to convert a vector reduction sequence composed of binops and shuffles
44862 /// into horizontal ops.
44863 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
44864                                      const X86Subtarget &Subtarget) {
44865   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
44866 
44867   // We need at least SSE2 to do anything here.
44868   if (!Subtarget.hasSSE2())
44869     return SDValue();
44870 
44871   ISD::NodeType Opc;
44872   SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
44873                                         {ISD::ADD, ISD::MUL, ISD::FADD}, true);
44874   if (!Rdx)
44875     return SDValue();
44876 
44877   SDValue Index = ExtElt->getOperand(1);
44878   assert(isNullConstant(Index) &&
44879          "Reduction doesn't end in an extract from index 0");
44880 
44881   EVT VT = ExtElt->getValueType(0);
44882   EVT VecVT = Rdx.getValueType();
44883   if (VecVT.getScalarType() != VT)
44884     return SDValue();
44885 
44886   SDLoc DL(ExtElt);
44887   unsigned NumElts = VecVT.getVectorNumElements();
44888   unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
44889 
44890   // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
44891   auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
44892     if (V.getValueType() == MVT::v4i8) {
44893       if (ZeroExtend && Subtarget.hasSSE41()) {
44894         V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
44895                         DAG.getConstant(0, DL, MVT::v4i32),
44896                         DAG.getBitcast(MVT::i32, V),
44897                         DAG.getIntPtrConstant(0, DL));
44898         return DAG.getBitcast(MVT::v16i8, V);
44899       }
44900       V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
44901                       ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
44902                                  : DAG.getUNDEF(MVT::v4i8));
44903     }
44904     return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
44905                        DAG.getUNDEF(MVT::v8i8));
44906   };
44907 
44908   // vXi8 mul reduction - promote to vXi16 mul reduction.
44909   if (Opc == ISD::MUL) {
44910     if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
44911       return SDValue();
44912     if (VecVT.getSizeInBits() >= 128) {
44913       EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
44914       SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
44915       SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
44916       Lo = DAG.getBitcast(WideVT, Lo);
44917       Hi = DAG.getBitcast(WideVT, Hi);
44918       Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
44919       while (Rdx.getValueSizeInBits() > 128) {
44920         std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44921         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
44922       }
44923     } else {
44924       Rdx = WidenToV16I8(Rdx, false);
44925       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
44926       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
44927     }
44928     if (NumElts >= 8)
44929       Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44930                         DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44931                                              {4, 5, 6, 7, -1, -1, -1, -1}));
44932     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44933                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44934                                            {2, 3, -1, -1, -1, -1, -1, -1}));
44935     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44936                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44937                                            {1, -1, -1, -1, -1, -1, -1, -1}));
44938     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44939     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44940   }
44941 
44942   // vXi8 add reduction - sub-128-bit vector.
44943   if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
44944     Rdx = WidenToV16I8(Rdx, true);
44945     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44946                       DAG.getConstant(0, DL, MVT::v16i8));
44947     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44948     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44949   }
44950 
44951   // Must be a >=128-bit vector with pow2 elements.
44952   if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
44953     return SDValue();
44954 
44955   // vXi8 add reduction - sum lo/hi halves then use PSADBW.
44956   if (VT == MVT::i8) {
44957     while (Rdx.getValueSizeInBits() > 128) {
44958       SDValue Lo, Hi;
44959       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44960       VecVT = Lo.getValueType();
44961       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44962     }
44963     assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
44964 
44965     SDValue Hi = DAG.getVectorShuffle(
44966         MVT::v16i8, DL, Rdx, Rdx,
44967         {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
44968     Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
44969     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44970                       getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
44971     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44972     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44973   }
44974 
44975   // See if we can use vXi8 PSADBW add reduction for larger zext types.
44976   // If the source vector values are 0-255, then we can use PSADBW to
44977   // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
44978   // TODO: See if it's worth avoiding vXi16/i32 truncations?
44979   if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
44980       DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
44981       (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
44982        Subtarget.hasAVX512())) {
44983     EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
44984     Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
44985     if (ByteVT.getSizeInBits() < 128)
44986       Rdx = WidenToV16I8(Rdx, true);
44987 
44988     // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44989     auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44990                             ArrayRef<SDValue> Ops) {
44991       MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44992       SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
44993       return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
44994     };
44995     MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
44996     Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
44997 
44998     // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
44999     while (Rdx.getValueSizeInBits() > 128) {
45000       SDValue Lo, Hi;
45001       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
45002       VecVT = Lo.getValueType();
45003       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
45004     }
45005     assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
45006 
45007     if (NumElts > 8) {
45008       SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
45009       Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
45010     }
45011 
45012     VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
45013     Rdx = DAG.getBitcast(VecVT, Rdx);
45014     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
45015   }
45016 
45017   // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing codesize.
45018   if (!shouldUseHorizontalOp(true, DAG, Subtarget))
45019     return SDValue();
45020 
45021   unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
45022 
45023   // 256-bit horizontal instructions operate on 128-bit chunks rather than
45024   // across the whole vector, so we need an extract + hop preliminary stage.
45025   // This is the only step where the operands of the hop are not the same value.
45026   // TODO: We could extend this to handle 512-bit or even longer vectors.
45027   if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
45028       ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
45029     unsigned NumElts = VecVT.getVectorNumElements();
45030     SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
45031     SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
45032     Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
45033     VecVT = Rdx.getValueType();
45034   }
45035   if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
45036       !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
45037     return SDValue();
45038 
45039   // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
45040   unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
45041   for (unsigned i = 0; i != ReductionSteps; ++i)
45042     Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
45043 
45044   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
45045 }
45046 
45047 /// Detect vector gather/scatter index generation and convert it from being a
45048 /// bunch of shuffles and extracts into a somewhat faster sequence.
45049 /// For i686, the best sequence is apparently storing the value and loading
45050 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
45051 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
45052                                        TargetLowering::DAGCombinerInfo &DCI,
45053                                        const X86Subtarget &Subtarget) {
45054   if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
45055     return NewOp;
45056 
45057   SDValue InputVector = N->getOperand(0);
45058   SDValue EltIdx = N->getOperand(1);
45059   auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
45060 
45061   EVT SrcVT = InputVector.getValueType();
45062   EVT VT = N->getValueType(0);
45063   SDLoc dl(InputVector);
45064   bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
45065   unsigned NumSrcElts = SrcVT.getVectorNumElements();
45066   unsigned NumEltBits = VT.getScalarSizeInBits();
45067   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45068 
45069   if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
45070     return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
45071 
45072   // Integer Constant Folding.
45073   if (CIdx && VT.isInteger()) {
45074     APInt UndefVecElts;
45075     SmallVector<APInt, 16> EltBits;
45076     unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
45077     if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
45078                                       EltBits, true, false)) {
45079       uint64_t Idx = CIdx->getZExtValue();
45080       if (UndefVecElts[Idx])
45081         return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
45082       return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
45083     }
45084 
45085     // Convert extract_element(bitcast(<X x i1>)) -> bitcast(extract_subvector()).
45086     // Improves lowering of bool masks on Rust, which splits them into a byte array.
45087     if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
45088       SDValue Src = peekThroughBitcasts(InputVector);
45089       if (Src.getValueType().getScalarType() == MVT::i1 &&
45090           TLI.isTypeLegal(Src.getValueType())) {
45091         MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
45092         SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
45093             DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
45094         return DAG.getBitcast(VT, Sub);
45095       }
45096     }
45097   }
45098 
45099   if (IsPextr) {
45100     if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
45101                                  DCI))
45102       return SDValue(N, 0);
45103 
45104     // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
45105     if ((InputVector.getOpcode() == X86ISD::PINSRB ||
45106          InputVector.getOpcode() == X86ISD::PINSRW) &&
45107         InputVector.getOperand(2) == EltIdx) {
45108       assert(SrcVT == InputVector.getOperand(0).getValueType() &&
45109              "Vector type mismatch");
45110       SDValue Scl = InputVector.getOperand(1);
45111       Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
45112       return DAG.getZExtOrTrunc(Scl, dl, VT);
45113     }
45114 
45115     // TODO - Remove this once we can handle the implicit zero-extension of
45116     // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
45117     // combineBasicSADPattern.
45118     return SDValue();
45119   }
45120 
45121   // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
45122   if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
45123       VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
45124     SDValue MMXSrc = InputVector.getOperand(0);
45125 
45126     // The bitcast source is a direct mmx result.
45127     if (MMXSrc.getValueType() == MVT::x86mmx)
45128       return DAG.getBitcast(VT, InputVector);
45129   }
45130 
45131   // Detect mmx to i32 conversion through a v2i32 elt extract.
45132   if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
45133       VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
45134     SDValue MMXSrc = InputVector.getOperand(0);
45135 
45136     // The bitcast source is a direct mmx result.
45137     if (MMXSrc.getValueType() == MVT::x86mmx)
45138       return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
45139   }
45140 
45141   // Check whether this extract is the root of a sum of absolute differences
45142   // pattern. This has to be done here because we really want it to happen
45143   // pre-legalization.
45144   if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
45145     return SAD;
45146 
45147   if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
45148     return VPDPBUSD;
45149 
45150   // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
45151   if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
45152     return Cmp;
45153 
45154   // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
45155   if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
45156     return MinMax;
45157 
45158   // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc.
45159   if (SDValue V = combineArithReduction(N, DAG, Subtarget))
45160     return V;
45161 
45162   if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
45163     return V;
45164 
45165   // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
45166   // and then testing the relevant element.
45167   //
45168   // Note that we only combine extracts on the *same* result number, i.e.
45169   //   t0 = merge_values a0, a1, a2, a3
45170   //   i1 = extract_vector_elt t0, Constant:i64<2>
45171   //   i1 = extract_vector_elt t0, Constant:i64<3>
45172   // but not
45173   //   i1 = extract_vector_elt t0:1, Constant:i64<2>
45174   // since the latter would need its own MOVMSK.
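        // E.g. (extractelement v16i1 X, 5) becomes ((MOVMSK X) & 0x20) == 0x20
        // once the mask vector has been packed into an i16 by
        // combineBitcastvxi1 below.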
45175   if (SrcVT.getScalarType() == MVT::i1) {
45176     bool IsVar = !CIdx;
45177     SmallVector<SDNode *, 16> BoolExtracts;
45178     unsigned ResNo = InputVector.getResNo();
45179     auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
45180       if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
45181           Use->getOperand(0).getResNo() == ResNo &&
45182           Use->getValueType(0) == MVT::i1) {
45183         BoolExtracts.push_back(Use);
45184         IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
45185         return true;
45186       }
45187       return false;
45188     };
45189     // TODO: Can we drop the oneuse check for constant extracts?
45190     if (all_of(InputVector->uses(), IsBoolExtract) &&
45191         (IsVar || BoolExtracts.size() > 1)) {
45192       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
45193       if (SDValue BC =
45194               combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
45195         for (SDNode *Use : BoolExtracts) {
45196           // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
45197           // Mask = 1 << MaskIdx
45198           SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
45199           SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
45200           SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
45201           SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
45202           Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
45203           DCI.CombineTo(Use, Res);
45204         }
45205         return SDValue(N, 0);
45206       }
45207     }
45208   }
45209 
45210   // If this extract is from a loaded vector value and will be used as an
45211   // integer, that requires a potentially expensive XMM -> GPR transfer.
45212   // Additionally, if we can convert to a scalar integer load, that will likely
45213   // be folded into a subsequent integer op.
45214   // Note: Unlike the related fold for this in DAGCombiner, this is not limited
45215   //       to a single-use of the loaded vector. For the reasons above, we
45216   //       expect this to be profitable even if it creates an extra load.
45217   bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
45218     return Use->getOpcode() == ISD::STORE ||
45219            Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
45220            Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
45221   });
45222   auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
45223   if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
45224       SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
45225       !LikelyUsedAsVector && LoadVec->isSimple()) {
45226     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45227     SDValue NewPtr =
45228         TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
45229     unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
45230     MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
45231     Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
45232     SDValue Load =
45233         DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
45234                     LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
45235     DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
45236     return Load;
45237   }
45238 
45239   return SDValue();
45240 }
45241 
45242 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
45243 // This is more or less the reverse of combineBitcastvxi1.
45244 static SDValue combineToExtendBoolVectorInReg(
45245     unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
45246     TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
45247   if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
45248       Opcode != ISD::ANY_EXTEND)
45249     return SDValue();
45250   if (!DCI.isBeforeLegalizeOps())
45251     return SDValue();
45252   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
45253     return SDValue();
45254 
45255   EVT SVT = VT.getScalarType();
45256   EVT InSVT = N0.getValueType().getScalarType();
45257   unsigned EltSizeInBits = SVT.getSizeInBits();
45258 
45259   // The input must be a bool vector (bit-cast from a scalar integer) that is
45260   // being extended to a vector with legal integer element types.
45261   if (!VT.isVector())
45262     return SDValue();
45263   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
45264     return SDValue();
45265   if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
45266     return SDValue();
45267 
45268   SDValue N00 = N0.getOperand(0);
45269   EVT SclVT = N00.getValueType();
45270   if (!SclVT.isScalarInteger())
45271     return SDValue();
45272 
45273   SDValue Vec;
45274   SmallVector<int> ShuffleMask;
45275   unsigned NumElts = VT.getVectorNumElements();
45276   assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
45277 
45278   // Broadcast the scalar integer to the vector elements.
45279   if (NumElts > EltSizeInBits) {
45280     // If the scalar integer is greater than the vector element size, then we
45281     // must split it down into sub-sections for broadcasting. For example:
45282     //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
45283     //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
45284     assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
45285     unsigned Scale = NumElts / EltSizeInBits;
45286     EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
45287     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
45288     Vec = DAG.getBitcast(VT, Vec);
45289 
45290     for (unsigned i = 0; i != Scale; ++i)
45291       ShuffleMask.append(EltSizeInBits, i);
45292     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
45293   } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
45294              (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
45295     // If we have register broadcast instructions, use the scalar size as the
45296     // element type for the shuffle. Then cast to the wider element type. The
45297     // widened bits won't be used, and this might allow the use of a broadcast
45298     // load.
45299     assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
45300     unsigned Scale = EltSizeInBits / NumElts;
45301     EVT BroadcastVT =
45302         EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
45303     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
45304     ShuffleMask.append(NumElts * Scale, 0);
45305     Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
45306     Vec = DAG.getBitcast(VT, Vec);
45307   } else {
45308     // For smaller scalar integers, we can simply any-extend it to the vector
45309     // element size (we don't care about the upper bits) and broadcast it to all
45310     // elements.
45311     SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
45312     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
45313     ShuffleMask.append(NumElts, 0);
45314     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
45315   }
45316 
45317   // Now, mask the relevant bit in each element.
45318   SmallVector<SDValue, 32> Bits;
45319   for (unsigned i = 0; i != NumElts; ++i) {
45320     int BitIdx = (i % EltSizeInBits);
45321     APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
45322     Bits.push_back(DAG.getConstant(Bit, DL, SVT));
45323   }
45324   SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
45325   Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
45326 
45327   // Compare against the bitmask and extend the result.
45328   EVT CCVT = VT.changeVectorElementType(MVT::i1);
45329   Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
45330   Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
45331 
45332   // For SEXT, this is now done, otherwise shift the result down for
45333   // zero-extension.
45334   if (Opcode == ISD::SIGN_EXTEND)
45335     return Vec;
45336   return DAG.getNode(ISD::SRL, DL, VT, Vec,
45337                      DAG.getConstant(EltSizeInBits - 1, DL, VT));
45338 }
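// Illustrative walk-through of the transform above (hypothetical types): for
//   (sign_extend (v8i1 (bitcast (i8 X))) to v8i16)
// the i8 X is broadcast into every 16-bit lane (the upper bits are unused),
// lane i is ANDed with the constant (1 << i) and compared for equality against
// that same constant, and the i1 result is sign-extended so lane i becomes
// all-ones iff bit i of X is set. For ZERO_EXTEND the final SRL by 15 turns
// the all-ones lanes into 1.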
45339 
45340 /// If a vector select has an operand that is -1 or 0, try to simplify the
45341 /// select to a bitwise logic operation.
45342 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
45343 static SDValue
45344 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
45345                                  TargetLowering::DAGCombinerInfo &DCI,
45346                                  const X86Subtarget &Subtarget) {
45347   SDValue Cond = N->getOperand(0);
45348   SDValue LHS = N->getOperand(1);
45349   SDValue RHS = N->getOperand(2);
45350   EVT VT = LHS.getValueType();
45351   EVT CondVT = Cond.getValueType();
45352   SDLoc DL(N);
45353   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45354 
45355   if (N->getOpcode() != ISD::VSELECT)
45356     return SDValue();
45357 
45358   assert(CondVT.isVector() && "Vector select expects a vector selector!");
45359 
45360   // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
45361   // TODO: Can we assert that both operands are not zeros (because that should
45362   //       get simplified at node creation time)?
45363   bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
45364   bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
45365 
45366   // If both inputs are 0/undef, create a complete zero vector.
45367   // FIXME: As noted above this should be handled by DAGCombiner/getNode.
45368   if (TValIsAllZeros && FValIsAllZeros) {
45369     if (VT.isFloatingPoint())
45370       return DAG.getConstantFP(0.0, DL, VT);
45371     return DAG.getConstant(0, DL, VT);
45372   }
45373 
45374   // To use the condition operand as a bitwise mask, it must have elements that
45375   // are the same size as the select elements. I.e., the condition operand must
45376   // have already been promoted from the IR select condition type <N x i1>.
45377   // Don't check if the types themselves are equal because that excludes
45378   // vector floating-point selects.
45379   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
45380     return SDValue();
45381 
45382   // Try to invert the condition if true value is not all 1s and false value is
45383   // not all 0s. Only do this if the condition has one use.
45384   bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
45385   if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
45386       // Check if the selector will be produced by CMPP*/PCMP*.
45387       Cond.getOpcode() == ISD::SETCC &&
45388       // Check if SETCC has already been promoted.
45389       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
45390           CondVT) {
45391     bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
45392 
45393     if (TValIsAllZeros || FValIsAllOnes) {
45394       SDValue CC = Cond.getOperand(2);
45395       ISD::CondCode NewCC = ISD::getSetCCInverse(
45396           cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
45397       Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
45398                           NewCC);
45399       std::swap(LHS, RHS);
45400       TValIsAllOnes = FValIsAllOnes;
45401       FValIsAllZeros = TValIsAllZeros;
45402     }
45403   }
45404 
45405   // Cond value must be 'sign splat' to be converted to a logical op.
45406   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
45407     return SDValue();
45408 
45409   // vselect Cond, 111..., 000... -> Cond
45410   if (TValIsAllOnes && FValIsAllZeros)
45411     return DAG.getBitcast(VT, Cond);
45412 
45413   if (!TLI.isTypeLegal(CondVT))
45414     return SDValue();
45415 
45416   // vselect Cond, 111..., X -> or Cond, X
45417   if (TValIsAllOnes) {
45418     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
45419     SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
45420     return DAG.getBitcast(VT, Or);
45421   }
45422 
45423   // vselect Cond, X, 000... -> and Cond, X
45424   if (FValIsAllZeros) {
45425     SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
45426     SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
45427     return DAG.getBitcast(VT, And);
45428   }
45429 
45430   // vselect Cond, 000..., X -> andn Cond, X
45431   if (TValIsAllZeros) {
45432     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
45433     SDValue AndN;
45434     // The canonical form differs for i1 vectors - X86ISD::ANDNP is not used.
45435     if (CondVT.getScalarType() == MVT::i1)
45436       AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
45437                          CastRHS);
45438     else
45439       AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
45440     return DAG.getBitcast(VT, AndN);
45441   }
45442 
45443   return SDValue();
45444 }
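// For illustration of the combine above (modulo bitcasts): if Cond is a v4i32
// lane mask produced by a pcmpgtd (each lane all-ones or all-zeros), then:
//   vselect Cond, -1, X --> (or Cond, X)
//   vselect Cond, X, 0  --> (and Cond, X)
//   vselect Cond, 0, X  --> (andnp Cond, X)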
45445 
45446 /// If both arms of a vector select are concatenated vectors, split the select,
45447 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
45448 ///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
45449 ///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
45450 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
45451                                   const X86Subtarget &Subtarget) {
45452   unsigned Opcode = N->getOpcode();
45453   if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
45454     return SDValue();
45455 
45456   // TODO: Split 512-bit vectors too?
45457   EVT VT = N->getValueType(0);
45458   if (!VT.is256BitVector())
45459     return SDValue();
45460 
45461   // TODO: Split as long as any 2 of the 3 operands are concatenated?
45462   SDValue Cond = N->getOperand(0);
45463   SDValue TVal = N->getOperand(1);
45464   SDValue FVal = N->getOperand(2);
45465   SmallVector<SDValue, 4> CatOpsT, CatOpsF;
45466   if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
45467       !collectConcatOps(TVal.getNode(), CatOpsT, DAG) ||
45468       !collectConcatOps(FVal.getNode(), CatOpsF, DAG))
45469     return SDValue();
45470 
45471   auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
45472                             ArrayRef<SDValue> Ops) {
45473     return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
45474   };
45475   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
45476                           makeBlend, /*CheckBWI*/ false);
45477 }
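// For example (schematic): a v8i32 vselect whose true and false arms are each
// a concat_vectors of two v4i32 halves becomes two v4i32 blends, one per
// 128-bit half, whose results are concatenated back together.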
45478 
45479 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
45480   SDValue Cond = N->getOperand(0);
45481   SDValue LHS = N->getOperand(1);
45482   SDValue RHS = N->getOperand(2);
45483   SDLoc DL(N);
45484 
45485   auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
45486   auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
45487   if (!TrueC || !FalseC)
45488     return SDValue();
45489 
45490   // Don't do this for crazy integer types.
45491   EVT VT = N->getValueType(0);
45492   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
45493     return SDValue();
45494 
45495   // We're going to use the condition bit in math or logic ops. We could allow
45496   // this with a wider condition value (post-legalization it becomes an i8),
45497   // but if nothing is creating selects that late, it doesn't matter.
45498   if (Cond.getValueType() != MVT::i1)
45499     return SDValue();
45500 
45501   // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
45502   // 3, 5, or 9 with i32/i64, so those get transformed too.
45503   // TODO: For constants that overflow or do not differ by power-of-2 or small
45504   // multiplier, convert to 'and' + 'add'.
45505   const APInt &TrueVal = TrueC->getAPIntValue();
45506   const APInt &FalseVal = FalseC->getAPIntValue();
45507 
45508   // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
45509   if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
45510       Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
45511     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45512     if (CC == ISD::SETEQ || CC == ISD::SETNE)
45513       return SDValue();
45514   }
45515 
45516   bool OV;
45517   APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
45518   if (OV)
45519     return SDValue();
45520 
45521   APInt AbsDiff = Diff.abs();
45522   if (AbsDiff.isPowerOf2() ||
45523       ((VT == MVT::i32 || VT == MVT::i64) &&
45524        (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
45525 
45526     // We need a positive multiplier constant for shift/LEA codegen. The 'not'
45527     // of the condition can usually be folded into a compare predicate, but even
45528     // without that, the sequence should be cheaper than a CMOV alternative.
45529     if (TrueVal.slt(FalseVal)) {
45530       Cond = DAG.getNOT(DL, Cond, MVT::i1);
45531       std::swap(TrueC, FalseC);
45532     }
45533 
45534     // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
45535     SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
45536 
45537     // Multiply condition by the difference if non-one.
45538     if (!AbsDiff.isOne())
45539       R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
45540 
45541     // Add the base if non-zero.
45542     if (!FalseC->isZero())
45543       R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
45544 
45545     return R;
45546   }
45547 
45548   return SDValue();
45549 }
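// Illustrative examples of the combine above (arbitrary constants):
//   select Cond, 7, 3 --> (zext(Cond) * 4) + 3, where the multiply-by-4 lowers
//   to a shift.
//   select Cond, 2, 7 --> TrueVal < FalseVal, so the condition is inverted and
//   the arms are swapped, giving (zext(not(Cond)) * 5) + 2, where the multiply
//   by 5 fits an LEA for i32/i64.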
45550 
45551 /// If this is a *dynamic* select (non-constant condition) and we can match
45552 /// this node with one of the variable blend instructions, restructure the
45553 /// condition so that blends can use the high (sign) bit of each element.
45554 /// This function will also call SimplifyDemandedBits on already created
45555 /// BLENDV to perform additional simplifications.
45556 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
45557                                       TargetLowering::DAGCombinerInfo &DCI,
45558                                       const X86Subtarget &Subtarget) {
45559   SDValue Cond = N->getOperand(0);
45560   if ((N->getOpcode() != ISD::VSELECT &&
45561        N->getOpcode() != X86ISD::BLENDV) ||
45562       ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
45563     return SDValue();
45564 
45565   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45566   unsigned BitWidth = Cond.getScalarValueSizeInBits();
45567   EVT VT = N->getValueType(0);
45568 
45569   // We can only handle the cases where VSELECT is directly legal on the
45570   // subtarget. We custom lower VSELECT nodes with constant conditions and
45571   // this makes it hard to see whether a dynamic VSELECT will correctly
45572   // lower, so we both check the operation's status and explicitly handle the
45573   // cases where a *dynamic* blend will fail even though a constant-condition
45574   // blend could be custom lowered.
45575   // FIXME: We should find a better way to handle this class of problems.
45576   // Potentially, we should combine constant-condition vselect nodes
45577   // pre-legalization into shuffles and not mark as many types as custom
45578   // lowered.
45579   if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
45580     return SDValue();
45581   // FIXME: We don't support i16-element blends currently. We could and
45582   // should support them by making *all* the bits in the condition be set
45583   // rather than just the high bit and using an i8-element blend.
45584   if (VT.getVectorElementType() == MVT::i16)
45585     return SDValue();
45586   // Dynamic blending was only available from SSE4.1 onward.
45587   if (VT.is128BitVector() && !Subtarget.hasSSE41())
45588     return SDValue();
45589   // Byte blends are only available in AVX2
45590   if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
45591     return SDValue();
45592   // There are no 512-bit blend instructions that use sign bits.
45593   if (VT.is512BitVector())
45594     return SDValue();
45595 
45596   // Don't optimize before the condition has been transformed to a legal type
45597   // and don't ever optimize vector selects that map to AVX512 mask-registers.
45598   if (BitWidth < 8 || BitWidth > 64)
45599     return SDValue();
45600 
45601   auto OnlyUsedAsSelectCond = [](SDValue Cond) {
45602     for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
45603          UI != UE; ++UI)
45604       if ((UI->getOpcode() != ISD::VSELECT &&
45605            UI->getOpcode() != X86ISD::BLENDV) ||
45606           UI.getOperandNo() != 0)
45607         return false;
45608 
45609     return true;
45610   };
45611 
45612   APInt DemandedBits(APInt::getSignMask(BitWidth));
45613 
45614   if (OnlyUsedAsSelectCond(Cond)) {
45615     KnownBits Known;
45616     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
45617                                           !DCI.isBeforeLegalizeOps());
45618     if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
45619       return SDValue();
45620 
45621     // If we changed the computation somewhere in the DAG, this change will
45622     // affect all users of Cond. Update all the nodes so that we do not use
45623     // the generic VSELECT anymore. Otherwise, we may perform wrong
45624     // optimizations as we messed with the actual expectation for the vector
45625     // boolean values.
45626     for (SDNode *U : Cond->uses()) {
45627       if (U->getOpcode() == X86ISD::BLENDV)
45628         continue;
45629 
45630       SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
45631                                Cond, U->getOperand(1), U->getOperand(2));
45632       DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
45633       DCI.AddToWorklist(U);
45634     }
45635     DCI.CommitTargetLoweringOpt(TLO);
45636     return SDValue(N, 0);
45637   }
45638 
45639   // Otherwise we can still at least try to simplify multiple use bits.
45640   if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
45641       return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
45642                          N->getOperand(1), N->getOperand(2));
45643 
45644   return SDValue();
45645 }
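// For context (hardware semantics): the variable blend instructions this maps
// to (PBLENDVB / BLENDVPS / BLENDVPD) consult only the most significant bit of
// each condition element, which is why only the sign bit is demanded above.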
45646 
45647 // Try to match:
45648 //   (or (and (M, (sub 0, X)), (pandn M, X)))
45649 // which is a special case of:
45650 //   (select M, (sub 0, X), X)
45651 // Per:
45652 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
45653 // We know that, if fNegate is 0 or 1:
45654 //   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
45655 //
45656 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
45657 //   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
45658 //   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
45659 // This lets us transform our vselect to:
45660 //   (add (xor X, M), (and M, 1))
45661 // And further to:
45662 //   (sub (xor X, M), M)
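// Illustrative trace (arbitrary values): with X = 5 and M = all-ones,
//   (xor X, M) = -6 and (sub -6, M) = -6 - (-1) = -5 = -X;
// with M = 0, (xor X, M) = 5 and (sub 5, 0) = 5 = X.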
45663 static SDValue combineLogicBlendIntoConditionalNegate(
45664     EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
45665     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
45666   EVT MaskVT = Mask.getValueType();
45667   assert(MaskVT.isInteger() &&
45668          DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
45669          "Mask must be zero/all-bits");
45670 
45671   if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
45672     return SDValue();
45673   if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
45674     return SDValue();
45675 
45676   auto IsNegV = [](SDNode *N, SDValue V) {
45677     return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
45678            ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
45679   };
45680 
45681   SDValue V;
45682   if (IsNegV(Y.getNode(), X))
45683     V = X;
45684   else if (IsNegV(X.getNode(), Y))
45685     V = Y;
45686   else
45687     return SDValue();
45688 
45689   SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
45690   SDValue SubOp2 = Mask;
45691 
45692   // If the negate was on the false side of the select, then
45693   // the operands of the SUB need to be swapped. PR 27251.
45694   // This is because the pattern being matched above is
45695   // (vselect M, (sub (0, X), X)  -> (sub (xor X, M), M)
45696   // but if the pattern matched was
45697   // (vselect M, X, (sub (0, X))), that is really negation of the pattern
45698   // above, -(vselect M, (sub 0, X), X), and therefore the replacement
45699   // pattern also needs to be a negation of the replacement pattern above.
45700   // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
45701   // sub accomplishes the negation of the replacement pattern.
45702   if (V == Y)
45703     std::swap(SubOp1, SubOp2);
45704 
45705   SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
45706   return DAG.getBitcast(VT, Res);
45707 }
45708 
45709 /// Do target-specific dag combines on SELECT and VSELECT nodes.
45710 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
45711                              TargetLowering::DAGCombinerInfo &DCI,
45712                              const X86Subtarget &Subtarget) {
45713   SDLoc DL(N);
45714   SDValue Cond = N->getOperand(0);
45715   SDValue LHS = N->getOperand(1);
45716   SDValue RHS = N->getOperand(2);
45717 
45718   // Try simplification again because we use this function to optimize
45719   // BLENDV nodes that are not handled by the generic combiner.
45720   if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
45721     return V;
45722 
45723   EVT VT = LHS.getValueType();
45724   EVT CondVT = Cond.getValueType();
45725   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45726   bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
45727 
45728   // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
45729   // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
45730   // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
45731   if (CondVT.isVector() && CondVT.isInteger() &&
45732       CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
45733       (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
45734       DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
45735     if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
45736                                                            DL, DAG, Subtarget))
45737       return V;
45738 
45739   // Convert vselects with constant condition into shuffles.
45740   if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
45741       (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
45742     SmallVector<int, 64> Mask;
45743     if (createShuffleMaskFromVSELECT(Mask, Cond,
45744                                      N->getOpcode() == X86ISD::BLENDV))
45745       return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
45746   }
45747 
45748   // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
45749   // by forcing the unselected elements to zero.
45750   // TODO: Can we handle more shuffles with this?
45751   if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
45752       LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
45753       LHS.hasOneUse() && RHS.hasOneUse()) {
45754     MVT SimpleVT = VT.getSimpleVT();
45755     SmallVector<SDValue, 1> LHSOps, RHSOps;
45756     SmallVector<int, 64> LHSMask, RHSMask, CondMask;
45757     if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
45758         getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
45759         getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
45760       int NumElts = VT.getVectorNumElements();
45761       for (int i = 0; i != NumElts; ++i) {
45762         // getConstVector sets negative shuffle mask values as undef, so ensure
45763         // we hardcode SM_SentinelZero values to zero (0x80).
45764         if (CondMask[i] < NumElts) {
45765           LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
45766           RHSMask[i] = 0x80;
45767         } else {
45768           LHSMask[i] = 0x80;
45769           RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
45770         }
45771       }
45772       LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
45773                         getConstVector(LHSMask, SimpleVT, DAG, DL, true));
45774       RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
45775                         getConstVector(RHSMask, SimpleVT, DAG, DL, true));
45776       return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
45777     }
45778   }
45779 
45780   // If we have SSE[12] support, try to form min/max nodes. SSE min/max
45781   // instructions match the semantics of the common C idiom x<y?x:y but not
45782   // x<=y?x:y, because of how they handle negative zero (which can be
45783   // ignored in unsafe-math mode).
45784   // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
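  // For reference: (minps a, b) computes a < b ? a : b and returns the second
  // operand when either input is NaN or when the inputs compare equal
  // (including +0.0 vs -0.0), which is why the NaN and signed-zero checks
  // below matter.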
45785   if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
45786       VT != MVT::f80 && VT != MVT::f128 && !isSoftFP16(VT, Subtarget) &&
45787       (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
45788       (Subtarget.hasSSE2() ||
45789        (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
45790     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45791 
45792     unsigned Opcode = 0;
45793     // Check for x CC y ? x : y.
45794     if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
45795         DAG.isEqualTo(RHS, Cond.getOperand(1))) {
45796       switch (CC) {
45797       default: break;
45798       case ISD::SETULT:
45799         // Converting this to a min would handle NaNs incorrectly, and swapping
45800         // the operands would cause it to handle comparisons between positive
45801         // and negative zero incorrectly.
45802         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
45803           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45804               !(DAG.isKnownNeverZeroFloat(LHS) ||
45805                 DAG.isKnownNeverZeroFloat(RHS)))
45806             break;
45807           std::swap(LHS, RHS);
45808         }
45809         Opcode = X86ISD::FMIN;
45810         break;
45811       case ISD::SETOLE:
45812         // Converting this to a min would handle comparisons between positive
45813         // and negative zero incorrectly.
45814         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45815             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
45816           break;
45817         Opcode = X86ISD::FMIN;
45818         break;
45819       case ISD::SETULE:
45820         // Converting this to a min would handle both negative zeros and NaNs
45821         // incorrectly, but we can swap the operands to fix both.
45822         std::swap(LHS, RHS);
45823         [[fallthrough]];
45824       case ISD::SETOLT:
45825       case ISD::SETLT:
45826       case ISD::SETLE:
45827         Opcode = X86ISD::FMIN;
45828         break;
45829 
45830       case ISD::SETOGE:
45831         // Converting this to a max would handle comparisons between positive
45832         // and negative zero incorrectly.
45833         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45834             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
45835           break;
45836         Opcode = X86ISD::FMAX;
45837         break;
45838       case ISD::SETUGT:
45839         // Converting this to a max would handle NaNs incorrectly, and swapping
45840         // the operands would cause it to handle comparisons between positive
45841         // and negative zero incorrectly.
45842         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
45843           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45844               !(DAG.isKnownNeverZeroFloat(LHS) ||
45845                 DAG.isKnownNeverZeroFloat(RHS)))
45846             break;
45847           std::swap(LHS, RHS);
45848         }
45849         Opcode = X86ISD::FMAX;
45850         break;
45851       case ISD::SETUGE:
45852         // Converting this to a max would handle both negative zeros and NaNs
45853         // incorrectly, but we can swap the operands to fix both.
45854         std::swap(LHS, RHS);
45855         [[fallthrough]];
45856       case ISD::SETOGT:
45857       case ISD::SETGT:
45858       case ISD::SETGE:
45859         Opcode = X86ISD::FMAX;
45860         break;
45861       }
45862     // Check for x CC y ? y : x -- a min/max with reversed arms.
45863     } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
45864                DAG.isEqualTo(RHS, Cond.getOperand(0))) {
45865       switch (CC) {
45866       default: break;
45867       case ISD::SETOGE:
45868         // Converting this to a min would handle comparisons between positive
45869         // and negative zero incorrectly, and swapping the operands would
45870         // cause it to handle NaNs incorrectly.
45871         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45872             !(DAG.isKnownNeverZeroFloat(LHS) ||
45873               DAG.isKnownNeverZeroFloat(RHS))) {
45874           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45875             break;
45876           std::swap(LHS, RHS);
45877         }
45878         Opcode = X86ISD::FMIN;
45879         break;
45880       case ISD::SETUGT:
45881         // Converting this to a min would handle NaNs incorrectly.
45882         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45883           break;
45884         Opcode = X86ISD::FMIN;
45885         break;
45886       case ISD::SETUGE:
45887         // Converting this to a min would handle both negative zeros and NaNs
45888         // incorrectly, but we can swap the operands to fix both.
45889         std::swap(LHS, RHS);
45890         [[fallthrough]];
45891       case ISD::SETOGT:
45892       case ISD::SETGT:
45893       case ISD::SETGE:
45894         Opcode = X86ISD::FMIN;
45895         break;
45896 
45897       case ISD::SETULT:
45898         // Converting this to a max would handle NaNs incorrectly.
45899         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45900           break;
45901         Opcode = X86ISD::FMAX;
45902         break;
45903       case ISD::SETOLE:
45904         // Converting this to a max would handle comparisons between positive
45905         // and negative zero incorrectly, and swapping the operands would
45906         // cause it to handle NaNs incorrectly.
45907         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45908             !DAG.isKnownNeverZeroFloat(LHS) &&
45909             !DAG.isKnownNeverZeroFloat(RHS)) {
45910           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45911             break;
45912           std::swap(LHS, RHS);
45913         }
45914         Opcode = X86ISD::FMAX;
45915         break;
45916       case ISD::SETULE:
45917         // Converting this to a max would handle both negative zeros and NaNs
45918         // incorrectly, but we can swap the operands to fix both.
45919         std::swap(LHS, RHS);
45920         [[fallthrough]];
45921       case ISD::SETOLT:
45922       case ISD::SETLT:
45923       case ISD::SETLE:
45924         Opcode = X86ISD::FMAX;
45925         break;
45926       }
45927     }
45928 
45929     if (Opcode)
45930       return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
45931   }
45932 
45933   // Some mask scalar intrinsics rely on checking if only one bit is set
45934   // and implement it in C code like this:
45935   // A[0] = (U & 1) ? A[0] : W[0];
45936   // This creates some redundant instructions that break pattern matching.
45937   // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
45938   if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
45939       Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
45940     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45941     SDValue AndNode = Cond.getOperand(0);
45942     if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
45943         isNullConstant(Cond.getOperand(1)) &&
45944         isOneConstant(AndNode.getOperand(1))) {
45945       // LHS and RHS are swapped because the setcc outputs 1 when the AND
45946       // result is 0, and vice versa.
45947       AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
45948       return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
45949     }
45950   }
45951 
45952   // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
45953   // lowering on KNL. In this case we convert it to
45954   // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
45955   // The same applies to all vectors of i8 and i16 without BWI.
45956   // Make sure we extend these even before type legalization gets a chance to
45957   // split wide vectors.
45958   // Since SKX, these selects have a proper lowering.
45959   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
45960       CondVT.getVectorElementType() == MVT::i1 &&
45961       (VT.getVectorElementType() == MVT::i8 ||
45962        VT.getVectorElementType() == MVT::i16)) {
45963     Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
45964     return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
45965   }
45966 
45967   // AVX512 - Extend select with zero to merge with target shuffle.
45968   // select(mask, extract_subvector(shuffle(x)), zero) -->
45969   // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
45970   // TODO - support non target shuffles as well.
45971   if (Subtarget.hasAVX512() && CondVT.isVector() &&
45972       CondVT.getVectorElementType() == MVT::i1) {
45973     auto SelectableOp = [&TLI](SDValue Op) {
45974       return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45975              isTargetShuffle(Op.getOperand(0).getOpcode()) &&
45976              isNullConstant(Op.getOperand(1)) &&
45977              TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
45978              Op.hasOneUse() && Op.getOperand(0).hasOneUse();
45979     };
45980 
45981     bool SelectableLHS = SelectableOp(LHS);
45982     bool SelectableRHS = SelectableOp(RHS);
45983     bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
45984     bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
45985 
45986     if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
45987       EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
45988                                 : RHS.getOperand(0).getValueType();
45989       EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
45990       LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
45991                             VT.getSizeInBits());
45992       RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
45993                             VT.getSizeInBits());
45994       Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
45995                          DAG.getUNDEF(SrcCondVT), Cond,
45996                          DAG.getIntPtrConstant(0, DL));
45997       SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
45998       return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
45999     }
46000   }
46001 
46002   if (SDValue V = combineSelectOfTwoConstants(N, DAG))
46003     return V;
46004 
46005   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
46006       Cond.hasOneUse()) {
46007     EVT CondVT = Cond.getValueType();
46008     SDValue Cond0 = Cond.getOperand(0);
46009     SDValue Cond1 = Cond.getOperand(1);
46010     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
46011 
46012     // Canonicalize min/max:
46013     // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
46014     // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
46015     // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
46016     // the need for an extra compare against zero. e.g.
46017     // (a - b) > 0 ? (a - b) : 0 -> (a - b) >= 0 ? (a - b) : 0
46018     // subl   %esi, %edi
46019     // testl  %edi, %edi
46020     // movl   $0, %eax
46021     // cmovgl %edi, %eax
46022     // =>
46023     // xorl   %eax, %eax
46024     // subl   %esi, $edi
46025     // cmovsl %eax, %edi
46026     //
46027     // We can also canonicalize
46028     //  (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
46029     //  (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
46030     // This allows the use of a test instruction for the compare.
46031     if (LHS == Cond0 && RHS == Cond1) {
46032       if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
46033           (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
46034         ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
46035         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
46036         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
46037       }
46038       if (CC == ISD::SETUGT && isOneConstant(RHS)) {
46039         ISD::CondCode NewCC = ISD::SETUGE;
46040         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
46041         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
46042       }
46043     }
46044 
46045     // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
46046     // fold eq + gt/lt nested selects into ge/le selects
46047     // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
46048     // --> (select (cmpuge Cond0, Cond1), LHS, Y)
46049     // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
46050     // --> (select (cmpsle Cond0, Cond1), LHS, Y)
46051     // .. etc ..
46052     if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
46053         RHS.getOperand(0).getOpcode() == ISD::SETCC) {
46054       SDValue InnerSetCC = RHS.getOperand(0);
46055       ISD::CondCode InnerCC =
46056           cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
46057       if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
46058           Cond0 == InnerSetCC.getOperand(0) &&
46059           Cond1 == InnerSetCC.getOperand(1)) {
46060         ISD::CondCode NewCC;
46061         switch (CC == ISD::SETEQ ? InnerCC : CC) {
46062         case ISD::SETGT:  NewCC = ISD::SETGE; break;
46063         case ISD::SETLT:  NewCC = ISD::SETLE; break;
46064         case ISD::SETUGT: NewCC = ISD::SETUGE; break;
46065         case ISD::SETULT: NewCC = ISD::SETULE; break;
46066         default: NewCC = ISD::SETCC_INVALID; break;
46067         }
46068         if (NewCC != ISD::SETCC_INVALID) {
46069           Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
46070           return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
46071         }
46072       }
46073     }
46074   }
46075 
46076   // Check if the first operand is all zeros and Cond type is vXi1.
46077   // If this is an AVX512 target, we can improve the use of zero masking by
46078   // swapping the operands and inverting the condition.
46079   if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
46080       Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
46081       ISD::isBuildVectorAllZeros(LHS.getNode()) &&
46082       !ISD::isBuildVectorAllZeros(RHS.getNode())) {
46083     // Invert the cond to not(cond) : xor(op,allones)=not(op)
46084     SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
46085     // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
46086     return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
46087   }
46088 
46089   // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
46090   // get split by legalization.
46091   if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
46092       CondVT.getVectorElementType() == MVT::i1 && Cond.hasOneUse() &&
46093       TLI.isTypeLegal(VT.getScalarType())) {
46094     EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
46095     if (SDValue ExtCond = combineToExtendBoolVectorInReg(
46096             ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
46097       ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
46098       return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
46099     }
46100   }
46101 
46102   // Early exit check
46103   if (!TLI.isTypeLegal(VT) || isSoftFP16(VT, Subtarget))
46104     return SDValue();
46105 
46106   if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
46107     return V;
46108 
46109   if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
46110     return V;
46111 
46112   if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
46113     return V;
46114 
46115   // select(~Cond, X, Y) -> select(Cond, Y, X)
46116   if (CondVT.getScalarType() != MVT::i1) {
46117     if (SDValue CondNot = IsNOT(Cond, DAG))
46118       return DAG.getNode(N->getOpcode(), DL, VT,
46119                          DAG.getBitcast(CondVT, CondNot), RHS, LHS);
46120 
46121     if (Cond.getOpcode() == X86ISD::PCMPGT && Cond.hasOneUse()) {
46122       // pcmpgt(X, -1) -> pcmpgt(0, X) so that select/blendv can just use
46123       // the sign bit.
46124       if (ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode())) {
46125         Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
46126                            DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
46127         return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
46128       }
46129 
46130       // smin(LHS, RHS) : select(pcmpgt(RHS, LHS), LHS, RHS)
46131       //               -> select(pcmpgt(LHS, RHS), RHS, LHS)
46132       // iff the commuted pcmpgt() already exists.
46133       // TODO: Could DAGCombiner::combine cse search for SETCC nodes, like it
46134       // does for commutative binops?
46135       if (Cond.getOperand(0) == RHS && Cond.getOperand(1) == LHS) {
46136         if (SDNode *FlipCond =
46137                 DAG.getNodeIfExists(X86ISD::PCMPGT, DAG.getVTList(CondVT),
46138                                     {Cond.getOperand(1), Cond.getOperand(0)})) {
46139           return DAG.getNode(N->getOpcode(), DL, VT, SDValue(FlipCond, 0), RHS,
46140                              LHS);
46141         }
46142       }
46143     }
46144   }
46145 
46146   // Try to optimize vXi1 selects if both operands are either all constants or
46147   // bitcasts from scalar integer type. In that case we can convert the operands
46148   // to integer and use an integer select which will be converted to a CMOV.
46149   // We need to take a little bit of care to avoid creating an i64 type after
46150   // type legalization.
46151   if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
46152       VT.getVectorElementType() == MVT::i1 &&
46153       (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
46154     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
46155     bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
46156     bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
46157 
46158     if ((LHSIsConst ||
46159          (LHS.getOpcode() == ISD::BITCAST &&
46160           LHS.getOperand(0).getValueType() == IntVT)) &&
46161         (RHSIsConst ||
46162          (RHS.getOpcode() == ISD::BITCAST &&
46163           RHS.getOperand(0).getValueType() == IntVT))) {
46164       if (LHSIsConst)
46165         LHS = combinevXi1ConstantToInteger(LHS, DAG);
46166       else
46167         LHS = LHS.getOperand(0);
46168 
46169       if (RHSIsConst)
46170         RHS = combinevXi1ConstantToInteger(RHS, DAG);
46171       else
46172         RHS = RHS.getOperand(0);
46173 
46174       SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
46175       return DAG.getBitcast(VT, Select);
46176     }
46177   }
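  // For illustration (hypothetical operands): a
  //   (select i1 C, (v8i1 constant), (v8i1 (bitcast (i8 Y))))
  // becomes (bitcast (select i1 C, i8 <imm>, i8 Y)), which can lower to a CMOV.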
46178 
46179   // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
46180   // single bits, then invert the predicate and swap the select operands.
46181   // This can lower using a vector shift bit-hack rather than mask and compare.
46182   if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
46183       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
46184       Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
46185       Cond.getOperand(0).getOpcode() == ISD::AND &&
46186       isNullOrNullSplat(Cond.getOperand(1)) &&
46187       cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
46188       Cond.getOperand(0).getValueType() == VT) {
46189     // The 'and' mask must be composed of power-of-2 constants.
46190     SDValue And = Cond.getOperand(0);
46191     auto *C = isConstOrConstSplat(And.getOperand(1));
46192     if (C && C->getAPIntValue().isPowerOf2()) {
46193       // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
46194       SDValue NotCond =
46195           DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
46196       return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
46197     }
46198 
46199     // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
46200     // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
46201     // 16-bit lacks a proper blendv.
46202     unsigned EltBitWidth = VT.getScalarSizeInBits();
46203     bool CanShiftBlend =
46204         TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
46205                                 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
46206                                 (Subtarget.hasXOP()));
46207     if (CanShiftBlend &&
46208         ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
46209           return C->getAPIntValue().isPowerOf2();
46210         })) {
46211       // Create a left-shift constant to get the mask bits over to the sign-bit.
46212       SDValue Mask = And.getOperand(1);
46213       SmallVector<int, 32> ShlVals;
46214       for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
46215         auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
46216         ShlVals.push_back(EltBitWidth - 1 -
46217                           MaskVal->getAPIntValue().exactLogBase2());
46218       }
46219       // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
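      // For example (illustrative): with 32-bit elements and a mask element of
      // 16 (bit 4), the shift amount is 31 - 4 = 27, which moves bit 4 into the
      // sign bit so the select can key off 'Shl < 0'.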
46220       SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
46221       SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
46222       SDValue NewCond =
46223           DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
46224       return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
46225     }
46226   }
46227 
46228   return SDValue();
46229 }
46230 
46231 /// Combine:
46232 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
46233 /// to:
46234 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
46235 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
46236 /// Note that this is only legal for some op/cc combinations.
46237 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
46238                                        SelectionDAG &DAG,
46239                                        const X86Subtarget &Subtarget) {
46240   // This combine only operates on CMP-like nodes.
46241   if (!(Cmp.getOpcode() == X86ISD::CMP ||
46242         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
46243     return SDValue();
46244 
46245   // Can't replace the cmp if it has more uses than the one we're looking at.
46246   // FIXME: We would like to be able to handle this, but would need to make sure
46247   // all uses were updated.
46248   if (!Cmp.hasOneUse())
46249     return SDValue();
46250 
46251   // This only applies to variations of the common case:
46252   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
46253   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
46254   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
46255   //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
46256   // Using the proper condcodes (see below), overflow is checked for.
46257 
46258   // FIXME: We can generalize both constraints:
46259   // - XOR/OR/AND (if they were made to survive AtomicExpand)
46260   // - LHS != 1
46261   // if the result is compared.
46262 
46263   SDValue CmpLHS = Cmp.getOperand(0);
46264   SDValue CmpRHS = Cmp.getOperand(1);
46265   EVT CmpVT = CmpLHS.getValueType();
46266 
46267   if (!CmpLHS.hasOneUse())
46268     return SDValue();
46269 
46270   unsigned Opc = CmpLHS.getOpcode();
46271   if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
46272     return SDValue();
46273 
46274   SDValue OpRHS = CmpLHS.getOperand(2);
46275   auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
46276   if (!OpRHSC)
46277     return SDValue();
46278 
46279   APInt Addend = OpRHSC->getAPIntValue();
46280   if (Opc == ISD::ATOMIC_LOAD_SUB)
46281     Addend = -Addend;
46282 
46283   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
46284   if (!CmpRHSC)
46285     return SDValue();
46286 
46287   APInt Comparison = CmpRHSC->getAPIntValue();
46288   APInt NegAddend = -Addend;
46289 
46290   // See if we can adjust the CC to make the comparison match the negated
46291   // addend.
46292   if (Comparison != NegAddend) {
46293     APInt IncComparison = Comparison + 1;
46294     if (IncComparison == NegAddend) {
46295       if (CC == X86::COND_A && !Comparison.isMaxValue()) {
46296         Comparison = IncComparison;
46297         CC = X86::COND_AE;
46298       } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
46299         Comparison = IncComparison;
46300         CC = X86::COND_L;
46301       }
46302     }
46303     APInt DecComparison = Comparison - 1;
46304     if (DecComparison == NegAddend) {
46305       if (CC == X86::COND_AE && !Comparison.isMinValue()) {
46306         Comparison = DecComparison;
46307         CC = X86::COND_A;
46308       } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
46309         Comparison = DecComparison;
46310         CC = X86::COND_LE;
46311       }
46312     }
46313   }
46314 
46315   // If the addend is the negation of the comparison value, then we can do
46316   // a full comparison by emitting the atomic arithmetic as a locked sub.
46317   if (Comparison == NegAddend) {
46318     // The CC is fine, but we need to rewrite the LHS of the comparison as an
46319     // atomic sub.
46320     auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
46321     auto AtomicSub = DAG.getAtomic(
46322         ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
46323         /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
46324         /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
46325         AN->getMemOperand());
46326     auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
46327     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
46328     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
46329     return LockOp;
46330   }
46331 
46332   // We can handle comparisons with zero in a number of cases by manipulating
46333   // the CC used.
46334   if (!Comparison.isZero())
46335     return SDValue();
46336 
46337   if (CC == X86::COND_S && Addend == 1)
46338     CC = X86::COND_LE;
46339   else if (CC == X86::COND_NS && Addend == 1)
46340     CC = X86::COND_G;
46341   else if (CC == X86::COND_G && Addend == -1)
46342     CC = X86::COND_GE;
46343   else if (CC == X86::COND_LE && Addend == -1)
46344     CC = X86::COND_L;
46345   else
46346     return SDValue();
46347 
46348   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
46349   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
46350   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
46351   return LockOp;
46352 }
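// Illustrative example of the combine above (arbitrary constants): for
//   (cmp (atomic_load_add x, -5), 5)
// the comparison value equals the negated addend, so the RMW operation is
// rewritten as a locked 'sub x, 5' and the comparison reuses its EFLAGS.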
46353 
46354 // Check whether a boolean test is testing a boolean value generated by
46355 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
46356 // code.
46357 //
46358 // Simplify the following patterns:
46359 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
46360 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
46361 // to (Op EFLAGS Cond)
46362 //
46363 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
46364 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
46365 // to (Op EFLAGS !Cond)
46366 //
46367 // where Op could be BRCOND or CMOV.
46368 //
46369 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
46370   // This combine only operates on CMP-like nodes.
46371   if (!(Cmp.getOpcode() == X86ISD::CMP ||
46372         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
46373     return SDValue();
46374 
46375   // Quit if not used as a boolean value.
46376   if (CC != X86::COND_E && CC != X86::COND_NE)
46377     return SDValue();
46378 
46379   // Check CMP operands. One of them should be 0 or 1 and the other should be
46380   // an SetCC or extended from it.
46381   // a SETCC or a value extended from it.
46382   SDValue Op2 = Cmp.getOperand(1);
46383 
46384   SDValue SetCC;
46385   const ConstantSDNode* C = nullptr;
46386   bool needOppositeCond = (CC == X86::COND_E);
46387   bool checkAgainstTrue = false; // Is it a comparison against 1?
46388 
46389   if ((C = dyn_cast<ConstantSDNode>(Op1)))
46390     SetCC = Op2;
46391   else if ((C = dyn_cast<ConstantSDNode>(Op2)))
46392     SetCC = Op1;
46393   else // Quit if neither operand is a constant.
46394     return SDValue();
46395 
46396   if (C->getZExtValue() == 1) {
46397     needOppositeCond = !needOppositeCond;
46398     checkAgainstTrue = true;
46399   } else if (C->getZExtValue() != 0)
46400     // Quit if the constant is neither 0 or 1.
46401     // Quit if the constant is neither 0 nor 1.
46402 
46403   bool truncatedToBoolWithAnd = false;
46404   // Skip (zext $x), (trunc $x), or (and $x, 1) node.
46405   while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
46406          SetCC.getOpcode() == ISD::TRUNCATE ||
46407          SetCC.getOpcode() == ISD::AND) {
46408     if (SetCC.getOpcode() == ISD::AND) {
46409       int OpIdx = -1;
46410       if (isOneConstant(SetCC.getOperand(0)))
46411         OpIdx = 1;
46412       if (isOneConstant(SetCC.getOperand(1)))
46413         OpIdx = 0;
46414       if (OpIdx < 0)
46415         break;
46416       SetCC = SetCC.getOperand(OpIdx);
46417       truncatedToBoolWithAnd = true;
46418     } else
46419       SetCC = SetCC.getOperand(0);
46420   }
46421 
46422   switch (SetCC.getOpcode()) {
46423   case X86ISD::SETCC_CARRY:
46424     // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
46425     // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
46426     // i.e. it's a comparison against true but the result of SETCC_CARRY is not
46427     // truncated to i1 using 'and'.
46428     if (checkAgainstTrue && !truncatedToBoolWithAnd)
46429       break;
46430     assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
46431            "Invalid use of SETCC_CARRY!");
46432     [[fallthrough]];
46433   case X86ISD::SETCC:
46434     // Set the condition code or opposite one if necessary.
46435     CC = X86::CondCode(SetCC.getConstantOperandVal(0));
46436     if (needOppositeCond)
46437       CC = X86::GetOppositeBranchCondition(CC);
46438     return SetCC.getOperand(1);
46439   case X86ISD::CMOV: {
46440     // Check whether the false/true values are canonical, i.e. 0 or 1.
46441     ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
46442     ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
46443     // Quit if true value is not a constant.
46444     if (!TVal)
46445       return SDValue();
46446     // Quit if false value is not a constant.
46447     if (!FVal) {
46448       SDValue Op = SetCC.getOperand(0);
46449       // Skip 'zext' or 'trunc' node.
46450       if (Op.getOpcode() == ISD::ZERO_EXTEND ||
46451           Op.getOpcode() == ISD::TRUNCATE)
46452         Op = Op.getOperand(0);
46453       // A special case for rdrand/rdseed, where the result is 0 when the
46454       // condition is false.
46455       if ((Op.getOpcode() != X86ISD::RDRAND &&
46456            Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
46457         return SDValue();
46458     }
46459     // Quit if false value is not the constant 0 or 1.
46460     bool FValIsFalse = true;
46461     if (FVal && FVal->getZExtValue() != 0) {
46462       if (FVal->getZExtValue() != 1)
46463         return SDValue();
46464       // If FVal is 1, opposite cond is needed.
46465       needOppositeCond = !needOppositeCond;
46466       FValIsFalse = false;
46467     }
46468     // Quit if TVal is not the constant opposite of FVal.
46469     if (FValIsFalse && TVal->getZExtValue() != 1)
46470       return SDValue();
46471     if (!FValIsFalse && TVal->getZExtValue() != 0)
46472       return SDValue();
46473     CC = X86::CondCode(SetCC.getConstantOperandVal(2));
46474     if (needOppositeCond)
46475       CC = X86::GetOppositeBranchCondition(CC);
46476     return SetCC.getOperand(3);
46477   }
46478   }
46479 
46480   return SDValue();
46481 }
46482 
46483 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
46484 /// Match:
46485 ///   (X86or (X86setcc) (X86setcc))
46486 ///   (X86cmp (and (X86setcc) (X86setcc)), 0)
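/// For illustration (a sketch, not part of the original source), a match of
/// the first form:
///   Cond = (X86or (X86setcc COND_E, EFLAGS) (X86setcc COND_B, EFLAGS))
/// yields CC0 = COND_E, CC1 = COND_B, Flags = EFLAGS and isAnd = false; the
/// second form is the same test wrapped in an (X86cmp (and ...), 0).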
46487 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
46488                                            X86::CondCode &CC1, SDValue &Flags,
46489                                            bool &isAnd) {
46490   if (Cond->getOpcode() == X86ISD::CMP) {
46491     if (!isNullConstant(Cond->getOperand(1)))
46492       return false;
46493 
46494     Cond = Cond->getOperand(0);
46495   }
46496 
46497   isAnd = false;
46498 
46499   SDValue SetCC0, SetCC1;
46500   switch (Cond->getOpcode()) {
46501   default: return false;
46502   case ISD::AND:
46503   case X86ISD::AND:
46504     isAnd = true;
46505     [[fallthrough]];
46506   case ISD::OR:
46507   case X86ISD::OR:
46508     SetCC0 = Cond->getOperand(0);
46509     SetCC1 = Cond->getOperand(1);
46510     break;
46511   };
46512 
46513   // Make sure we have SETCC nodes, using the same flags value.
46514   if (SetCC0.getOpcode() != X86ISD::SETCC ||
46515       SetCC1.getOpcode() != X86ISD::SETCC ||
46516       SetCC0->getOperand(1) != SetCC1->getOperand(1))
46517     return false;
46518 
46519   CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
46520   CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
46521   Flags = SetCC0->getOperand(1);
46522   return true;
46523 }
46524 
46525 // When legalizing carry, we create carries via add X, -1
46526 // If that comes from an actual carry, via setcc, we use the
46527 // carry directly.
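// For illustration (sketch, not from the original source): if the carry input
// is itself a flag, e.g.
//   %c     = (X86ISD::SETCC COND_B, Flags)   ; 0 or 1
//   EFLAGS = (X86ISD::ADD %c, -1)            ; CF == 1 iff %c == 1
// then the CF produced by the ADD equals the original CF, so Flags can be
// used directly and the SETCC/ADD pair becomes dead.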
46528 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
46529   if (EFLAGS.getOpcode() == X86ISD::ADD) {
46530     if (isAllOnesConstant(EFLAGS.getOperand(1))) {
46531       bool FoundAndLSB = false;
46532       SDValue Carry = EFLAGS.getOperand(0);
46533       while (Carry.getOpcode() == ISD::TRUNCATE ||
46534              Carry.getOpcode() == ISD::ZERO_EXTEND ||
46535              (Carry.getOpcode() == ISD::AND &&
46536               isOneConstant(Carry.getOperand(1)))) {
46537         FoundAndLSB |= Carry.getOpcode() == ISD::AND;
46538         Carry = Carry.getOperand(0);
46539       }
46540       if (Carry.getOpcode() == X86ISD::SETCC ||
46541           Carry.getOpcode() == X86ISD::SETCC_CARRY) {
46542         // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
46543         uint64_t CarryCC = Carry.getConstantOperandVal(0);
46544         SDValue CarryOp1 = Carry.getOperand(1);
46545         if (CarryCC == X86::COND_B)
46546           return CarryOp1;
46547         if (CarryCC == X86::COND_A) {
46548           // Try to convert COND_A into COND_B in an attempt to facilitate
46549           // materializing "setb reg".
46550           //
46551           // Do not flip "e > c", where "c" is a constant, because the CMP
46552           // instruction cannot take an immediate as its first operand.
46553           //
46554           if (CarryOp1.getOpcode() == X86ISD::SUB &&
46555               CarryOp1.getNode()->hasOneUse() &&
46556               CarryOp1.getValueType().isInteger() &&
46557               !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
46558             SDValue SubCommute =
46559                 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
46560                             CarryOp1.getOperand(1), CarryOp1.getOperand(0));
46561             return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
46562           }
46563         }
46564         // If this is a check of the z flag of an add with 1, switch to the
46565         // C flag.
46566         if (CarryCC == X86::COND_E &&
46567             CarryOp1.getOpcode() == X86ISD::ADD &&
46568             isOneConstant(CarryOp1.getOperand(1)))
46569           return CarryOp1;
46570       } else if (FoundAndLSB) {
46571         SDLoc DL(Carry);
46572         SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
46573         if (Carry.getOpcode() == ISD::SRL) {
46574           BitNo = Carry.getOperand(1);
46575           Carry = Carry.getOperand(0);
46576         }
46577         return getBT(Carry, BitNo, DL, DAG);
46578       }
46579     }
46580   }
46581 
46582   return SDValue();
46583 }
46584 
46585 /// If we are inverting an PTEST/TESTP operand, attempt to adjust the CC
46586 /// to avoid the inversion.
46587 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
46588                               SelectionDAG &DAG,
46589                               const X86Subtarget &Subtarget) {
46590   // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
46591   if (EFLAGS.getOpcode() != X86ISD::PTEST &&
46592       EFLAGS.getOpcode() != X86ISD::TESTP)
46593     return SDValue();
46594 
46595   // PTEST/TESTP sets EFLAGS as:
46596   // TESTZ: ZF = (Op0 & Op1) == 0
46597   // TESTC: CF = (~Op0 & Op1) == 0
46598   // TESTNZC: ZF == 0 && CF == 0
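  // For illustration (editorial note, not from the original source): if
  // Op0 == ~X, then ZF of TEST*(Op0, Op1) is ((~X & Op1) == 0), which is the
  // CF definition of TEST*(X, Op1), and vice versa; so rewriting the node to
  // use X just swaps the Z and C condition codes below, while the TESTNZC
  // conditions (ZF == 0 && CF == 0) are symmetric and keep their CC.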
46599   EVT VT = EFLAGS.getValueType();
46600   SDValue Op0 = EFLAGS.getOperand(0);
46601   SDValue Op1 = EFLAGS.getOperand(1);
46602   EVT OpVT = Op0.getValueType();
46603 
46604   // TEST*(~X,Y) == TEST*(X,Y)
46605   if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
46606     X86::CondCode InvCC;
46607     switch (CC) {
46608     case X86::COND_B:
46609       // testc -> testz.
46610       InvCC = X86::COND_E;
46611       break;
46612     case X86::COND_AE:
46613       // !testc -> !testz.
46614       InvCC = X86::COND_NE;
46615       break;
46616     case X86::COND_E:
46617       // testz -> testc.
46618       InvCC = X86::COND_B;
46619       break;
46620     case X86::COND_NE:
46621       // !testz -> !testc.
46622       InvCC = X86::COND_AE;
46623       break;
46624     case X86::COND_A:
46625     case X86::COND_BE:
46626       // testnzc -> testnzc (no change).
46627       InvCC = CC;
46628       break;
46629     default:
46630       InvCC = X86::COND_INVALID;
46631       break;
46632     }
46633 
46634     if (InvCC != X86::COND_INVALID) {
46635       CC = InvCC;
46636       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46637                          DAG.getBitcast(OpVT, NotOp0), Op1);
46638     }
46639   }
46640 
46641   if (CC == X86::COND_E || CC == X86::COND_NE) {
46642     // TESTZ(X,~Y) == TESTC(Y,X)
46643     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
46644       CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46645       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46646                          DAG.getBitcast(OpVT, NotOp1), Op0);
46647     }
46648 
46649     if (Op0 == Op1) {
46650       SDValue BC = peekThroughBitcasts(Op0);
46651       EVT BCVT = BC.getValueType();
46652       assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
46653              "Unexpected vector type");
46654 
46655       // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
46656       if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
46657         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46658                            DAG.getBitcast(OpVT, BC.getOperand(0)),
46659                            DAG.getBitcast(OpVT, BC.getOperand(1)));
46660       }
46661 
46662       // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
46663       if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
46664         CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46665         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46666                            DAG.getBitcast(OpVT, BC.getOperand(0)),
46667                            DAG.getBitcast(OpVT, BC.getOperand(1)));
46668       }
46669 
46670       // If every element is an all-sign value, see if we can use MOVMSK to
46671       // more efficiently extract the sign bits and compare that.
46672       // TODO: Handle TESTC with comparison inversion.
46673       // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
46674       // MOVMSK combines to make sure it's never worse than PTEST?
46675       unsigned EltBits = BCVT.getScalarSizeInBits();
46676       if (DAG.ComputeNumSignBits(BC) == EltBits) {
46677         assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
46678         APInt SignMask = APInt::getSignMask(EltBits);
46679         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46680         if (SDValue Res =
46681                 TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
46682           // For vXi16 cases we need to use pmovmskb and extract every other
46683           // sign bit.
46684           SDLoc DL(EFLAGS);
46685           if (EltBits == 16) {
46686             MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
46687             Res = DAG.getBitcast(MovmskVT, Res);
46688             Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
46689             Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
46690                               DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46691           } else {
46692             Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
46693           }
46694           return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
46695                              DAG.getConstant(0, DL, MVT::i32));
46696         }
46697       }
46698     }
46699 
46700     // TESTZ(-1,X) == TESTZ(X,X)
46701     if (ISD::isBuildVectorAllOnes(Op0.getNode()))
46702       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
46703 
46704     // TESTZ(X,-1) == TESTZ(X,X)
46705     if (ISD::isBuildVectorAllOnes(Op1.getNode()))
46706       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
46707 
46708     // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
46709     // TODO: Add COND_NE handling?
46710     if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
46711       SDValue Src0 = peekThroughBitcasts(Op0);
46712       SDValue Src1 = peekThroughBitcasts(Op1);
46713       if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
46714         Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
46715                                  peekThroughBitcasts(Src0.getOperand(1)), true);
46716         Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
46717                                  peekThroughBitcasts(Src1.getOperand(1)), true);
46718         if (Src0 && Src1)
46719           return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46720                              DAG.getBitcast(MVT::v4i64, Src0),
46721                              DAG.getBitcast(MVT::v4i64, Src1));
46722       }
46723     }
46724   }
46725 
46726   return SDValue();
46727 }
46728 
46729 // Attempt to simplify the MOVMSK input based on the comparison type.
46730 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
46731                                   SelectionDAG &DAG,
46732                                   const X86Subtarget &Subtarget) {
46733   // Handle eq/ne against zero (any_of).
46734   // Handle eq/ne against -1 (all_of).
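  // Illustrative example (not from the original source): for a v16i8 compare
  // result V, MOVMSK(V) packs the 16 element sign bits into bits [15:0], so
  // "MOVMSK(V) != 0" is an any_of test on the sign bits and
  // "MOVMSK(V) == 0xFFFF" (an all-ones mask of NumElts bits) is an all_of test.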
46735   if (!(CC == X86::COND_E || CC == X86::COND_NE))
46736     return SDValue();
46737   if (EFLAGS.getValueType() != MVT::i32)
46738     return SDValue();
46739   unsigned CmpOpcode = EFLAGS.getOpcode();
46740   if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
46741     return SDValue();
46742   auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
46743   if (!CmpConstant)
46744     return SDValue();
46745   const APInt &CmpVal = CmpConstant->getAPIntValue();
46746 
46747   SDValue CmpOp = EFLAGS.getOperand(0);
46748   unsigned CmpBits = CmpOp.getValueSizeInBits();
46749   assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
46750 
46751   // Peek through any truncate.
46752   if (CmpOp.getOpcode() == ISD::TRUNCATE)
46753     CmpOp = CmpOp.getOperand(0);
46754 
46755   // Bail if we don't find a MOVMSK.
46756   if (CmpOp.getOpcode() != X86ISD::MOVMSK)
46757     return SDValue();
46758 
46759   SDValue Vec = CmpOp.getOperand(0);
46760   MVT VecVT = Vec.getSimpleValueType();
46761   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
46762          "Unexpected MOVMSK operand");
46763   unsigned NumElts = VecVT.getVectorNumElements();
46764   unsigned NumEltBits = VecVT.getScalarSizeInBits();
46765 
46766   bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
46767   bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
46768                  NumElts <= CmpBits && CmpVal.isMask(NumElts);
46769   if (!IsAnyOf && !IsAllOf)
46770     return SDValue();
46771 
46772   // TODO: Check more combining cases for this.
46773   // We use the number of uses of the CMP operand to decide whether to
46774   // combine. Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))"
46775   // combines below are gated on this one-use constraint.
46776   bool IsOneUse = CmpOp.getNode()->hasOneUse();
46777 
46778   // See if we can peek through to a vector with a wider element type, if the
46779   // signbits extend down to all the sub-elements as well.
46780   // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
46781   // potential SimplifyDemandedBits/Elts cases.
46782   // If we looked through a truncate that discards bits, we can't do this
46783   // transform.
46784   // FIXME: We could do this transform for truncates that discarded bits by
46785   // inserting an AND mask between the new MOVMSK and the CMP.
46786   if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
46787     SDValue BC = peekThroughBitcasts(Vec);
46788     MVT BCVT = BC.getSimpleValueType();
46789     unsigned BCNumElts = BCVT.getVectorNumElements();
46790     unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
46791     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
46792         BCNumEltBits > NumEltBits &&
46793         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
46794       SDLoc DL(EFLAGS);
46795       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
46796       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
46797                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
46798                          DAG.getConstant(CmpMask, DL, MVT::i32));
46799     }
46800   }
46801 
46802   // MOVMSK(CONCAT(X,Y)) == 0 ->  MOVMSK(OR(X,Y)).
46803   // MOVMSK(CONCAT(X,Y)) != 0 ->  MOVMSK(OR(X,Y)).
46804   // MOVMSK(CONCAT(X,Y)) == -1 ->  MOVMSK(AND(X,Y)).
46805   // MOVMSK(CONCAT(X,Y)) != -1 ->  MOVMSK(AND(X,Y)).
46806   if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
46807     SmallVector<SDValue> Ops;
46808     if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
46809         Ops.size() == 2) {
46810       SDLoc DL(EFLAGS);
46811       EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
46812       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
46813       SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
46814                               DAG.getBitcast(SubVT, Ops[0]),
46815                               DAG.getBitcast(SubVT, Ops[1]));
46816       V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
46817       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
46818                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
46819                          DAG.getConstant(CmpMask, DL, MVT::i32));
46820     }
46821   }
46822 
46823   // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
46824   // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
46825   // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(SUB(X,Y),SUB(X,Y)).
46826   // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(SUB(X,Y),SUB(X,Y)).
46827   if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
46828     MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
46829     SDValue BC = peekThroughBitcasts(Vec);
46830     // Ensure MOVMSK was testing every signbit of BC.
46831     if (BC.getValueType().getVectorNumElements() <= NumElts) {
46832       if (BC.getOpcode() == X86ISD::PCMPEQ) {
46833         SDValue V = DAG.getNode(ISD::SUB, SDLoc(BC), BC.getValueType(),
46834                                 BC.getOperand(0), BC.getOperand(1));
46835         V = DAG.getBitcast(TestVT, V);
46836         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46837       }
46838       // Check for 256-bit split vector cases.
46839       if (BC.getOpcode() == ISD::AND &&
46840           BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
46841           BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
46842         SDValue LHS = BC.getOperand(0);
46843         SDValue RHS = BC.getOperand(1);
46844         LHS = DAG.getNode(ISD::SUB, SDLoc(LHS), LHS.getValueType(),
46845                           LHS.getOperand(0), LHS.getOperand(1));
46846         RHS = DAG.getNode(ISD::SUB, SDLoc(RHS), RHS.getValueType(),
46847                           RHS.getOperand(0), RHS.getOperand(1));
46848         LHS = DAG.getBitcast(TestVT, LHS);
46849         RHS = DAG.getBitcast(TestVT, RHS);
46850         SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
46851         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46852       }
46853     }
46854   }
46855 
46856   // See if we can avoid a PACKSS by calling MOVMSK on the sources.
46857   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
46858   // sign bits prior to the comparison with zero unless we know that
46859   // the vXi16 splats the sign bit down to the lower i8 half.
46860   // TODO: Handle all_of patterns.
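  // Illustration (editorial sketch, not in the original source): for
  // X : v8i16, bitcasting X to v16i8 and doing PMOVMSKB yields a 16-bit mask
  // where bit 2*i+1 is the sign bit of element i (little endian), so ANDing
  // with 0xAAAA keeps exactly the 8 original i16 sign bits; the AND can be
  // skipped when each i16 sign-extends into its low byte, since then both
  // bits of every pair agree and the any_of (== 0) test is unchanged.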
46861   if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
46862     SDValue VecOp0 = Vec.getOperand(0);
46863     SDValue VecOp1 = Vec.getOperand(1);
46864     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
46865     bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
46866     // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
46867     if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
46868       SDLoc DL(EFLAGS);
46869       SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
46870       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46871       Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
46872       if (!SignExt0) {
46873         Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
46874                              DAG.getConstant(0xAAAA, DL, MVT::i16));
46875       }
46876       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46877                          DAG.getConstant(0, DL, MVT::i16));
46878     }
46879     // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
46880     // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
46881     if (CmpBits >= 16 && Subtarget.hasInt256() &&
46882         (IsAnyOf || (SignExt0 && SignExt1))) {
46883       if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
46884         SDLoc DL(EFLAGS);
46885         SDValue Result = peekThroughBitcasts(Src);
46886         if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
46887             Result.getValueType().getVectorNumElements() <= NumElts) {
46888           SDValue V = DAG.getNode(ISD::SUB, DL, Result.getValueType(),
46889                                   Result.getOperand(0), Result.getOperand(1));
46890           V = DAG.getBitcast(MVT::v4i64, V);
46891           return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46892         }
46893         Result = DAG.getBitcast(MVT::v32i8, Result);
46894         Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46895         unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
46896         if (!SignExt0 || !SignExt1) {
46897           assert(IsAnyOf &&
46898                  "Only perform v16i16 signmasks for any_of patterns");
46899           Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
46900                                DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46901         }
46902         return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46903                            DAG.getConstant(CmpMask, DL, MVT::i32));
46904       }
46905     }
46906   }
46907 
46908   // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
46909   SmallVector<int, 32> ShuffleMask;
46910   SmallVector<SDValue, 2> ShuffleInputs;
46911   if (NumElts <= CmpBits &&
46912       getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
46913                              ShuffleMask, DAG) &&
46914       ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
46915       ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
46916     unsigned NumShuffleElts = ShuffleMask.size();
46917     APInt DemandedElts = APInt::getZero(NumShuffleElts);
46918     for (int M : ShuffleMask) {
46919       assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
46920       DemandedElts.setBit(M);
46921     }
46922     if (DemandedElts.isAllOnes()) {
46923       SDLoc DL(EFLAGS);
46924       SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
46925       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46926       Result =
46927           DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
46928       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46929                          EFLAGS.getOperand(1));
46930     }
46931   }
46932 
46933   return SDValue();
46934 }
46935 
46936 /// Optimize an EFLAGS definition used according to the condition code \p CC
46937 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
46938 /// uses of chain values.
46939 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
46940                                   SelectionDAG &DAG,
46941                                   const X86Subtarget &Subtarget) {
46942   if (CC == X86::COND_B)
46943     if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
46944       return Flags;
46945 
46946   if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
46947     return R;
46948 
46949   if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
46950     return R;
46951 
46952   if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
46953     return R;
46954 
46955   return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
46956 }
46957 
46958 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
46959 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
46960                            TargetLowering::DAGCombinerInfo &DCI,
46961                            const X86Subtarget &Subtarget) {
46962   SDLoc DL(N);
46963 
46964   SDValue FalseOp = N->getOperand(0);
46965   SDValue TrueOp = N->getOperand(1);
46966   X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
46967   SDValue Cond = N->getOperand(3);
46968 
46969   // cmov X, X, ?, ? --> X
46970   if (TrueOp == FalseOp)
46971     return TrueOp;
46972 
46973   // Try to simplify the EFLAGS and condition code operands.
46974   // We can't always do this as FCMOV only supports a subset of X86 cond.
46975   if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
46976     if (!(FalseOp.getValueType() == MVT::f80 ||
46977           (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
46978           (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
46979         !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
46980       SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
46981                        Flags};
46982       return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46983     }
46984   }
46985 
46986   // If this is a select between two integer constants, try to do some
46987   // optimizations.  Note that the operands are ordered the opposite of SELECT
46988   // operands.
46989   if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
46990     if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
46991       // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
46992       // larger than FalseC (the false value).
46993       if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
46994         CC = X86::GetOppositeBranchCondition(CC);
46995         std::swap(TrueC, FalseC);
46996         std::swap(TrueOp, FalseOp);
46997       }
46998 
46999       // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
47000       // This is efficient for any integer data type (including i8/i16) and
47001       // shift amount.
47002       if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
47003         Cond = getSETCC(CC, Cond, DL, DAG);
47004 
47005         // Zero extend the condition if needed.
47006         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
47007 
47008         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
47009         Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
47010                            DAG.getConstant(ShAmt, DL, MVT::i8));
47011         return Cond;
47012       }
47013 
47014       // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
47015       // for any integer data type, including i8/i16.
47016       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
47017         Cond = getSETCC(CC, Cond, DL, DAG);
47018 
47019         // Zero extend the condition if needed.
47020         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
47021                            FalseC->getValueType(0), Cond);
47022         Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
47023                            SDValue(FalseC, 0));
47024         return Cond;
47025       }
47026 
47027       // Optimize cases that will turn into an LEA instruction.  This requires
47028       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
47029       if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
47030         APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
47031         assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
47032                "Implicit constant truncation");
47033 
47034         bool isFastMultiplier = false;
47035         if (Diff.ult(10)) {
47036           switch (Diff.getZExtValue()) {
47037           default: break;
47038           case 1:  // result = add base, cond
47039           case 2:  // result = lea base(    , cond*2)
47040           case 3:  // result = lea base(cond, cond*2)
47041           case 4:  // result = lea base(    , cond*4)
47042           case 5:  // result = lea base(cond, cond*4)
47043           case 8:  // result = lea base(    , cond*8)
47044           case 9:  // result = lea base(cond, cond*8)
47045             isFastMultiplier = true;
47046             break;
47047           }
47048         }
47049 
47050         if (isFastMultiplier) {
47051           Cond = getSETCC(CC, Cond, DL ,DAG);
47052           // Zero extend the condition if needed.
47053           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
47054                              Cond);
47055           // Scale the condition by the difference.
47056           if (Diff != 1)
47057             Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
47058                                DAG.getConstant(Diff, DL, Cond.getValueType()));
47059 
47060           // Add the base if non-zero.
47061           if (FalseC->getAPIntValue() != 0)
47062             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
47063                                SDValue(FalseC, 0));
47064           return Cond;
47065         }
47066       }
47067     }
47068   }
47069 
47070   // Handle these cases:
47071   //   (select (x != c), e, c) -> (select (x != c), e, x),
47072   //   (select (x == c), c, e) -> (select (x == c), x, e)
47073   // where c is an integer constant, and the "select" is the combination
47074   // of CMOV and CMP.
47075   //
47076   // The rationale for this change is that the conditional-move from a constant
47077   // needs two instructions, however, conditional-move from a register needs
47078   // only one instruction.
47079   //
47080   // CAVEAT: By replacing a constant with a symbolic value, it may obscure
47081   //  some instruction-combining opportunities. This opt needs to be
47082   //  postponed as late as possible.
47083   //
47084   if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
47085     // the DCI.xxxx conditions are provided to postpone the optimization as
47086     // late as possible.
47087 
47088     ConstantSDNode *CmpAgainst = nullptr;
47089     if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
47090         (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
47091         !isa<ConstantSDNode>(Cond.getOperand(0))) {
47092 
47093       if (CC == X86::COND_NE &&
47094           CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
47095         CC = X86::GetOppositeBranchCondition(CC);
47096         std::swap(TrueOp, FalseOp);
47097       }
47098 
47099       if (CC == X86::COND_E &&
47100           CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
47101         SDValue Ops[] = {FalseOp, Cond.getOperand(0),
47102                          DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
47103         return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
47104       }
47105     }
47106   }
47107 
47108   // Fold and/or of setcc's to double CMOV:
47109   //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
47110   //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
47111   //
47112   // This combine lets us generate:
47113   //   cmovcc1 (jcc1 if we don't have CMOV)
47114   //   cmovcc2 (same)
47115   // instead of:
47116   //   setcc1
47117   //   setcc2
47118   //   and/or
47119   //   cmovne (jne if we don't have CMOV)
47120   // When we can't use the CMOV instruction, it might increase branch
47121   // mispredicts.
47122   // When we can use CMOV, or when there is no mispredict, this improves
47123   // throughput and reduces register pressure.
47124   //
47125   if (CC == X86::COND_NE) {
47126     SDValue Flags;
47127     X86::CondCode CC0, CC1;
47128     bool isAndSetCC;
47129     if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
47130       if (isAndSetCC) {
47131         std::swap(FalseOp, TrueOp);
47132         CC0 = X86::GetOppositeBranchCondition(CC0);
47133         CC1 = X86::GetOppositeBranchCondition(CC1);
47134       }
47135 
47136       SDValue LOps[] = {FalseOp, TrueOp,
47137                         DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
47138       SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
47139       SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
47140                        Flags};
47141       SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
47142       return CMOV;
47143     }
47144   }
47145 
47146   // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
47147   //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
47148   // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
47149   //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
47150   if ((CC == X86::COND_NE || CC == X86::COND_E) &&
47151       Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
47152     SDValue Add = TrueOp;
47153     SDValue Const = FalseOp;
47154     // Canonicalize the condition code for easier matching and output.
47155     if (CC == X86::COND_E)
47156       std::swap(Add, Const);
47157 
47158     // We might have replaced the constant in the cmov with the LHS of the
47159     // compare. If so change it to the RHS of the compare.
47160     if (Const == Cond.getOperand(0))
47161       Const = Cond.getOperand(1);
47162 
47163     // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
47164     if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
47165         Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
47166         (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
47167          Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
47168         Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
47169       EVT VT = N->getValueType(0);
47170       // This should constant fold.
47171       SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
47172       SDValue CMov =
47173           DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
47174                       DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
47175       return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
47176     }
47177   }
47178 
47179   return SDValue();
47180 }
47181 
47182 /// Different mul shrinking modes.
47183 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
47184 
47185 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
47186   EVT VT = N->getOperand(0).getValueType();
47187   if (VT.getScalarSizeInBits() != 32)
47188     return false;
47189 
47190   assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
47191   unsigned SignBits[2] = {1, 1};
47192   bool IsPositive[2] = {false, false};
47193   for (unsigned i = 0; i < 2; i++) {
47194     SDValue Opd = N->getOperand(i);
47195 
47196     SignBits[i] = DAG.ComputeNumSignBits(Opd);
47197     IsPositive[i] = DAG.SignBitIsZero(Opd);
47198   }
47199 
47200   bool AllPositive = IsPositive[0] && IsPositive[1];
47201   unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
47202   // When ranges are from -128 ~ 127, use MULS8 mode.
47203   if (MinSignBits >= 25)
47204     Mode = ShrinkMode::MULS8;
47205   // When ranges are from 0 ~ 255, use MULU8 mode.
47206   else if (AllPositive && MinSignBits >= 24)
47207     Mode = ShrinkMode::MULU8;
47208   // When ranges are from -32768 ~ 32767, use MULS16 mode.
47209   else if (MinSignBits >= 17)
47210     Mode = ShrinkMode::MULS16;
47211   // When ranges are from 0 ~ 65535, use MULU16 mode.
47212   else if (AllPositive && MinSignBits >= 16)
47213     Mode = ShrinkMode::MULU16;
47214   else
47215     return false;
47216   return true;
47217 }
47218 
47219 /// When the operands of vector mul are extended from smaller size values,
47220 /// like i8 and i16, the type of mul may be shrinked to generate more
47221 /// efficient code. Two typical patterns are handled:
47222 /// Pattern1:
47223 ///     %2 = sext/zext <N x i8> %1 to <N x i32>
47224 ///     %4 = sext/zext <N x i8> %3 to <N x i32>
47225 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
47226 ///     %5 = mul <N x i32> %2, %4
47227 ///
47228 /// Pattern2:
47229 ///     %2 = zext/sext <N x i16> %1 to <N x i32>
47230 ///     %4 = zext/sext <N x i16> %3 to <N x i32>
47231 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
47232 ///     %5 = mul <N x i32> %2, %4
47233 ///
47234 /// There are four mul shrinking modes:
47235 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
47236 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
47237 /// generate pmullw+sext32 for it (MULS8 mode).
47238 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
47239 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
47240 /// generate pmullw+zext32 for it (MULU8 mode).
47241 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
47242 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
47243 /// generate pmullw+pmulhw for it (MULS16 mode).
47244 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
47245 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
47246 /// generate pmullw+pmulhuw for it (MULU16 mode).
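/// For illustration (editorial sketch, not part of the original source): if
/// both v8i32 operands hold values in [0, 255], MULU8 truncates them to
/// v8i16, issues a single pmullw and zero-extends the result back to v8i32;
/// if the values are only known to fit in [0, 65535], MULU16 additionally
/// needs pmulhuw for the upper 16 bits of each product, and the two halves
/// are re-interleaved into i32 lanes by the unpack shuffles built below.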
47247 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
47248                                const X86Subtarget &Subtarget) {
47249   // Check for legality
47250   // pmullw/pmulhw are not supported by SSE.
47251   if (!Subtarget.hasSSE2())
47252     return SDValue();
47253 
47254   // Check for profitability
47255   // pmulld is supported since SSE41. It is better to use pmulld
47256   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
47257   // the expansion.
47258   bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
47259   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
47260     return SDValue();
47261 
47262   ShrinkMode Mode;
47263   if (!canReduceVMulWidth(N, DAG, Mode))
47264     return SDValue();
47265 
47266   SDLoc DL(N);
47267   SDValue N0 = N->getOperand(0);
47268   SDValue N1 = N->getOperand(1);
47269   EVT VT = N->getOperand(0).getValueType();
47270   unsigned NumElts = VT.getVectorNumElements();
47271   if ((NumElts % 2) != 0)
47272     return SDValue();
47273 
47274   EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
47275 
47276   // Shrink the operands of mul.
47277   SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
47278   SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
47279 
47280   // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
47281   // lower part is needed.
47282   SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
47283   if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
47284     return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
47285                                                    : ISD::SIGN_EXTEND,
47286                        DL, VT, MulLo);
47287 
47288   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
47289   // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
47290   // the higher part is also needed.
47291   SDValue MulHi =
47292       DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
47293                   ReducedVT, NewN0, NewN1);
47294 
47295   // Repack the lower part and higher part result of mul into a wider
47296   // result.
47297   // Generate shuffle functioning as punpcklwd.
47298   SmallVector<int, 16> ShuffleMask(NumElts);
47299   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47300     ShuffleMask[2 * i] = i;
47301     ShuffleMask[2 * i + 1] = i + NumElts;
47302   }
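  // For illustration (not from the original source): with NumElts == 8 the
  // mask built above is {0, 8, 1, 9, 2, 10, 3, 11}, i.e. it interleaves the
  // low four lanes of MulLo and MulHi exactly like punpcklwd, so each i32
  // lane of ResLo becomes MulLo[i] | (MulHi[i] << 16).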
47303   SDValue ResLo =
47304       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
47305   ResLo = DAG.getBitcast(ResVT, ResLo);
47306   // Generate shuffle functioning as punpckhwd.
47307   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47308     ShuffleMask[2 * i] = i + NumElts / 2;
47309     ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
47310   }
47311   SDValue ResHi =
47312       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
47313   ResHi = DAG.getBitcast(ResVT, ResHi);
47314   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
47315 }
47316 
47317 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
47318                                  EVT VT, const SDLoc &DL) {
47319 
47320   auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
47321     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47322                                  DAG.getConstant(Mult, DL, VT));
47323     Result = DAG.getNode(ISD::SHL, DL, VT, Result,
47324                          DAG.getConstant(Shift, DL, MVT::i8));
47325     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
47326                          N->getOperand(0));
47327     return Result;
47328   };
47329 
47330   auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
47331     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47332                                  DAG.getConstant(Mul1, DL, VT));
47333     Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
47334                          DAG.getConstant(Mul2, DL, VT));
47335     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
47336                          N->getOperand(0));
47337     return Result;
47338   };
47339 
47340   switch (MulAmt) {
47341   default:
47342     break;
47343   case 11:
47344     // mul x, 11 => add ((shl (mul x, 5), 1), x)
47345     return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
47346   case 21:
47347     // mul x, 21 => add ((shl (mul x, 5), 2), x)
47348     return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
47349   case 41:
47350     // mul x, 41 => add ((shl (mul x, 5), 3), x)
47351     return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
47352   case 22:
47353     // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
47354     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
47355                        combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
47356   case 19:
47357     // mul x, 19 => add ((shl (mul x, 9), 1), x)
47358     return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
47359   case 37:
47360     // mul x, 37 => add ((shl (mul x, 9), 2), x)
47361     return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
47362   case 73:
47363     // mul x, 73 => add ((shl (mul x, 9), 3), x)
47364     return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
47365   case 13:
47366     // mul x, 13 => add ((shl (mul x, 3), 2), x)
47367     return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
47368   case 23:
47369     // mul x, 23 => sub ((shl (mul x, 3), 3), x)
47370     return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
47371   case 26:
47372     // mul x, 26 => add ((mul (mul x, 5), 5), x)
47373     return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
47374   case 28:
47375     // mul x, 28 => add ((mul (mul x, 9), 3), x)
47376     return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
47377   case 29:
47378     // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
47379     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
47380                        combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
47381   }
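  // Sanity check of the table above (illustrative, not in the original
  // source): these are plain integer identities, e.g.
  //   11*x == ((x*5) << 1) + x,   23*x == ((x*3) << 3) - x,
  //   26*x == ((x*5)*5) + x,      29*x == ((x*9)*3) + x + x,
  // each realizable with LEA-multiplies (by 3/5/9), shifts and add/sub.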
47382 
47383   // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift
47384   // followed by a single LEA.
47385   // First check if this is a sum of two powers of 2 because that's easy. Then
47386   // count how many trailing zeros there are up to the first set bit.
47387   // TODO: We can do this even without LEA at a cost of two shifts and an add.
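  // For illustration (not from the original source): MulAmt == 20 == 16 + 4
  // passes the power-of-2 test (20 & 19 == 16), ScaleShift == 2 and
  // ShiftAmt == 4, so the multiply becomes (x << 4) + (x << 2), where the
  // second shift can be folded into the LEA scale.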
47388   if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
47389     unsigned ScaleShift = countTrailingZeros(MulAmt);
47390     if (ScaleShift >= 1 && ScaleShift < 4) {
47391       unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
47392       SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47393                                    DAG.getConstant(ShiftAmt, DL, MVT::i8));
47394       SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47395                                    DAG.getConstant(ScaleShift, DL, MVT::i8));
47396       return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
47397     }
47398   }
47399 
47400   return SDValue();
47401 }
47402 
47403 // If the upper 17 bits of each element in one operand are zero and the other
47404 // operand's upper bits are all zero/sign bits, then we can use PMADDWD, which
47405 // is always at least as quick as PMULLD, except on KNL.
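// For illustration (editorial note, not part of the original source): viewing
// each i32 lane as a pair of i16 halves, VPMADDWD computes
// lo(A)*lo(B) + hi(A)*hi(B) per lane as a signed 16x16->32 multiply-add. If
// one operand has its upper 17 bits clear (so hi == 0 and lo is a
// non-negative i16) and the other is sign-extended from i16, the hi*hi term
// vanishes and lo*lo equals the low 32 bits of the full i32 product.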
47406 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
47407                                    const X86Subtarget &Subtarget) {
47408   if (!Subtarget.hasSSE2())
47409     return SDValue();
47410 
47411   if (Subtarget.isPMADDWDSlow())
47412     return SDValue();
47413 
47414   EVT VT = N->getValueType(0);
47415 
47416   // Only support vXi32 vectors.
47417   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
47418     return SDValue();
47419 
47420   // Make sure the type is legal or can split/widen to a legal type.
47421   // With AVX512 but without BWI, we would need to split v32i16.
47422   unsigned NumElts = VT.getVectorNumElements();
47423   if (NumElts == 1 || !isPowerOf2_32(NumElts))
47424     return SDValue();
47425 
47426   // With AVX512 but without BWI, we would need to split v32i16.
47427   if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
47428     return SDValue();
47429 
47430   SDValue N0 = N->getOperand(0);
47431   SDValue N1 = N->getOperand(1);
47432 
47433   // If we are zero/sign extending two steps without SSE4.1, it's better to
47434   // reduce the vmul width instead.
47435   if (!Subtarget.hasSSE41() &&
47436       (((N0.getOpcode() == ISD::ZERO_EXTEND &&
47437          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
47438         (N1.getOpcode() == ISD::ZERO_EXTEND &&
47439          N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
47440        ((N0.getOpcode() == ISD::SIGN_EXTEND &&
47441          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
47442         (N1.getOpcode() == ISD::SIGN_EXTEND &&
47443          N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
47444     return SDValue();
47445 
47446   // If we are sign extending a wide vector without SSE4.1, it's better to reduce
47447   // the vmul width instead.
47448   if (!Subtarget.hasSSE41() &&
47449       (N0.getOpcode() == ISD::SIGN_EXTEND &&
47450        N0.getOperand(0).getValueSizeInBits() > 128) &&
47451       (N1.getOpcode() == ISD::SIGN_EXTEND &&
47452        N1.getOperand(0).getValueSizeInBits() > 128))
47453     return SDValue();
47454 
47455   // Sign bits must extend down to the lowest i16.
47456   if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
47457       DAG.ComputeMaxSignificantBits(N0) > 16)
47458     return SDValue();
47459 
47460   // At least one of the elements must be zero in the upper 17 bits, or can be
47461   // safely made zero without altering the final result.
47462   auto GetZeroableOp = [&](SDValue Op) {
47463     APInt Mask17 = APInt::getHighBitsSet(32, 17);
47464     if (DAG.MaskedValueIsZero(Op, Mask17))
47465       return Op;
47466     // Mask off upper 16-bits of sign-extended constants.
47467     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
47468       return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
47469                          DAG.getConstant(0xFFFF, SDLoc(N), VT));
47470     if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
47471       SDValue Src = Op.getOperand(0);
47472       // Convert sext(vXi16) to zext(vXi16).
47473       if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
47474         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
47475       // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
47476       // which will expand the extension.
47477       if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
47478         EVT ExtVT = VT.changeVectorElementType(MVT::i16);
47479         Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
47480         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
47481       }
47482     }
47483     // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
47484     if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
47485         N->isOnlyUserOf(Op.getNode())) {
47486       SDValue Src = Op.getOperand(0);
47487       if (Src.getScalarValueSizeInBits() == 16)
47488         return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
47489     }
47490     // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
47491     if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
47492         N->isOnlyUserOf(Op.getNode())) {
47493       return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
47494                          Op.getOperand(1));
47495     }
47496     return SDValue();
47497   };
47498   SDValue ZeroN0 = GetZeroableOp(N0);
47499   SDValue ZeroN1 = GetZeroableOp(N1);
47500   if (!ZeroN0 && !ZeroN1)
47501     return SDValue();
47502   N0 = ZeroN0 ? ZeroN0 : N0;
47503   N1 = ZeroN1 ? ZeroN1 : N1;
47504 
47505   // Use SplitOpsAndApply to handle AVX splitting.
47506   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47507                            ArrayRef<SDValue> Ops) {
47508     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
47509     MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
47510     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
47511                        DAG.getBitcast(OpVT, Ops[0]),
47512                        DAG.getBitcast(OpVT, Ops[1]));
47513   };
47514   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
47515                           PMADDWDBuilder);
47516 }
47517 
47518 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
47519                                   const X86Subtarget &Subtarget) {
47520   if (!Subtarget.hasSSE2())
47521     return SDValue();
47522 
47523   EVT VT = N->getValueType(0);
47524 
47525   // Only support vXi64 vectors.
47526   if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
47527       VT.getVectorNumElements() < 2 ||
47528       !isPowerOf2_32(VT.getVectorNumElements()))
47529     return SDValue();
47530 
47531   SDValue N0 = N->getOperand(0);
47532   SDValue N1 = N->getOperand(1);
47533 
47534   // PMULDQ returns the 64-bit result of the signed multiplication of the lower
47535   // 32 bits. We can lower with this if the sign bits stretch that far.
47536   if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
47537       DAG.ComputeNumSignBits(N1) > 32) {
47538     auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47539                             ArrayRef<SDValue> Ops) {
47540       return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
47541     };
47542     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
47543                             PMULDQBuilder, /*CheckBWI*/false);
47544   }
47545 
47546   // If the upper bits are zero we can use a single pmuludq.
47547   APInt Mask = APInt::getHighBitsSet(64, 32);
47548   if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
47549     auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47550                              ArrayRef<SDValue> Ops) {
47551       return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
47552     };
47553     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
47554                             PMULUDQBuilder, /*CheckBWI*/false);
47555   }
47556 
47557   return SDValue();
47558 }
47559 
47560 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
47561                           TargetLowering::DAGCombinerInfo &DCI,
47562                           const X86Subtarget &Subtarget) {
47563   EVT VT = N->getValueType(0);
47564 
47565   if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
47566     return V;
47567 
47568   if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
47569     return V;
47570 
47571   if (DCI.isBeforeLegalize() && VT.isVector())
47572     return reduceVMULWidth(N, DAG, Subtarget);
47573 
47574   // Optimize a single multiply with constant into two operations in order to
47575   // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
47576   if (!MulConstantOptimization)
47577     return SDValue();
47578 
47579   // An imul is usually smaller than the alternative sequence.
47580   if (DAG.getMachineFunction().getFunction().hasMinSize())
47581     return SDValue();
47582 
47583   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
47584     return SDValue();
47585 
47586   if (VT != MVT::i64 && VT != MVT::i32)
47587     return SDValue();
47588 
47589   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
47590   if (!C)
47591     return SDValue();
47592   if (isPowerOf2_64(C->getZExtValue()))
47593     return SDValue();
47594 
47595   int64_t SignMulAmt = C->getSExtValue();
47596   assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
47597   uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
47598 
47599   SDLoc DL(N);
47600   if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
47601     SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47602                                  DAG.getConstant(AbsMulAmt, DL, VT));
47603     if (SignMulAmt < 0)
47604       NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
47605                            NewMul);
47606 
47607     return NewMul;
47608   }
47609 
47610   uint64_t MulAmt1 = 0;
47611   uint64_t MulAmt2 = 0;
47612   if ((AbsMulAmt % 9) == 0) {
47613     MulAmt1 = 9;
47614     MulAmt2 = AbsMulAmt / 9;
47615   } else if ((AbsMulAmt % 5) == 0) {
47616     MulAmt1 = 5;
47617     MulAmt2 = AbsMulAmt / 5;
47618   } else if ((AbsMulAmt % 3) == 0) {
47619     MulAmt1 = 3;
47620     MulAmt2 = AbsMulAmt / 3;
47621   }
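  // Factoring example (illustrative, not from the original source):
  // AbsMulAmt == 45 gives MulAmt1 == 9, MulAmt2 == 5, i.e. two LEA-multiplies;
  // AbsMulAmt == 40 gives MulAmt1 == 5, MulAmt2 == 8, and since 8 is a power
  // of two the two factors are normally swapped below so the SHL is issued
  // first and the multiply by 5 can fold into an addressing mode.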
47622 
47623   SDValue NewMul;
47624   // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
47625   if (MulAmt2 &&
47626       (isPowerOf2_64(MulAmt2) ||
47627        (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
47628 
47629     if (isPowerOf2_64(MulAmt2) &&
47630         !(SignMulAmt >= 0 && N->hasOneUse() &&
47631           N->use_begin()->getOpcode() == ISD::ADD))
47632       // If the second multiplier is pow2, issue it first. We want the multiply
47633       // 3, 5, or 9 to be folded into the addressing mode unless the lone use
47634       // is an add. Only do this for positive multiply amounts since the
47635       // negate would prevent it from being used as an address mode anyway.
47636       std::swap(MulAmt1, MulAmt2);
47637 
47638     if (isPowerOf2_64(MulAmt1))
47639       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47640                            DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
47641     else
47642       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47643                            DAG.getConstant(MulAmt1, DL, VT));
47644 
47645     if (isPowerOf2_64(MulAmt2))
47646       NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
47647                            DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
47648     else
47649       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
47650                            DAG.getConstant(MulAmt2, DL, VT));
47651 
47652     // Negate the result.
47653     if (SignMulAmt < 0)
47654       NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
47655                            NewMul);
47656   } else if (!Subtarget.slowLEA())
47657     NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
47658 
47659   if (!NewMul) {
47660     assert(C->getZExtValue() != 0 &&
47661            C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
47662            "Both cases that could cause potential overflows should have "
47663            "already been handled.");
47664     if (isPowerOf2_64(AbsMulAmt - 1)) {
47665       // (mul x, 2^N + 1) => (add (shl x, N), x)
47666       NewMul = DAG.getNode(
47667           ISD::ADD, DL, VT, N->getOperand(0),
47668           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47669                       DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
47670                                       MVT::i8)));
47671       // To negate, subtract the number from zero.
47672       if (SignMulAmt < 0)
47673         NewMul = DAG.getNode(ISD::SUB, DL, VT,
47674                              DAG.getConstant(0, DL, VT), NewMul);
47675     } else if (isPowerOf2_64(AbsMulAmt + 1)) {
47676       // (mul x, 2^N - 1) => (sub (shl x, N), x)
47677       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47678                            DAG.getConstant(Log2_64(AbsMulAmt + 1),
47679                                            DL, MVT::i8));
47680       // To negate, reverse the operands of the subtract.
47681       if (SignMulAmt < 0)
47682         NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
47683       else
47684         NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
47685     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
47686       // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
47687       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47688                            DAG.getConstant(Log2_64(AbsMulAmt - 2),
47689                                            DL, MVT::i8));
47690       NewMul = DAG.getNode(
47691           ISD::ADD, DL, VT, NewMul,
47692           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
47693     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
47694       // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
47695       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47696                            DAG.getConstant(Log2_64(AbsMulAmt + 2),
47697                                            DL, MVT::i8));
47698       NewMul = DAG.getNode(
47699           ISD::SUB, DL, VT, NewMul,
47700           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
47701     }
47702   }
47703 
47704   return NewMul;
47705 }
47706 
47707 // Try to form a MULHU or MULHS node by looking for
47708 // (srl (mul ext, ext), 16)
47709 // TODO: This is X86 specific because we want to be able to handle wide types
47710 // before type legalization. But we can only do it if the vector will be
47711 // legalized via widening/splitting. Type legalization can't handle promotion
47712 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
47713 // combiner.
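      // One possible instance (the code below accepts any vXi16 inputs that were
      // extended to at least vXi32 elements):
      //   (srl (mul (zext v8i16 a), (zext v8i16 b)), 16) -> (zext (mulhu a, b))
      //   (sra (mul (sext v8i16 a), (sext v8i16 b)), 16) -> (sext (mulhs a, b))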
47714 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
47715                                    const X86Subtarget &Subtarget) {
47716   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
47717            "SRL or SRA node is required here!");
47718   SDLoc DL(N);
47719 
47720   if (!Subtarget.hasSSE2())
47721     return SDValue();
47722 
47723   // The operation feeding into the shift must be a multiply.
47724   SDValue ShiftOperand = N->getOperand(0);
47725   if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
47726     return SDValue();
47727 
47728   // Input type should be at least vXi32.
47729   EVT VT = N->getValueType(0);
47730   if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
47731     return SDValue();
47732 
47733   // Need a shift by 16.
47734   APInt ShiftAmt;
47735   if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
47736       ShiftAmt != 16)
47737     return SDValue();
47738 
47739   SDValue LHS = ShiftOperand.getOperand(0);
47740   SDValue RHS = ShiftOperand.getOperand(1);
47741 
47742   unsigned ExtOpc = LHS.getOpcode();
47743   if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
47744       RHS.getOpcode() != ExtOpc)
47745     return SDValue();
47746 
47747   // Peek through the extends.
47748   LHS = LHS.getOperand(0);
47749   RHS = RHS.getOperand(0);
47750 
47751   // Ensure the input types match.
47752   EVT MulVT = LHS.getValueType();
47753   if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
47754     return SDValue();
47755 
47756   unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
47757   SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
47758 
47759   ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
47760   return DAG.getNode(ExtOpc, DL, VT, Mulh);
47761 }
47762 
47763 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
47764   SDValue N0 = N->getOperand(0);
47765   SDValue N1 = N->getOperand(1);
47766   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
47767   EVT VT = N0.getValueType();
47768 
47769   // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
47770   // since the result of setcc_c is all zeros or all ones.
47771   if (VT.isInteger() && !VT.isVector() &&
47772       N1C && N0.getOpcode() == ISD::AND &&
47773       N0.getOperand(1).getOpcode() == ISD::Constant) {
47774     SDValue N00 = N0.getOperand(0);
47775     APInt Mask = N0.getConstantOperandAPInt(1);
47776     Mask <<= N1C->getAPIntValue();
47777     bool MaskOK = false;
47778     // We can handle cases concerning bit-widening nodes containing setcc_c if
47779     // we carefully interrogate the mask to make sure the transform is
47780     // semantics-preserving.
47781     // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
47782     // of the underlying setcc_c operation if the setcc_c was zero extended.
47783     // Consider the following example:
47784     //   zext(setcc_c)                 -> i32 0x0000FFFF
47785     //   c1                            -> i32 0x0000FFFF
47786     //   c2                            -> i32 0x00000001
47787     //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
47788     //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
47789     if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
47790       MaskOK = true;
47791     } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
47792                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47793       MaskOK = true;
47794     } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
47795                 N00.getOpcode() == ISD::ANY_EXTEND) &&
47796                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47797       MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
47798     }
47799     if (MaskOK && Mask != 0) {
47800       SDLoc DL(N);
47801       return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
47802     }
47803   }
47804 
47805   return SDValue();
47806 }
47807 
47808 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
47809                                            const X86Subtarget &Subtarget) {
47810   SDValue N0 = N->getOperand(0);
47811   SDValue N1 = N->getOperand(1);
47812   EVT VT = N0.getValueType();
47813   unsigned Size = VT.getSizeInBits();
47814 
47815   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47816     return V;
47817 
47818   // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
47819   // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
47820   // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
47821   // depending on sign of (SarConst - [56,48,32,24,16])
47822 
47823   // sexts on X86 are MOVs. The MOVs have the same code size
47824   // as the SHIFTs above (only a SHIFT by 1 has smaller code size).
47825   // However, the MOVs have two advantages over a SHIFT:
47826   // 1. MOVs can write to a register that differs from the source.
47827   // 2. MOVs accept memory operands.
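        // A concrete i32 sketch (shift amounts chosen for illustration only):
        //   (sra (shl X, 24), 25) -> (sra (sext_inreg X, i8), 1)   ; movsx + sar
        //   (sra (shl X, 24), 22) -> (shl (sext_inreg X, i8), 2)   ; movsx + shl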
47828 
47829   if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
47830       N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
47831       N0.getOperand(1).getOpcode() != ISD::Constant)
47832     return SDValue();
47833 
47834   SDValue N00 = N0.getOperand(0);
47835   SDValue N01 = N0.getOperand(1);
47836   APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
47837   APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
47838   EVT CVT = N1.getValueType();
47839 
47840   if (SarConst.isNegative())
47841     return SDValue();
47842 
47843   for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
47844     unsigned ShiftSize = SVT.getSizeInBits();
47845     // Skip types without a corresponding sext/zext and ShlConst values that
47846     // are not one of [56,48,32,24,16].
47847     if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
47848       continue;
47849     SDLoc DL(N);
47850     SDValue NN =
47851         DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
47852     SarConst = SarConst - (Size - ShiftSize);
47853     if (SarConst == 0)
47854       return NN;
47855     if (SarConst.isNegative())
47856       return DAG.getNode(ISD::SHL, DL, VT, NN,
47857                          DAG.getConstant(-SarConst, DL, CVT));
47858     return DAG.getNode(ISD::SRA, DL, VT, NN,
47859                        DAG.getConstant(SarConst, DL, CVT));
47860   }
47861   return SDValue();
47862 }
47863 
47864 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
47865                                         TargetLowering::DAGCombinerInfo &DCI,
47866                                         const X86Subtarget &Subtarget) {
47867   SDValue N0 = N->getOperand(0);
47868   SDValue N1 = N->getOperand(1);
47869   EVT VT = N0.getValueType();
47870 
47871   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47872     return V;
47873 
47874   // Only do this on the last DAG combine as it can interfere with other
47875   // combines.
47876   if (!DCI.isAfterLegalizeDAG())
47877     return SDValue();
47878 
47879   // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
47880   // TODO: This is a generic DAG combine that became an x86-only combine to
47881   // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
47882   // and-not ('andn').
47883   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
47884     return SDValue();
47885 
47886   auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
47887   auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
47888   if (!ShiftC || !AndC)
47889     return SDValue();
47890 
47891   // If we can shrink the constant mask below 8 bits or 32 bits, then this
47892   // transform should reduce code size. It may also enable secondary transforms
47893   // from improved known-bits analysis or instruction selection.
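        // For example (mask chosen for illustration):
        //   srl (and X, 0x7F0), 4  -->  and (srl X, 4), 0x7F
        // which should let the 'and' use a sign-extended 8-bit immediate.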
47894   APInt MaskVal = AndC->getAPIntValue();
47895 
47896   // If this can be matched by a zero extend, don't optimize.
47897   if (MaskVal.isMask()) {
47898     unsigned TO = MaskVal.countTrailingOnes();
47899     if (TO >= 8 && isPowerOf2_32(TO))
47900       return SDValue();
47901   }
47902 
47903   APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
47904   unsigned OldMaskSize = MaskVal.getMinSignedBits();
47905   unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
47906   if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
47907       (OldMaskSize > 32 && NewMaskSize <= 32)) {
47908     // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
47909     SDLoc DL(N);
47910     SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
47911     SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
47912     return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
47913   }
47914   return SDValue();
47915 }
47916 
47917 static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
47918                                          const X86Subtarget &Subtarget) {
47919   unsigned Opcode = N->getOpcode();
47920   assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");
47921 
47922   SDLoc DL(N);
47923   EVT VT = N->getValueType(0);
47924   SDValue N0 = N->getOperand(0);
47925   SDValue N1 = N->getOperand(1);
47926   EVT SrcVT = N0.getValueType();
47927 
47928   SDValue BC0 =
47929       N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
47930   SDValue BC1 =
47931       N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;
47932 
47933   // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
47934   // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))); this is mainly for
47935   // truncation trees that help us avoid lane-crossing shuffles.
47936   // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
47937   // TODO: We don't handle vXf64 shuffles yet.
47938   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47939     if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
47940       SmallVector<SDValue> ShuffleOps;
47941       SmallVector<int> ShuffleMask, ScaledMask;
47942       SDValue Vec = peekThroughBitcasts(BCSrc);
47943       if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
47944         resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
47945         // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
47946         // shuffle to a v4X64 width - we can probably relax this in the future.
47947         if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
47948             ShuffleOps[0].getValueType().is256BitVector() &&
47949             scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
47950           SDValue Lo, Hi;
47951           MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47952           std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
47953           Lo = DAG.getBitcast(SrcVT, Lo);
47954           Hi = DAG.getBitcast(SrcVT, Hi);
47955           SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
47956           Res = DAG.getBitcast(ShufVT, Res);
47957           Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
47958           return DAG.getBitcast(VT, Res);
47959         }
47960       }
47961     }
47962   }
47963 
47964   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
47965   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47966     // If either/both ops are a shuffle that can scale to v2x64,
47967     // then see if we can perform this as a v4x32 post shuffle.
47968     SmallVector<SDValue> Ops0, Ops1;
47969     SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
47970     bool IsShuf0 =
47971         getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47972         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47973         all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47974     bool IsShuf1 =
47975         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47976         scaleShuffleElements(Mask1, 2, ScaledMask1) &&
47977         all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47978     if (IsShuf0 || IsShuf1) {
47979       if (!IsShuf0) {
47980         Ops0.assign({BC0});
47981         ScaledMask0.assign({0, 1});
47982       }
47983       if (!IsShuf1) {
47984         Ops1.assign({BC1});
47985         ScaledMask1.assign({0, 1});
47986       }
47987 
47988       SDValue LHS, RHS;
47989       int PostShuffle[4] = {-1, -1, -1, -1};
47990       auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
47991         if (M < 0)
47992           return true;
47993         Idx = M % 2;
47994         SDValue Src = Ops[M / 2];
47995         if (!LHS || LHS == Src) {
47996           LHS = Src;
47997           return true;
47998         }
47999         if (!RHS || RHS == Src) {
48000           Idx += 2;
48001           RHS = Src;
48002           return true;
48003         }
48004         return false;
48005       };
48006       if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
48007           FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
48008           FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
48009           FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
48010         LHS = DAG.getBitcast(SrcVT, LHS);
48011         RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
48012         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
48013         SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
48014         Res = DAG.getBitcast(ShufVT, Res);
48015         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
48016         return DAG.getBitcast(VT, Res);
48017       }
48018     }
48019   }
48020 
48021   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
48022   if (VT.is256BitVector() && Subtarget.hasInt256()) {
48023     SmallVector<int> Mask0, Mask1;
48024     SmallVector<SDValue> Ops0, Ops1;
48025     SmallVector<int, 2> ScaledMask0, ScaledMask1;
48026     if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
48027         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
48028         !Ops0.empty() && !Ops1.empty() &&
48029         all_of(Ops0,
48030                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
48031         all_of(Ops1,
48032                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
48033         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
48034         scaleShuffleElements(Mask1, 2, ScaledMask1)) {
48035       SDValue Op00 = peekThroughBitcasts(Ops0.front());
48036       SDValue Op10 = peekThroughBitcasts(Ops1.front());
48037       SDValue Op01 = peekThroughBitcasts(Ops0.back());
48038       SDValue Op11 = peekThroughBitcasts(Ops1.back());
48039       if ((Op00 == Op11) && (Op01 == Op10)) {
48040         std::swap(Op10, Op11);
48041         ShuffleVectorSDNode::commuteMask(ScaledMask1);
48042       }
48043       if ((Op00 == Op10) && (Op01 == Op11)) {
48044         const int Map[4] = {0, 2, 1, 3};
48045         SmallVector<int, 4> ShuffleMask(
48046             {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
48047              Map[ScaledMask1[1]]});
48048         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
48049         SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
48050                                   DAG.getBitcast(SrcVT, Op01));
48051         Res = DAG.getBitcast(ShufVT, Res);
48052         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
48053         return DAG.getBitcast(VT, Res);
48054       }
48055     }
48056   }
48057 
48058   return SDValue();
48059 }
48060 
48061 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
48062                                  TargetLowering::DAGCombinerInfo &DCI,
48063                                  const X86Subtarget &Subtarget) {
48064   unsigned Opcode = N->getOpcode();
48065   assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
48066          "Unexpected pack opcode");
48067 
48068   EVT VT = N->getValueType(0);
48069   SDValue N0 = N->getOperand(0);
48070   SDValue N1 = N->getOperand(1);
48071   unsigned NumDstElts = VT.getVectorNumElements();
48072   unsigned DstBitsPerElt = VT.getScalarSizeInBits();
48073   unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
48074   assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
48075          N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
48076          "Unexpected PACKSS/PACKUS input type");
48077 
48078   bool IsSigned = (X86ISD::PACKSS == Opcode);
48079 
48080   // Constant Folding.
48081   APInt UndefElts0, UndefElts1;
48082   SmallVector<APInt, 32> EltBits0, EltBits1;
48083   if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
48084       (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
48085       getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
48086       getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
48087     unsigned NumLanes = VT.getSizeInBits() / 128;
48088     unsigned NumSrcElts = NumDstElts / 2;
48089     unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
48090     unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
48091 
48092     APInt Undefs(NumDstElts, 0);
48093     SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
48094     for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
48095       for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
48096         unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
48097         auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
48098         auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
48099 
48100         if (UndefElts[SrcIdx]) {
48101           Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
48102           continue;
48103         }
48104 
48105         APInt &Val = EltBits[SrcIdx];
48106         if (IsSigned) {
48107           // PACKSS: Truncate signed value with signed saturation.
48108           // Source values less than dst minint are saturated to minint.
48109           // Source values greater than dst maxint are saturated to maxint.
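                // e.g. packing i16 -> i8: 300 saturates to 127, -300 saturates
                // to -128, and 100 is truncated unchanged.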
48110           if (Val.isSignedIntN(DstBitsPerElt))
48111             Val = Val.trunc(DstBitsPerElt);
48112           else if (Val.isNegative())
48113             Val = APInt::getSignedMinValue(DstBitsPerElt);
48114           else
48115             Val = APInt::getSignedMaxValue(DstBitsPerElt);
48116         } else {
48117           // PACKUS: Truncate signed value with unsigned saturation.
48118           // Source values less than zero are saturated to zero.
48119           // Source values greater than dst maxuint are saturated to maxuint.
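                // e.g. packing i16 -> i8: 300 saturates to 255, -300 saturates
                // to 0, and 100 is truncated unchanged.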
48120           if (Val.isIntN(DstBitsPerElt))
48121             Val = Val.trunc(DstBitsPerElt);
48122           else if (Val.isNegative())
48123             Val = APInt::getZero(DstBitsPerElt);
48124           else
48125             Val = APInt::getAllOnes(DstBitsPerElt);
48126         }
48127         Bits[Lane * NumDstEltsPerLane + Elt] = Val;
48128       }
48129     }
48130 
48131     return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
48132   }
48133 
48134   // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
48135   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
48136     return V;
48137 
48138   // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
48139   // truncate to create a larger truncate.
48140   if (Subtarget.hasAVX512() &&
48141       N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
48142       N0.getOperand(0).getValueType() == MVT::v8i32) {
48143     if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
48144         (!IsSigned &&
48145          DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
48146       if (Subtarget.hasVLX())
48147         return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
48148 
48149       // Widen input to v16i32 so we can truncate that.
48150       SDLoc dl(N);
48151       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
48152                                    N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
48153       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
48154     }
48155   }
48156 
48157   // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
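        // One illustrative case with a v16i8 result:
        //   packuswb (zext v8i8 X to v8i16), (zext v8i8 Y to v8i16)
        //     -> concat_vectors X, Y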
48158   if (VT.is128BitVector()) {
48159     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
48160     SDValue Src0, Src1;
48161     if (N0.getOpcode() == ExtOpc &&
48162         N0.getOperand(0).getValueType().is64BitVector() &&
48163         N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
48164       Src0 = N0.getOperand(0);
48165     }
48166     if (N1.getOpcode() == ExtOpc &&
48167         N1.getOperand(0).getValueType().is64BitVector() &&
48168         N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
48169       Src1 = N1.getOperand(0);
48170     }
48171     if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
48172       assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
48173       Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
48174       Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
48175       return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
48176     }
48177 
48178     // Try again with pack(*_extend_vector_inreg, undef).
48179     unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
48180                                     : ISD::ZERO_EXTEND_VECTOR_INREG;
48181     if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
48182         N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
48183       return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
48184                                     DAG);
48185   }
48186 
48187   // Attempt to combine as shuffle.
48188   SDValue Op(N, 0);
48189   if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48190     return Res;
48191 
48192   return SDValue();
48193 }
48194 
48195 static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
48196                                     TargetLowering::DAGCombinerInfo &DCI,
48197                                     const X86Subtarget &Subtarget) {
48198   assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
48199           X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
48200          "Unexpected horizontal add/sub opcode");
48201 
48202   if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
48203     MVT VT = N->getSimpleValueType(0);
48204     SDValue LHS = N->getOperand(0);
48205     SDValue RHS = N->getOperand(1);
48206 
48207     // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y))).
48208     if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
48209         LHS.getOpcode() == RHS.getOpcode() &&
48210         LHS.getValueType() == RHS.getValueType() &&
48211         N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
48212       SDValue LHS0 = LHS.getOperand(0);
48213       SDValue LHS1 = LHS.getOperand(1);
48214       SDValue RHS0 = RHS.getOperand(0);
48215       SDValue RHS1 = RHS.getOperand(1);
48216       if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
48217           (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
48218         SDLoc DL(N);
48219         SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
48220                                   LHS0.isUndef() ? LHS1 : LHS0,
48221                                   RHS0.isUndef() ? RHS1 : RHS0);
48222         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
48223         Res = DAG.getBitcast(ShufVT, Res);
48224         SDValue NewLHS =
48225             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
48226                         getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
48227         SDValue NewRHS =
48228             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
48229                         getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
48230         return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
48231                            DAG.getBitcast(VT, NewRHS));
48232       }
48233     }
48234   }
48235 
48236   // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
48237   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
48238     return V;
48239 
48240   return SDValue();
48241 }
48242 
48243 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
48244                                      TargetLowering::DAGCombinerInfo &DCI,
48245                                      const X86Subtarget &Subtarget) {
48246   assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
48247           X86ISD::VSRL == N->getOpcode()) &&
48248          "Unexpected shift opcode");
48249   EVT VT = N->getValueType(0);
48250   SDValue N0 = N->getOperand(0);
48251   SDValue N1 = N->getOperand(1);
48252 
48253   // Shift zero -> zero.
48254   if (ISD::isBuildVectorAllZeros(N0.getNode()))
48255     return DAG.getConstant(0, SDLoc(N), VT);
48256 
48257   // Detect constant shift amounts.
48258   APInt UndefElts;
48259   SmallVector<APInt, 32> EltBits;
48260   if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
48261     unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
48262     return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
48263                                       EltBits[0].getZExtValue(), DAG);
48264   }
48265 
48266   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48267   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
48268   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
48269     return SDValue(N, 0);
48270 
48271   return SDValue();
48272 }
48273 
48274 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
48275                                      TargetLowering::DAGCombinerInfo &DCI,
48276                                      const X86Subtarget &Subtarget) {
48277   unsigned Opcode = N->getOpcode();
48278   assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
48279           X86ISD::VSRLI == Opcode) &&
48280          "Unexpected shift opcode");
48281   bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
48282   EVT VT = N->getValueType(0);
48283   SDValue N0 = N->getOperand(0);
48284   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
48285   assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
48286          "Unexpected value type");
48287   assert(N->getOperand(1).getValueType() == MVT::i8 &&
48288          "Unexpected shift amount type");
48289 
48290   // (shift undef, X) -> 0
48291   if (N0.isUndef())
48292     return DAG.getConstant(0, SDLoc(N), VT);
48293 
48294   // Out of range logical bit shifts are guaranteed to be zero.
48295   // Out of range arithmetic bit shifts splat the sign bit.
48296   unsigned ShiftVal = N->getConstantOperandVal(1);
48297   if (ShiftVal >= NumBitsPerElt) {
48298     if (LogicalShift)
48299       return DAG.getConstant(0, SDLoc(N), VT);
48300     ShiftVal = NumBitsPerElt - 1;
48301   }
48302 
48303   // (shift X, 0) -> X
48304   if (!ShiftVal)
48305     return N0;
48306 
48307   // (shift 0, C) -> 0
48308   if (ISD::isBuildVectorAllZeros(N0.getNode()))
48309     // N0 is all zeros or undef. We guarantee that the bits shifted into the
48310     // result are all zeros, not undef.
48311     return DAG.getConstant(0, SDLoc(N), VT);
48312 
48313   // (VSRAI -1, C) -> -1
48314   if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
48315     // N0 is all ones or undef. We guarantee that the bits shifted into the
48316     // result are all ones, not undef.
48317     return DAG.getConstant(-1, SDLoc(N), VT);
48318 
48319   auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
48320     unsigned NewShiftVal = Amt0 + Amt1;
48321     if (NewShiftVal >= NumBitsPerElt) {
48322       // Out of range logical bit shifts are guaranteed to be zero.
48323       // Out of range arithmetic bit shifts splat the sign bit.
48324       if (LogicalShift)
48325         return DAG.getConstant(0, SDLoc(N), VT);
48326       NewShiftVal = NumBitsPerElt - 1;
48327     }
48328     return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
48329                        DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
48330   };
48331 
48332   // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
48333   if (Opcode == N0.getOpcode())
48334     return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));
48335 
48336   // (shl (add X, X), C) -> (shl X, (C + 1))
48337   if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
48338       N0.getOperand(0) == N0.getOperand(1))
48339     return MergeShifts(N0.getOperand(0), ShiftVal, 1);
48340 
48341   // We can decode 'whole byte' logical bit shifts as shuffles.
48342   if (LogicalShift && (ShiftVal % 8) == 0) {
48343     SDValue Op(N, 0);
48344     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48345       return Res;
48346   }
48347 
48348   // Constant Folding.
48349   APInt UndefElts;
48350   SmallVector<APInt, 32> EltBits;
48351   if (N->isOnlyUserOf(N0.getNode()) &&
48352       getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
48353     assert(EltBits.size() == VT.getVectorNumElements() &&
48354            "Unexpected shift value type");
48355     // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
48356     // created an undef input due to no input bits being demanded, but the user
48357     // still expects 0 in other bits.
48358     for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
48359       APInt &Elt = EltBits[i];
48360       if (UndefElts[i])
48361         Elt = 0;
48362       else if (X86ISD::VSHLI == Opcode)
48363         Elt <<= ShiftVal;
48364       else if (X86ISD::VSRAI == Opcode)
48365         Elt.ashrInPlace(ShiftVal);
48366       else
48367         Elt.lshrInPlace(ShiftVal);
48368     }
48369     // Reset undef elements since they were zeroed above.
48370     UndefElts = 0;
48371     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
48372   }
48373 
48374   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48375   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
48376                                DCI))
48377     return SDValue(N, 0);
48378 
48379   return SDValue();
48380 }
48381 
48382 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
48383                                    TargetLowering::DAGCombinerInfo &DCI,
48384                                    const X86Subtarget &Subtarget) {
48385   EVT VT = N->getValueType(0);
48386   unsigned Opcode = N->getOpcode();
48387   assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
48388           (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
48389           Opcode == ISD::INSERT_VECTOR_ELT) &&
48390          "Unexpected vector insertion");
48391 
48392   // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
48393   if (Opcode == ISD::INSERT_VECTOR_ELT && N->getOperand(0).isUndef() &&
48394       isNullConstant(N->getOperand(2)))
48395     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, N->getOperand(1));
48396 
48397   if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
48398     unsigned NumBitsPerElt = VT.getScalarSizeInBits();
48399     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48400     if (TLI.SimplifyDemandedBits(SDValue(N, 0),
48401                                  APInt::getAllOnes(NumBitsPerElt), DCI))
48402       return SDValue(N, 0);
48403   }
48404 
48405   // Attempt to combine insertion patterns to a shuffle.
48406   if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
48407     SDValue Op(N, 0);
48408     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48409       return Res;
48410   }
48411 
48412   return SDValue();
48413 }
48414 
48415 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
48416 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
48417 /// OR -> CMPNEQSS.
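      /// A sketch of one handled pattern (scalar f32 operands assumed):
      ///   and (setcc COND_E, (fcmp A, B)), (setcc COND_NP, (fcmp A, B))
      ///     -> the low bit of (cmpeqss A, B)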
48418 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
48419                                    TargetLowering::DAGCombinerInfo &DCI,
48420                                    const X86Subtarget &Subtarget) {
48421   unsigned opcode;
48422 
48423   // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
48424   // we're requiring SSE2 for both.
48425   if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
48426     SDValue N0 = N->getOperand(0);
48427     SDValue N1 = N->getOperand(1);
48428     SDValue CMP0 = N0.getOperand(1);
48429     SDValue CMP1 = N1.getOperand(1);
48430     SDLoc DL(N);
48431 
48432     // The SETCCs should both refer to the same CMP.
48433     if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
48434       return SDValue();
48435 
48436     SDValue CMP00 = CMP0->getOperand(0);
48437     SDValue CMP01 = CMP0->getOperand(1);
48438     EVT     VT    = CMP00.getValueType();
48439 
48440     if (VT == MVT::f32 || VT == MVT::f64 ||
48441         (VT == MVT::f16 && Subtarget.hasFP16())) {
48442       bool ExpectingFlags = false;
48443       // Check for any users that want flags:
48444       for (const SDNode *U : N->uses()) {
48445         if (ExpectingFlags)
48446           break;
48447 
48448         switch (U->getOpcode()) {
48449         default:
48450         case ISD::BR_CC:
48451         case ISD::BRCOND:
48452         case ISD::SELECT:
48453           ExpectingFlags = true;
48454           break;
48455         case ISD::CopyToReg:
48456         case ISD::SIGN_EXTEND:
48457         case ISD::ZERO_EXTEND:
48458         case ISD::ANY_EXTEND:
48459           break;
48460         }
48461       }
48462 
48463       if (!ExpectingFlags) {
48464         enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
48465         enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
48466 
48467         if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
48468           X86::CondCode tmp = cc0;
48469           cc0 = cc1;
48470           cc1 = tmp;
48471         }
48472 
48473         if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
48474             (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
48475           // FIXME: need symbolic constants for these magic numbers.
48476           // See X86ATTInstPrinter.cpp:printSSECC().
48477           unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
48478           if (Subtarget.hasAVX512()) {
48479             SDValue FSetCC =
48480                 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
48481                             DAG.getTargetConstant(x86cc, DL, MVT::i8));
48482             // Need to fill with zeros to ensure the bitcast will produce zeroes
48483             // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
48484             SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
48485                                       DAG.getConstant(0, DL, MVT::v16i1),
48486                                       FSetCC, DAG.getIntPtrConstant(0, DL));
48487             return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
48488                                       N->getSimpleValueType(0));
48489           }
48490           SDValue OnesOrZeroesF =
48491               DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
48492                           CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
48493 
48494           bool is64BitFP = (CMP00.getValueType() == MVT::f64);
48495           MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
48496 
48497           if (is64BitFP && !Subtarget.is64Bit()) {
48498             // On a 32-bit target, we cannot bitcast the 64-bit float to a
48499             // 64-bit integer, since that's not a legal type. Since
48500             // OnesOrZeroesF is all ones or all zeroes, we don't need all the
48501             // bits, but can do this little dance to extract the lowest 32 bits
48502             // and work with those going forward.
48503             SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
48504                                            OnesOrZeroesF);
48505             SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
48506             OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
48507                                         Vector32, DAG.getIntPtrConstant(0, DL));
48508             IntVT = MVT::i32;
48509           }
48510 
48511           SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
48512           SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
48513                                       DAG.getConstant(1, DL, IntVT));
48514           SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
48515                                               ANDed);
48516           return OneBitOfTruth;
48517         }
48518       }
48519     }
48520   }
48521   return SDValue();
48522 }
48523 
48524 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
48525 static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
48526   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
48527 
48528   MVT VT = N->getSimpleValueType(0);
48529   if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
48530     return SDValue();
48531 
48532   SDValue X, Y;
48533   SDValue N0 = N->getOperand(0);
48534   SDValue N1 = N->getOperand(1);
48535 
48536   if (SDValue Not = IsNOT(N0, DAG)) {
48537     X = Not;
48538     Y = N1;
48539   } else if (SDValue Not = IsNOT(N1, DAG)) {
48540     X = Not;
48541     Y = N0;
48542   } else
48543     return SDValue();
48544 
48545   X = DAG.getBitcast(VT, X);
48546   Y = DAG.getBitcast(VT, Y);
48547   return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
48548 }
48549 
48550 /// Try to fold:
48551 ///   and (vector_shuffle<Z,...,Z>
48552 ///            (insert_vector_elt undef, (xor X, -1), Z), undef), Y
48553 ///   ->
48554 ///   andnp (vector_shuffle<Z,...,Z>
48555 ///              (insert_vector_elt undef, X, Z), undef), Y
48556 static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
48557                                     const X86Subtarget &Subtarget) {
48558   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
48559 
48560   EVT VT = N->getValueType(0);
48561   // Do not split 256- and 512-bit vectors with SSE2, as that overwrites the
48562   // original value and requires extra moves.
48563   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
48564         ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
48565     return SDValue();
48566 
48567   auto GetNot = [&DAG](SDValue V) {
48568     auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
48569     // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
48570     // end-users are ISD::AND including cases
48571     // (and(extract_vector_element(SVN), Y)).
48572     if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
48573         !SVN->getOperand(1).isUndef()) {
48574       return SDValue();
48575     }
48576     SDValue IVEN = SVN->getOperand(0);
48577     if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
48578         !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
48579       return SDValue();
48580     if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
48581         IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
48582       return SDValue();
48583     SDValue Src = IVEN.getOperand(1);
48584     if (SDValue Not = IsNOT(Src, DAG)) {
48585       SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
48586       SDValue NotIVEN =
48587           DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
48588                       IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
48589       return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
48590                                   SVN->getOperand(1), SVN->getMask());
48591     }
48592     return SDValue();
48593   };
48594 
48595   SDValue X, Y;
48596   SDValue N0 = N->getOperand(0);
48597   SDValue N1 = N->getOperand(1);
48598 
48599   if (SDValue Not = GetNot(N0)) {
48600     X = Not;
48601     Y = N1;
48602   } else if (SDValue Not = GetNot(N1)) {
48603     X = Not;
48604     Y = N0;
48605   } else
48606     return SDValue();
48607 
48608   X = DAG.getBitcast(VT, X);
48609   Y = DAG.getBitcast(VT, Y);
48610   SDLoc DL(N);
48611   // We do not split for SSE at all, but we need to split vectors for AVX1 and
48612   // AVX2.
48613   if (!Subtarget.useAVX512Regs() && VT.is512BitVector()) {
48614     SDValue LoX, HiX;
48615     std::tie(LoX, HiX) = splitVector(X, DAG, DL);
48616     SDValue LoY, HiY;
48617     std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
48618     EVT SplitVT = LoX.getValueType();
48619     SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
48620     SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
48621     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
48622   }
48623   return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
48624 }
48625 
48626 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
48627 // logical operations, like in the example below.
48628 //   or (and (truncate x, truncate y)),
48629 //      (xor (truncate z, build_vector (constants)))
48630 // Given a target type \p VT, we generate
48631 //   or (and x, y), (xor z, zext(build_vector (constants)))
48632 // given x, y and z are of type \p VT. We can do so if the operands are either
48633 // truncates from VT types, or the second operand is a vector of constants or
48634 // can be recursively promoted.
48635 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
48636                                      unsigned Depth) {
48637   // Limit recursion to avoid excessive compile times.
48638   if (Depth >= SelectionDAG::MaxRecursionDepth)
48639     return SDValue();
48640 
48641   if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
48642       N->getOpcode() != ISD::OR)
48643     return SDValue();
48644 
48645   SDValue N0 = N->getOperand(0);
48646   SDValue N1 = N->getOperand(1);
48647   SDLoc DL(N);
48648 
48649   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48650   if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
48651     return SDValue();
48652 
48653   if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
48654     N0 = NN0;
48655   else {
48656     // The Left side has to be a trunc.
48657     if (N0.getOpcode() != ISD::TRUNCATE)
48658       return SDValue();
48659 
48660     // The type of the truncated inputs.
48661     if (N0.getOperand(0).getValueType() != VT)
48662       return SDValue();
48663 
48664     N0 = N0.getOperand(0);
48665   }
48666 
48667   if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
48668     N1 = NN1;
48669   else {
48670     // The right side has to be a 'trunc' or a constant vector.
48671     bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
48672                     N1.getOperand(0).getValueType() == VT;
48673     if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
48674       return SDValue();
48675 
48676     if (RHSTrunc)
48677       N1 = N1.getOperand(0);
48678     else
48679       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
48680   }
48681 
48682   return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
48683 }
48684 
48685 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
48686 // register. In most cases we actually compare or select YMM-sized registers
48687 // and mixing the two types creates horrible code. This method optimizes
48688 // some of the transition sequences.
48689 // Even with AVX-512 this is still useful for removing casts around logical
48690 // operations on vXi1 mask types.
48691 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
48692                                      const X86Subtarget &Subtarget) {
48693   EVT VT = N->getValueType(0);
48694   assert(VT.isVector() && "Expected vector type");
48695 
48696   SDLoc DL(N);
48697   assert((N->getOpcode() == ISD::ANY_EXTEND ||
48698           N->getOpcode() == ISD::ZERO_EXTEND ||
48699           N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
48700 
48701   SDValue Narrow = N->getOperand(0);
48702   EVT NarrowVT = Narrow.getValueType();
48703 
48704   // Generate the wide operation.
48705   SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
48706   if (!Op)
48707     return SDValue();
48708   switch (N->getOpcode()) {
48709   default: llvm_unreachable("Unexpected opcode");
48710   case ISD::ANY_EXTEND:
48711     return Op;
48712   case ISD::ZERO_EXTEND:
48713     return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
48714   case ISD::SIGN_EXTEND:
48715     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
48716                        Op, DAG.getValueType(NarrowVT));
48717   }
48718 }
48719 
48720 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
48721   unsigned FPOpcode;
48722   switch (Opcode) {
48723   default: llvm_unreachable("Unexpected input node for FP logic conversion");
48724   case ISD::AND: FPOpcode = X86ISD::FAND; break;
48725   case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
48726   case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
48727   }
48728   return FPOpcode;
48729 }
48730 
48731 /// If both input operands of a logic op are being cast from floating-point
48732 /// types or FP compares, try to convert this into a floating-point logic node
48733 /// to avoid unnecessary moves from SSE to integer registers.
48734 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
48735                                         TargetLowering::DAGCombinerInfo &DCI,
48736                                         const X86Subtarget &Subtarget) {
48737   EVT VT = N->getValueType(0);
48738   SDValue N0 = N->getOperand(0);
48739   SDValue N1 = N->getOperand(1);
48740   SDLoc DL(N);
48741 
48742   if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
48743         (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
48744     return SDValue();
48745 
48746   SDValue N00 = N0.getOperand(0);
48747   SDValue N10 = N1.getOperand(0);
48748   EVT N00Type = N00.getValueType();
48749   EVT N10Type = N10.getValueType();
48750 
48751   // Ensure that both types are the same and are legal scalar fp types.
48752   if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
48753                               (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
48754                               (Subtarget.hasFP16() && N00Type == MVT::f16)))
48755     return SDValue();
48756 
48757   if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
48758     unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
48759     SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
48760     return DAG.getBitcast(VT, FPLogic);
48761   }
48762 
48763   if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
48764       !N1.hasOneUse())
48765     return SDValue();
48766 
48767   ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
48768   ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();
48769 
48770   // The vector ISA for FP predicates is incomplete before AVX, so converting
48771   // COMIS* to CMPS* may not be a win before AVX.
48772   if (!Subtarget.hasAVX() &&
48773       !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
48774     return SDValue();
48775 
48776   // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
48777   // and vector logic:
48778   // logic (setcc N00, N01), (setcc N10, N11) -->
48779   // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
48780   unsigned NumElts = 128 / N00Type.getSizeInBits();
48781   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
48782   EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
48783   SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
48784   SDValue N01 = N0.getOperand(1);
48785   SDValue N11 = N1.getOperand(1);
48786   SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
48787   SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
48788   SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
48789   SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
48790   SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
48791   SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
48792   SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
48793   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
48794 }
48795 
48796 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
48797 // to reduce XMM->GPR traffic.
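      // e.g. (and (movmsk V0), (movmsk V1)) -> (movmsk (and V0, V1)), provided both
      // MOVMSKs have a single use and V0/V1 share the same size and element width.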
48798 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
48799   unsigned Opc = N->getOpcode();
48800   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48801          "Unexpected bit opcode");
48802 
48803   SDValue N0 = N->getOperand(0);
48804   SDValue N1 = N->getOperand(1);
48805 
48806   // Both operands must be single use MOVMSK.
48807   if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
48808       N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
48809     return SDValue();
48810 
48811   SDValue Vec0 = N0.getOperand(0);
48812   SDValue Vec1 = N1.getOperand(0);
48813   EVT VecVT0 = Vec0.getValueType();
48814   EVT VecVT1 = Vec1.getValueType();
48815 
48816   // Both MOVMSK operands must be from vectors of the same size and same element
48817   // size, but it's OK if they differ in fp/int type.
48818   if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
48819       VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
48820     return SDValue();
48821 
48822   SDLoc DL(N);
48823   unsigned VecOpc =
48824       VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
48825   SDValue Result =
48826       DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
48827   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48828 }
48829 
48830 // Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
48831 // NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
48832 // handles in InstCombine.
48833 static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
48834   unsigned Opc = N->getOpcode();
48835   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48836          "Unexpected bit opcode");
48837 
48838   SDValue N0 = N->getOperand(0);
48839   SDValue N1 = N->getOperand(1);
48840   EVT VT = N->getValueType(0);
48841 
48842   // Both operands must be single use.
48843   if (!N0.hasOneUse() || !N1.hasOneUse())
48844     return SDValue();
48845 
48846   // Search for matching shifts.
48847   SDValue BC0 = peekThroughOneUseBitcasts(N0);
48848   SDValue BC1 = peekThroughOneUseBitcasts(N1);
48849 
48850   unsigned BCOpc = BC0.getOpcode();
48851   EVT BCVT = BC0.getValueType();
48852   if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
48853     return SDValue();
48854 
48855   switch (BCOpc) {
48856   case X86ISD::VSHLI:
48857   case X86ISD::VSRLI:
48858   case X86ISD::VSRAI: {
48859     if (BC0.getOperand(1) != BC1.getOperand(1))
48860       return SDValue();
48861 
48862     SDLoc DL(N);
48863     SDValue BitOp =
48864         DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
48865     SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
48866     return DAG.getBitcast(VT, Shift);
48867   }
48868   }
48869 
48870   return SDValue();
48871 }
48872 
48873 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
48874 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
48875 /// with a shift-right to eliminate loading the vector constant mask value.
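/// For example (illustrative sketch): if each v4i32 element of X is known to
/// be all-ones or zero, then
///   (and X, <1,1,1,1>) --> (vsrli X, 31)
/// avoids materializing the <1,1,1,1> constant mask.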
48876 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
48877                                      const X86Subtarget &Subtarget) {
48878   SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
48879   SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
48880   EVT VT = Op0.getValueType();
48881   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
48882     return SDValue();
48883 
48884   // Try to convert an "is positive" signbit masking operation into arithmetic
48885   // shift and "andn". This saves a materialization of a -1 vector constant.
48886   // The "is negative" variant should be handled more generally because it only
48887   // requires "and" rather than "andn":
48888   // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
48889   //
48890   // This is limited to the original type to avoid producing even more bitcasts.
48891   // If the bitcasts can't be eliminated, then it is unlikely that this fold
48892   // will be profitable.
48893   if (N->getValueType(0) == VT &&
48894       supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
48895     SDValue X, Y;
48896     if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
48897         isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
48898       X = Op1.getOperand(0);
48899       Y = Op0;
48900     } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
48901                isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
48902       X = Op0.getOperand(0);
48903       Y = Op1;
48904     }
48905     if (X && Y) {
48906       SDLoc DL(N);
48907       SDValue Sra =
48908           getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
48909                                      VT.getScalarSizeInBits() - 1, DAG);
48910       return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
48911     }
48912   }
48913 
48914   APInt SplatVal;
48915   if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
48916       !SplatVal.isMask())
48917     return SDValue();
48918 
48919   // Don't prevent creation of ANDN.
48920   if (isBitwiseNot(Op0))
48921     return SDValue();
48922 
48923   if (!supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRL))
48924     return SDValue();
48925 
48926   unsigned EltBitWidth = VT.getScalarSizeInBits();
48927   if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
48928     return SDValue();
48929 
48930   SDLoc DL(N);
48931   unsigned ShiftVal = SplatVal.countTrailingOnes();
48932   SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
48933   SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
48934   return DAG.getBitcast(N->getValueType(0), Shift);
48935 }
48936 
48937 // Get the index node from the lowered DAG of a GEP IR instruction with one
48938 // indexing dimension.
48939 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
48940   if (Ld->isIndexed())
48941     return SDValue();
48942 
48943   SDValue Base = Ld->getBasePtr();
48944 
48945   if (Base.getOpcode() != ISD::ADD)
48946     return SDValue();
48947 
48948   SDValue ShiftedIndex = Base.getOperand(0);
48949 
48950   if (ShiftedIndex.getOpcode() != ISD::SHL)
48951     return SDValue();
48952 
48953   return ShiftedIndex.getOperand(0);
48954 
48955 }
48956 
48957 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
48958   if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
48959     switch (VT.getSizeInBits()) {
48960     default: return false;
48961     case 64: return Subtarget.is64Bit();
48962     case 32: return true;
48963     }
48964   }
48965   return false;
48966 }
48967 
48968 // This function recognizes cases where the X86 bzhi instruction can replace an
48969 // 'and-load' sequence.
48970 // When an integer value is loaded from an array of constants defined as
48971 // follows:
48972 //
48973 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
48974 //
48975 // and a bitwise AND is then applied to the loaded value and another input,
48976 // the sequence is equivalent to performing bzhi (zero high bits) on that
48977 // input, using the same index as the load.
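// A minimal C sketch of code that should hit this combine (editor's example;
// the names are hypothetical):
//
//   static const unsigned LowMask[32] = {0x0, 0x1, 0x3, 0x7, /* ... */};
//   unsigned low_bits(unsigned x, unsigned n) { return x & LowMask[n]; }
//
// With BMI2 available, the load+and can be selected as a single BZHI.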
48978 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
48979                                     const X86Subtarget &Subtarget) {
48980   MVT VT = Node->getSimpleValueType(0);
48981   SDLoc dl(Node);
48982 
48983   // Check if subtarget has BZHI instruction for the node's type
48984   if (!hasBZHI(Subtarget, VT))
48985     return SDValue();
48986 
48987   // Try matching the pattern for both operands.
48988   for (unsigned i = 0; i < 2; i++) {
48989     SDValue N = Node->getOperand(i);
48990     LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
48991 
48992     // Bail out if the operand is not a load instruction.
48993     if (!Ld)
48994       return SDValue();
48995 
48996     const Value *MemOp = Ld->getMemOperand()->getValue();
48997 
48998     if (!MemOp)
48999       return SDValue();
49000 
49001     if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
49002       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
49003         if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
49004 
49005           Constant *Init = GV->getInitializer();
49006           Type *Ty = Init->getType();
49007           if (!isa<ConstantDataArray>(Init) ||
49008               !Ty->getArrayElementType()->isIntegerTy() ||
49009               Ty->getArrayElementType()->getScalarSizeInBits() !=
49010                   VT.getSizeInBits() ||
49011               Ty->getArrayNumElements() >
49012                   Ty->getArrayElementType()->getScalarSizeInBits())
49013             continue;
49014 
49015           // Check if the array's constant elements are suitable to our case.
49016           uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
49017           bool ConstantsMatch = true;
49018           for (uint64_t j = 0; j < ArrayElementCount; j++) {
49019             auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
49020             if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
49021               ConstantsMatch = false;
49022               break;
49023             }
49024           }
49025           if (!ConstantsMatch)
49026             continue;
49027 
49028           // Do the transformation (For 32-bit type):
49029           // -> (and (load arr[idx]), inp)
49030           // <- (and inp, (srl 0xFFFFFFFF, (sub 32, idx)))
49031           //    that will be replaced with one bzhi instruction.
49032           SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
49033           SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
49034 
49035           // Get the Node which indexes into the array.
49036           SDValue Index = getIndexFromUnindexedLoad(Ld);
49037           if (!Index)
49038             return SDValue();
49039           Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
49040 
49041           SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
49042           Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
49043 
49044           SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
49045           SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
49046 
49047           return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
49048         }
49049       }
49050     }
49051   }
49052   return SDValue();
49053 }
49054 
49055 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
49056 // where C is a mask containing the same number of bits as the setcc and
49057 // where the setcc will freely zero the upper bits of the k-register. We can
49058 // replace the undef in the concat with 0s and remove the AND. This mainly
49059 // helps with v2i1/v4i1 setcc being cast to scalar.
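// Illustrative instance (editor's sketch): for a v4i1 setcc result S,
//   (and (i8 (bitcast (v8i1 (concat_vectors S, undef)))), 0xF)
// can be rebuilt as
//   (i8 (bitcast (v8i1 (concat_vectors S, zero))))
// because the setcc already leaves the unused mask bits zero.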
49060 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
49061                                              const X86Subtarget &Subtarget) {
49062   assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
49063 
49064   EVT VT = N->getValueType(0);
49065 
49066   // Make sure this is an AND with constant. We will check the value of the
49067   // constant later.
49068   auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
49069   if (!C1)
49070     return SDValue();
49071 
49072   // This is implied by the ConstantSDNode.
49073   assert(!VT.isVector() && "Expected scalar VT!");
49074 
49075   SDValue Src = N->getOperand(0);
49076   if (!Src.hasOneUse())
49077     return SDValue();
49078 
49079   // (Optionally) peek through any_extend().
49080   if (Src.getOpcode() == ISD::ANY_EXTEND) {
49081     if (!Src.getOperand(0).hasOneUse())
49082       return SDValue();
49083     Src = Src.getOperand(0);
49084   }
49085 
49086   if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
49087     return SDValue();
49088 
49089   Src = Src.getOperand(0);
49090   EVT SrcVT = Src.getValueType();
49091 
49092   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49093   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
49094       !TLI.isTypeLegal(SrcVT))
49095     return SDValue();
49096 
49097   if (Src.getOpcode() != ISD::CONCAT_VECTORS)
49098     return SDValue();
49099 
49100   // We only care about the first subvector of the concat; we expect the
49101   // other subvectors to be ignored due to the AND if we make the change.
49102   SDValue SubVec = Src.getOperand(0);
49103   EVT SubVecVT = SubVec.getValueType();
49104 
49105   // The RHS of the AND should be a mask with as many bits as SubVec.
49106   if (!TLI.isTypeLegal(SubVecVT) ||
49107       !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
49108     return SDValue();
49109 
49110   // The first subvector should be a setcc with a legal result type or an
49111   // AND containing at least one setcc with a legal result type.
49112   auto IsLegalSetCC = [&](SDValue V) {
49113     if (V.getOpcode() != ISD::SETCC)
49114       return false;
49115     EVT SetccVT = V.getOperand(0).getValueType();
49116     if (!TLI.isTypeLegal(SetccVT) ||
49117         !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
49118       return false;
49119     if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
49120       return false;
49121     return true;
49122   };
49123   if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
49124                                  (IsLegalSetCC(SubVec.getOperand(0)) ||
49125                                   IsLegalSetCC(SubVec.getOperand(1))))))
49126     return SDValue();
49127 
49128   // We passed all the checks. Rebuild the concat_vectors with zeroes
49129   // and cast it back to VT.
49130   SDLoc dl(N);
49131   SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
49132                               DAG.getConstant(0, dl, SubVecVT));
49133   Ops[0] = SubVec;
49134   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
49135                                Ops);
49136   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
49137   return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
49138 }
49139 
49140 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
49141                           TargetLowering::DAGCombinerInfo &DCI,
49142                           const X86Subtarget &Subtarget) {
49143   SDValue N0 = N->getOperand(0);
49144   SDValue N1 = N->getOperand(1);
49145   EVT VT = N->getValueType(0);
49146   SDLoc dl(N);
49147   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49148 
49149   // If this is SSE1 only convert to FAND to avoid scalarization.
49150   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49151     return DAG.getBitcast(MVT::v4i32,
49152                           DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
49153                                       DAG.getBitcast(MVT::v4f32, N0),
49154                                       DAG.getBitcast(MVT::v4f32, N1)));
49155   }
49156 
49157   // Use a 32-bit and+zext if upper bits known zero.
49158   if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
49159     APInt HiMask = APInt::getHighBitsSet(64, 32);
49160     if (DAG.MaskedValueIsZero(N1, HiMask) ||
49161         DAG.MaskedValueIsZero(N0, HiMask)) {
49162       SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
49163       SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
49164       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
49165                          DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
49166     }
49167   }
49168 
49169   // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
49170   // TODO: Support multiple SrcOps.
49171   if (VT == MVT::i1) {
49172     SmallVector<SDValue, 2> SrcOps;
49173     SmallVector<APInt, 2> SrcPartials;
49174     if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
49175         SrcOps.size() == 1) {
49176       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49177       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49178       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49179       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49180         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49181       if (Mask) {
49182         assert(SrcPartials[0].getBitWidth() == NumElts &&
49183                "Unexpected partial reduction mask");
49184         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49185         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49186         return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
49187       }
49188     }
49189   }
49190 
49191   if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
49192     return V;
49193 
49194   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49195     return R;
49196 
49197   if (SDValue R = combineBitOpWithShift(N, DAG))
49198     return R;
49199 
49200   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49201     return FPLogic;
49202 
49203   if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
49204     return R;
49205 
49206   if (DCI.isBeforeLegalizeOps())
49207     return SDValue();
49208 
49209   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49210     return R;
49211 
49212   if (SDValue R = combineAndNotIntoANDNP(N, DAG))
49213     return R;
49214 
49215   if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
49216     return ShiftRight;
49217 
49218   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
49219     return R;
49220 
49221   // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
49222   // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
49223   // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
49224   if (VT.isVector() && getTargetConstantFromNode(N1)) {
49225     unsigned Opc0 = N0.getOpcode();
49226     if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
49227         getTargetConstantFromNode(N0.getOperand(1)) &&
49228         DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
49229         N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
49230       SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
49231       return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
49232     }
49233   }
49234 
49235   // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant;
49236   // this avoids a slow variable shift (moving the shift amount to ECX etc.).
49237   if (isOneConstant(N1) && N0->hasOneUse()) {
49238     SDValue Src = N0;
49239     while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
49240             Src.getOpcode() == ISD::TRUNCATE) &&
49241            Src.getOperand(0)->hasOneUse())
49242       Src = Src.getOperand(0);
49243     bool ContainsNOT = false;
49244     X86::CondCode X86CC = X86::COND_B;
49245     // Peek through AND(NOT(SRL(X,Y)),1).
49246     if (isBitwiseNot(Src)) {
49247       Src = Src.getOperand(0);
49248       X86CC = X86::COND_AE;
49249       ContainsNOT = true;
49250     }
49251     if (Src.getOpcode() == ISD::SRL &&
49252         !isa<ConstantSDNode>(Src.getOperand(1))) {
49253       SDValue BitNo = Src.getOperand(1);
49254       Src = Src.getOperand(0);
49255       // Peek through AND(SRL(NOT(X),Y),1).
49256       if (isBitwiseNot(Src)) {
49257         Src = Src.getOperand(0);
49258         X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
49259         ContainsNOT = true;
49260       }
49261       // If we have BMI2 then SHRX should be faster for i32/i64 cases.
49262       if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
49263         if (SDValue BT = getBT(Src, BitNo, dl, DAG))
49264           return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
49265     }
49266   }
49267 
49268   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49269     // Attempt to recursively combine a bitmask AND with shuffles.
49270     SDValue Op(N, 0);
49271     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49272       return Res;
49273 
49274     // If either operand is a constant mask, then only the elements that aren't
49275     // zero are actually demanded by the other operand.
49276     auto GetDemandedMasks = [&](SDValue Op) {
49277       APInt UndefElts;
49278       SmallVector<APInt> EltBits;
49279       int NumElts = VT.getVectorNumElements();
49280       int EltSizeInBits = VT.getScalarSizeInBits();
49281       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
49282       APInt DemandedElts = APInt::getAllOnes(NumElts);
49283       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
49284                                         EltBits)) {
49285         DemandedBits.clearAllBits();
49286         DemandedElts.clearAllBits();
49287         for (int I = 0; I != NumElts; ++I) {
49288           if (UndefElts[I]) {
49289             // We can't assume an undef src element gives an undef dst - the
49290             // other src might be zero.
49291             DemandedBits.setAllBits();
49292             DemandedElts.setBit(I);
49293           } else if (!EltBits[I].isZero()) {
49294             DemandedBits |= EltBits[I];
49295             DemandedElts.setBit(I);
49296           }
49297         }
49298       }
49299       return std::make_pair(DemandedBits, DemandedElts);
49300     };
49301     APInt Bits0, Elts0;
49302     APInt Bits1, Elts1;
49303     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
49304     std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
49305 
49306     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
49307         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
49308         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
49309         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
49310       if (N->getOpcode() != ISD::DELETED_NODE)
49311         DCI.AddToWorklist(N);
49312       return SDValue(N, 0);
49313     }
49314 
49315     SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
49316     SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
49317     if (NewN0 || NewN1)
49318       return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
49319                          NewN1 ? NewN1 : N1);
49320   }
49321 
49322   // Attempt to combine a scalar bitmask AND with an extracted shuffle.
49323   if ((VT.getScalarSizeInBits() % 8) == 0 &&
49324       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
49325       isa<ConstantSDNode>(N0.getOperand(1))) {
49326     SDValue BitMask = N1;
49327     SDValue SrcVec = N0.getOperand(0);
49328     EVT SrcVecVT = SrcVec.getValueType();
49329 
49330     // Check that the constant bitmask masks whole bytes.
49331     APInt UndefElts;
49332     SmallVector<APInt, 64> EltBits;
49333     if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
49334         getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
49335         llvm::all_of(EltBits, [](const APInt &M) {
49336           return M.isZero() || M.isAllOnes();
49337         })) {
49338       unsigned NumElts = SrcVecVT.getVectorNumElements();
49339       unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
49340       unsigned Idx = N0.getConstantOperandVal(1);
49341 
49342       // Create a root shuffle mask from the byte mask and the extracted index.
49343       SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
49344       for (unsigned i = 0; i != Scale; ++i) {
49345         if (UndefElts[i])
49346           continue;
49347         int VecIdx = Scale * Idx + i;
49348         ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
49349       }
49350 
49351       if (SDValue Shuffle = combineX86ShufflesRecursively(
49352               {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
49353               X86::MaxShuffleCombineDepth,
49354               /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
49355               /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
49356         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
49357                            N0.getOperand(1));
49358     }
49359   }
49360 
49361   return SDValue();
49362 }
49363 
49364 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
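// On AVX512 targets this can then become a single VPTERNLOG; e.g. (illustrative):
//   (or (and X, C), (and Y, ~C)) --> (vpternlog C, X, Y, 0xCA)
// where immediate 0xCA computes the bitwise select "C ? X : Y".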
49365 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
49366                                      const X86Subtarget &Subtarget) {
49367   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
49368 
49369   MVT VT = N->getSimpleValueType(0);
49370   unsigned EltSizeInBits = VT.getScalarSizeInBits();
49371   if (!VT.isVector() || (EltSizeInBits % 8) != 0)
49372     return SDValue();
49373 
49374   SDValue N0 = peekThroughBitcasts(N->getOperand(0));
49375   SDValue N1 = peekThroughBitcasts(N->getOperand(1));
49376   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
49377     return SDValue();
49378 
49379   // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
49380   // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
49381   if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
49382         !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
49383     return SDValue();
49384 
49385   // Attempt to extract constant byte masks.
49386   APInt UndefElts0, UndefElts1;
49387   SmallVector<APInt, 32> EltBits0, EltBits1;
49388   if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
49389                                      false, false))
49390     return SDValue();
49391   if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
49392                                      false, false))
49393     return SDValue();
49394 
49395   for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
49396     // TODO - add UNDEF elts support.
49397     if (UndefElts0[i] || UndefElts1[i])
49398       return SDValue();
49399     if (EltBits0[i] != ~EltBits1[i])
49400       return SDValue();
49401   }
49402 
49403   SDLoc DL(N);
49404 
49405   if (useVPTERNLOG(Subtarget, VT)) {
49406     // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
49407     // VPTERNLOG is only available for vXi32/vXi64 types.
49408     MVT OpSVT = EltSizeInBits == 32 ? MVT::i32 : MVT::i64;
49409     MVT OpVT =
49410         MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
49411     SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
49412     SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
49413     SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
49414     SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
49415     SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
49416                                 DAG, Subtarget);
49417     return DAG.getBitcast(VT, Res);
49418   }
49419 
49420   SDValue X = N->getOperand(0);
49421   SDValue Y =
49422       DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
49423                   DAG.getBitcast(VT, N1.getOperand(0)));
49424   return DAG.getNode(ISD::OR, DL, VT, X, Y);
49425 }
49426 
49427 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
49428 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
49429   if (N->getOpcode() != ISD::OR)
49430     return false;
49431 
49432   SDValue N0 = N->getOperand(0);
49433   SDValue N1 = N->getOperand(1);
49434 
49435   // Canonicalize AND to LHS.
49436   if (N1.getOpcode() == ISD::AND)
49437     std::swap(N0, N1);
49438 
49439   // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
49440   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
49441     return false;
49442 
49443   Mask = N1.getOperand(0);
49444   X = N1.getOperand(1);
49445 
49446   // Check to see if the mask appeared in both the AND and ANDNP.
49447   if (N0.getOperand(0) == Mask)
49448     Y = N0.getOperand(1);
49449   else if (N0.getOperand(1) == Mask)
49450     Y = N0.getOperand(0);
49451   else
49452     return false;
49453 
49454   // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
49455   // ANDNP combine allows other combines to happen that prevent matching.
49456   return true;
49457 }
49458 
49459 // Try to fold:
49460 //   (or (and (m, y), (pandn m, x)))
49461 // into:
49462 //   (vselect m, x, y)
49463 // As a special case, try to fold:
49464 //   (or (and (m, (sub 0, x)), (pandn m, x)))
49465 // into:
49466 //   (sub (xor X, M), M)
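// Worked instance of the special case (illustrative): with a per-element
// sign-splat mask M,
//   (or (and M, (sub 0, X)), (pandn M, X))
// picks -X where M is set and X elsewhere, so it is emitted as the cheaper
//   (sub (xor X, M), M)
// instead of a blend.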
49467 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
49468                                             const X86Subtarget &Subtarget) {
49469   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
49470 
49471   EVT VT = N->getValueType(0);
49472   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
49473         (VT.is256BitVector() && Subtarget.hasInt256())))
49474     return SDValue();
49475 
49476   SDValue X, Y, Mask;
49477   if (!matchLogicBlend(N, X, Y, Mask))
49478     return SDValue();
49479 
49480   // Validate that X, Y, and Mask are bitcasts, and see through them.
49481   Mask = peekThroughBitcasts(Mask);
49482   X = peekThroughBitcasts(X);
49483   Y = peekThroughBitcasts(Y);
49484 
49485   EVT MaskVT = Mask.getValueType();
49486   unsigned EltBits = MaskVT.getScalarSizeInBits();
49487 
49488   // TODO: Attempt to handle floating point cases as well?
49489   if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
49490     return SDValue();
49491 
49492   SDLoc DL(N);
49493 
49494   // Attempt to combine to conditional negate: (sub (xor X, M), M)
49495   if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
49496                                                            DAG, Subtarget))
49497     return Res;
49498 
49499   // PBLENDVB is only available on SSE 4.1.
49500   if (!Subtarget.hasSSE41())
49501     return SDValue();
49502 
49503   // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
49504   if (Subtarget.hasVLX())
49505     return SDValue();
49506 
49507   MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
49508 
49509   X = DAG.getBitcast(BlendVT, X);
49510   Y = DAG.getBitcast(BlendVT, Y);
49511   Mask = DAG.getBitcast(BlendVT, Mask);
49512   Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
49513   return DAG.getBitcast(VT, Mask);
49514 }
49515 
49516 // Helper function for combineOrCmpEqZeroToCtlzSrl
49517 // Transforms:
49518 //   seteq(cmp x, 0)
49519 //   into:
49520 //   srl(ctlz x), log2(bitsize(x))
49521 // Input pattern is checked by caller.
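// For a 32-bit x this is (illustrative):
//   (x == 0) ? 1 : 0  ==>  lzcnt(x) >> 5
// since lzcnt(x) is 32 exactly when x is zero.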
49522 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
49523   SDValue Cmp = Op.getOperand(1);
49524   EVT VT = Cmp.getOperand(0).getValueType();
49525   unsigned Log2b = Log2_32(VT.getSizeInBits());
49526   SDLoc dl(Op);
49527   SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
49528   // The result of the shift is true or false, and on X86, the 32-bit
49529   // encoding of shr and lzcnt is more desirable.
49530   SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
49531   SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
49532                             DAG.getConstant(Log2b, dl, MVT::i8));
49533   return Scc;
49534 }
49535 
49536 // Try to transform:
49537 //   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
49538 //   into:
49539 //   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x))
49540 // Will also attempt to match more generic cases, eg:
49541 //   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
49542 // Only applies if the target supports the FastLZCNT feature.
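// For example (illustrative), with i32 values x and y on a FastLZCNT target:
//   zext(or (x == 0), (y == 0))  ==>  (lzcnt(x) | lzcnt(y)) >> 5
// because bit 5 of an lzcnt result is set only for a zero input.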
49543 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
49544                                            TargetLowering::DAGCombinerInfo &DCI,
49545                                            const X86Subtarget &Subtarget) {
49546   if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
49547     return SDValue();
49548 
49549   auto isORCandidate = [](SDValue N) {
49550     return (N->getOpcode() == ISD::OR && N->hasOneUse());
49551   };
49552 
49553   // Check that the zero extend is extending to 32 bits or more. The code
49554   // generated by srl(ctlz) for 16-bit or smaller variants of the pattern would
49555   // require extra instructions to clear the upper bits.
49556   if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
49557       !isORCandidate(N->getOperand(0)))
49558     return SDValue();
49559 
49560   // Check the node matches: setcc(eq, cmp 0)
49561   auto isSetCCCandidate = [](SDValue N) {
49562     return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
49563            X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
49564            N->getOperand(1).getOpcode() == X86ISD::CMP &&
49565            isNullConstant(N->getOperand(1).getOperand(1)) &&
49566            N->getOperand(1).getValueType().bitsGE(MVT::i32);
49567   };
49568 
49569   SDNode *OR = N->getOperand(0).getNode();
49570   SDValue LHS = OR->getOperand(0);
49571   SDValue RHS = OR->getOperand(1);
49572 
49573   // Save nodes matching or(or, setcc(eq, cmp 0)).
49574   SmallVector<SDNode *, 2> ORNodes;
49575   while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
49576           (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
49577     ORNodes.push_back(OR);
49578     OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
49579     LHS = OR->getOperand(0);
49580     RHS = OR->getOperand(1);
49581   }
49582 
49583   // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
49584   if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
49585       !isORCandidate(SDValue(OR, 0)))
49586     return SDValue();
49587 
49588   // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
49589   // to
49590   // or(srl(ctlz),srl(ctlz)).
49591   // The dag combiner can then fold it into:
49592   // srl(or(ctlz, ctlz)).
49593   SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
49594   SDValue Ret, NewRHS;
49595   if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
49596     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);
49597 
49598   if (!Ret)
49599     return SDValue();
49600 
49601   // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
49602   while (ORNodes.size() > 0) {
49603     OR = ORNodes.pop_back_val();
49604     LHS = OR->getOperand(0);
49605     RHS = OR->getOperand(1);
49606     // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
49607     if (RHS->getOpcode() == ISD::OR)
49608       std::swap(LHS, RHS);
49609     NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
49610     if (!NewRHS)
49611       return SDValue();
49612     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
49613   }
49614 
49615   return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
49616 }
49617 
49618 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
49619                                    SDValue And1_L, SDValue And1_R,
49620                                    const SDLoc &DL, SelectionDAG &DAG) {
49621   if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
49622     return SDValue();
49623   SDValue NotOp = And0_L->getOperand(0);
49624   if (NotOp == And1_R)
49625     std::swap(And1_R, And1_L);
49626   if (NotOp != And1_L)
49627     return SDValue();
49628 
49629   // (~(NotOp) & And0_R) | (NotOp & And1_R)
49630   // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
49631   EVT VT = And1_L->getValueType(0);
49632   SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
49633   SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
49634   SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
49635   SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
49636   return Xor1;
49637 }
49638 
49639 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
49640 /// equivalent `(((x ^ y) & m) ^ y)` pattern.
49641 /// This is typically a better representation for targets without a fused
49642 /// "and-not" operation. This function is intended to be called from a
49643 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
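/// A worked instance (illustrative): with m = 0x0f,
///   (x & ~0x0f) | (y & 0x0f)  ==  ((x ^ y) & ~0x0f) ^ y
/// which needs no ANDN and reuses y as the final XOR operand.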
49644 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
49645   // Note that masked-merge variants using XOR or ADD expressions are
49646   // normalized to OR by InstCombine so we only check for OR.
49647   assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
49648   SDValue N0 = Node->getOperand(0);
49649   if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
49650     return SDValue();
49651   SDValue N1 = Node->getOperand(1);
49652   if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
49653     return SDValue();
49654 
49655   SDLoc DL(Node);
49656   SDValue N00 = N0->getOperand(0);
49657   SDValue N01 = N0->getOperand(1);
49658   SDValue N10 = N1->getOperand(0);
49659   SDValue N11 = N1->getOperand(1);
49660   if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
49661     return Result;
49662   if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
49663     return Result;
49664   if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
49665     return Result;
49666   if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
49667     return Result;
49668   return SDValue();
49669 }
49670 
49671 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
49672                          TargetLowering::DAGCombinerInfo &DCI,
49673                          const X86Subtarget &Subtarget) {
49674   SDValue N0 = N->getOperand(0);
49675   SDValue N1 = N->getOperand(1);
49676   EVT VT = N->getValueType(0);
49677   SDLoc dl(N);
49678   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49679 
49680   // If this is SSE1 only convert to FOR to avoid scalarization.
49681   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49682     return DAG.getBitcast(MVT::v4i32,
49683                           DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
49684                                       DAG.getBitcast(MVT::v4f32, N0),
49685                                       DAG.getBitcast(MVT::v4f32, N1)));
49686   }
49687 
49688   // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
49689   // TODO: Support multiple SrcOps.
49690   if (VT == MVT::i1) {
49691     SmallVector<SDValue, 2> SrcOps;
49692     SmallVector<APInt, 2> SrcPartials;
49693     if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
49694         SrcOps.size() == 1) {
49695       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49696       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49697       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49698       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49699         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49700       if (Mask) {
49701         assert(SrcPartials[0].getBitWidth() == NumElts &&
49702                "Unexpected partial reduction mask");
49703         SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
49704         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49705         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49706         return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
49707       }
49708     }
49709   }
49710 
49711   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49712     return R;
49713 
49714   if (SDValue R = combineBitOpWithShift(N, DAG))
49715     return R;
49716 
49717   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49718     return FPLogic;
49719 
49720   if (DCI.isBeforeLegalizeOps())
49721     return SDValue();
49722 
49723   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49724     return R;
49725 
49726   if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
49727     return R;
49728 
49729   if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
49730     return R;
49731 
49732   // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
49733   if ((VT == MVT::i32 || VT == MVT::i64) &&
49734       N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
49735       isNullConstant(N0.getOperand(0))) {
49736     SDValue Cond = N0.getOperand(1);
49737     if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
49738       Cond = Cond.getOperand(0);
49739 
49740     if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
49741       if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
49742         uint64_t Val = CN->getZExtValue();
49743         if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
49744           X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
49745           CCode = X86::GetOppositeBranchCondition(CCode);
49746           SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
49747 
49748           SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
49749           R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
49750           R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
49751           return R;
49752         }
49753       }
49754     }
49755   }
49756 
49757   // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
49758   // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
49759   // iff the upper elements of the non-shifted arg are zero.
49760   // KUNPCK requires 16+ bool vector elements.
49761   if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
49762     unsigned NumElts = VT.getVectorNumElements();
49763     unsigned HalfElts = NumElts / 2;
49764     APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
49765     if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
49766         N1.getConstantOperandAPInt(1) == HalfElts &&
49767         DAG.MaskedVectorIsZero(N0, UpperElts)) {
49768       return DAG.getNode(
49769           ISD::CONCAT_VECTORS, dl, VT,
49770           extractSubVector(N0, 0, DAG, dl, HalfElts),
49771           extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
49772     }
49773     if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
49774         N0.getConstantOperandAPInt(1) == HalfElts &&
49775         DAG.MaskedVectorIsZero(N1, UpperElts)) {
49776       return DAG.getNode(
49777           ISD::CONCAT_VECTORS, dl, VT,
49778           extractSubVector(N1, 0, DAG, dl, HalfElts),
49779           extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
49780     }
49781   }
49782 
49783   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49784     // Attempt to recursively combine an OR of shuffles.
49785     SDValue Op(N, 0);
49786     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49787       return Res;
49788 
49789     // If either operand is a constant mask, then only the elements that aren't
49790     // allones are actually demanded by the other operand.
49791     auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
49792       APInt UndefElts;
49793       SmallVector<APInt> EltBits;
49794       int NumElts = VT.getVectorNumElements();
49795       int EltSizeInBits = VT.getScalarSizeInBits();
49796       if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
49797         return false;
49798 
49799       APInt DemandedElts = APInt::getZero(NumElts);
49800       for (int I = 0; I != NumElts; ++I)
49801         if (!EltBits[I].isAllOnes())
49802           DemandedElts.setBit(I);
49803 
49804       return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
49805     };
49806     if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
49807       if (N->getOpcode() != ISD::DELETED_NODE)
49808         DCI.AddToWorklist(N);
49809       return SDValue(N, 0);
49810     }
49811   }
49812 
49813   // We should fold "masked merge" patterns when `andn` is not available.
49814   if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
49815     if (SDValue R = foldMaskedMerge(N, DAG))
49816       return R;
49817 
49818   return SDValue();
49819 }
49820 
49821 /// Try to turn tests against the signbit in the form of:
49822 ///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
49823 /// into:
49824 ///   SETGT(X, -1)
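/// For an i32 X this is (illustrative): ((X >> 31) ^ 1) tests "X is
/// non-negative", which is exactly X > -1, so the shift and xor collapse into
/// a single compare.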
49825 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
49826   // This is only worth doing if the output type is i8 or i1.
49827   EVT ResultType = N->getValueType(0);
49828   if (ResultType != MVT::i8 && ResultType != MVT::i1)
49829     return SDValue();
49830 
49831   SDValue N0 = N->getOperand(0);
49832   SDValue N1 = N->getOperand(1);
49833 
49834   // We should be performing an xor against a truncated shift.
49835   if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
49836     return SDValue();
49837 
49838   // Make sure we are performing an xor against one.
49839   if (!isOneConstant(N1))
49840     return SDValue();
49841 
49842   // SetCC on x86 zero extends so only act on this if it's a logical shift.
49843   SDValue Shift = N0.getOperand(0);
49844   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
49845     return SDValue();
49846 
49847   // Make sure we are truncating from one of i16, i32 or i64.
49848   EVT ShiftTy = Shift.getValueType();
49849   if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
49850     return SDValue();
49851 
49852   // Make sure the shift amount extracts the sign bit.
49853   if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
49854       Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
49855     return SDValue();
49856 
49857   // Create a greater-than comparison against -1.
49858   // N.B. Using SETGE against 0 works but we want a canonical-looking
49859   // comparison; using SETGT matches up with what TranslateX86CC does.
49860   SDLoc DL(N);
49861   SDValue ShiftOp = Shift.getOperand(0);
49862   EVT ShiftOpTy = ShiftOp.getValueType();
49863   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49864   EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
49865                                                *DAG.getContext(), ResultType);
49866   SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
49867                               DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
49868   if (SetCCResultType != ResultType)
49869     Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
49870   return Cond;
49871 }
49872 
49873 /// Turn vector tests of the signbit in the form of:
49874 ///   xor (sra X, elt_size(X)-1), -1
49875 /// into:
49876 ///   pcmpgt X, -1
49877 ///
49878 /// This should be called before type legalization because the pattern may not
49879 /// persist after that.
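/// For example (illustrative), with a v4i32 X:
///   (xor (sra X, 31), -1) --> (pcmpgt X, -1)
/// i.e. an "is non-negative" test per element in a single compare.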
49880 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
49881                                          const X86Subtarget &Subtarget) {
49882   EVT VT = N->getValueType(0);
49883   if (!VT.isSimple())
49884     return SDValue();
49885 
49886   switch (VT.getSimpleVT().SimpleTy) {
49887   default: return SDValue();
49888   case MVT::v16i8:
49889   case MVT::v8i16:
49890   case MVT::v4i32:
49891   case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
49892   case MVT::v32i8:
49893   case MVT::v16i16:
49894   case MVT::v8i32:
49895   case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
49896   }
49897 
49898   // There must be a shift right algebraic before the xor, and the xor must be a
49899   // 'not' operation.
49900   SDValue Shift = N->getOperand(0);
49901   SDValue Ones = N->getOperand(1);
49902   if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
49903       !ISD::isBuildVectorAllOnes(Ones.getNode()))
49904     return SDValue();
49905 
49906   // The shift should be smearing the sign bit across each vector element.
49907   auto *ShiftAmt =
49908       isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
49909   if (!ShiftAmt ||
49910       ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
49911     return SDValue();
49912 
49913   // Create a greater-than comparison against -1. We don't use the more obvious
49914   // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
49915   return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
49916 }
49917 
49918 /// Detect patterns of truncation with unsigned saturation:
49919 ///
49920 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
49921 ///   Return the source value x to be truncated or SDValue() if the pattern was
49922 ///   not matched.
49923 ///
49924 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
49925 ///   where C1 >= 0 and C2 is unsigned max of destination type.
49926 ///
49927 ///    (truncate (smax (smin (x, C2), C1)) to dest_type)
49928 ///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
49929 ///
49930 ///   These two patterns are equivalent to:
49931 ///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
49932 ///   So return the smax(x, C1) value to be truncated or SDValue() if the
49933 ///   pattern was not matched.
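/// Concrete instance (illustrative): when truncating v8i32 to v8i16,
///   (trunc (umin X, 65535)) matches pattern 1 and returns X, so the caller
///   can emit an unsigned-saturating truncate (e.g. VPMOVUSDW) directly.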
49934 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49935                                  const SDLoc &DL) {
49936   EVT InVT = In.getValueType();
49937 
49938   // Saturation with truncation. We truncate from InVT to VT.
49939   assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
49940          "Unexpected types for truncate operation");
49941 
49942   // Match min/max and return limit value as a parameter.
49943   auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
49944     if (V.getOpcode() == Opcode &&
49945         ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
49946       return V.getOperand(0);
49947     return SDValue();
49948   };
49949 
49950   APInt C1, C2;
49951   if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
49952     // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
49953     // the element size of the destination type.
49954     if (C2.isMask(VT.getScalarSizeInBits()))
49955       return UMin;
49956 
49957   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
49958     if (MatchMinMax(SMin, ISD::SMAX, C1))
49959       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
49960         return SMin;
49961 
49962   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
49963     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
49964       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
49965           C2.uge(C1)) {
49966         return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
49967       }
49968 
49969   return SDValue();
49970 }
49971 
49972 /// Detect patterns of truncation with signed saturation:
49973 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
49974 ///                  signed_max_of_dest_type)) to dest_type)
49975 /// or:
49976 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
49977 ///                  signed_min_of_dest_type)) to dest_type).
49978 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
49979 /// Return the source value to be truncated or SDValue() if the pattern was not
49980 /// matched.
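/// Concrete instance (illustrative): when truncating v8i32 to v8i16,
///   (trunc (smin (smax X, -32768), 32767)) returns X, so the caller can use
///   a signed-saturating pack such as PACKSSDW.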
49981 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
49982   unsigned NumDstBits = VT.getScalarSizeInBits();
49983   unsigned NumSrcBits = In.getScalarValueSizeInBits();
49984   assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
49985 
49986   auto MatchMinMax = [](SDValue V, unsigned Opcode,
49987                         const APInt &Limit) -> SDValue {
49988     APInt C;
49989     if (V.getOpcode() == Opcode &&
49990         ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
49991       return V.getOperand(0);
49992     return SDValue();
49993   };
49994 
49995   APInt SignedMax, SignedMin;
49996   if (MatchPackUS) {
49997     SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
49998     SignedMin = APInt(NumSrcBits, 0);
49999   } else {
50000     SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
50001     SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
50002   }
50003 
50004   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
50005     if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
50006       return SMax;
50007 
50008   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
50009     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
50010       return SMin;
50011 
50012   return SDValue();
50013 }
50014 
50015 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
50016                                       SelectionDAG &DAG,
50017                                       const X86Subtarget &Subtarget) {
50018   if (!Subtarget.hasSSE2() || !VT.isVector())
50019     return SDValue();
50020 
50021   EVT SVT = VT.getVectorElementType();
50022   EVT InVT = In.getValueType();
50023   EVT InSVT = InVT.getVectorElementType();
50024 
50025   // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
50026   // split across two registers, we can use a packusdw+perm to clamp to 0-65535
50027   // and concatenate at the same time. Then we can use a final vpmovuswb to
50028   // clip to 0-255.
50029   if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
50030       InVT == MVT::v16i32 && VT == MVT::v16i8) {
50031     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
50032       // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
50033       SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
50034                                            DL, DAG, Subtarget);
50035       assert(Mid && "Failed to pack!");
50036       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
50037     }
50038   }
50039 
50040   // vXi32 truncate instructions are available with AVX512F.
50041   // vXi16 truncate instructions are only available with AVX512BW.
50042   // For 256-bit or smaller vectors, we require VLX.
50043   // FIXME: We could widen truncates to 512 to remove the VLX restriction.
50044   // If the result type is 256 bits or larger and we have disabled 512-bit
50045   // registers, we should go ahead and use the pack instructions if possible.
50046   bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
50047                        (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
50048                       (InVT.getSizeInBits() > 128) &&
50049                       (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
50050                       !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
50051 
50052   if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
50053       VT.getSizeInBits() >= 64 &&
50054       (SVT == MVT::i8 || SVT == MVT::i16) &&
50055       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
50056     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
50057       // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
50058       // Only do this when the result is at least 64 bits or we'll leave
50059       // dangling PACKSSDW nodes.
50060       if (SVT == MVT::i8 && InSVT == MVT::i32) {
50061         EVT MidVT = VT.changeVectorElementType(MVT::i16);
50062         SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
50063                                              DAG, Subtarget);
50064         assert(Mid && "Failed to pack!");
50065         SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
50066                                            Subtarget);
50067         assert(V && "Failed to pack!");
50068         return V;
50069       } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
50070         return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
50071                                       Subtarget);
50072     }
50073     if (SDValue SSatVal = detectSSatPattern(In, VT))
50074       return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
50075                                     Subtarget);
50076   }
50077 
50078   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50079   if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
50080       Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
50081       (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
50082     unsigned TruncOpc = 0;
50083     SDValue SatVal;
50084     if (SDValue SSatVal = detectSSatPattern(In, VT)) {
50085       SatVal = SSatVal;
50086       TruncOpc = X86ISD::VTRUNCS;
50087     } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
50088       SatVal = USatVal;
50089       TruncOpc = X86ISD::VTRUNCUS;
50090     }
50091     if (SatVal) {
50092       unsigned ResElts = VT.getVectorNumElements();
50093       // If the input type is less than 512 bits and we don't have VLX, we need
50094       // to widen to 512 bits.
50095       if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
50096         unsigned NumConcats = 512 / InVT.getSizeInBits();
50097         ResElts *= NumConcats;
50098         SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
50099         ConcatOps[0] = SatVal;
50100         InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
50101                                 NumConcats * InVT.getVectorNumElements());
50102         SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
50103       }
50104       // Widen the result if it's narrower than 128 bits.
50105       if (ResElts * SVT.getSizeInBits() < 128)
50106         ResElts = 128 / SVT.getSizeInBits();
50107       EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
50108       SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
50109       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
50110                          DAG.getIntPtrConstant(0, DL));
50111     }
50112   }
50113 
50114   return SDValue();
50115 }
50116 
50117 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
50118 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
50119 /// ISD::AVGCEILU (AVG) instruction.
50120 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
50121                                 const X86Subtarget &Subtarget,
50122                                 const SDLoc &DL) {
50123   if (!VT.isVector())
50124     return SDValue();
50125   EVT InVT = In.getValueType();
50126   unsigned NumElems = VT.getVectorNumElements();
50127 
50128   EVT ScalarVT = VT.getVectorElementType();
50129   if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
50130     return SDValue();
50131 
50132   // InScalarVT is the intermediate type in the AVG pattern, and it should be
50133   // wider than the original input type (i8/i16).
50134   EVT InScalarVT = InVT.getVectorElementType();
50135   if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
50136     return SDValue();
50137 
50138   if (!Subtarget.hasSSE2())
50139     return SDValue();
50140 
50141   // Detect the following pattern:
50142   //
50143   //   %1 = zext <N x i8> %a to <N x i32>
50144   //   %2 = zext <N x i8> %b to <N x i32>
50145   //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
50146   //   %4 = add nuw nsw <N x i32> %3, %2
50147   //   %5 = lshr <N x i32> %4, <i32 1 x N>
50148   //   %6 = trunc <N x i32> %5 to <N x i8>
50149   //
50150   // In AVX512, the last instruction can also be a trunc store.
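        // A minimal worked example of the combine (sketch, assuming N == 16 and
        // SSE2), where the add-of-1 and the shift by 1 form the rounding average:
        //   trunc ((zext %a + zext %b + 1) >> 1) to <16 x i8>
        //     --> ISD::AVGCEILU <16 x i8> %a, %b   ; selects to PAVGB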
50151   if (In.getOpcode() != ISD::SRL)
50152     return SDValue();
50153 
50154   // A lambda checking whether the given SDValue is a constant vector and each
50155   // element is in the range [Min, Max].
50156   auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
50157     return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
50158       return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
50159     });
50160   };
50161 
50162   auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
50163     unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
50164     return MaxActiveBits <= ScalarVT.getSizeInBits();
50165   };
50166 
50167   // Check if each element of the vector is right-shifted by one.
50168   SDValue LHS = In.getOperand(0);
50169   SDValue RHS = In.getOperand(1);
50170   if (!IsConstVectorInRange(RHS, 1, 1))
50171     return SDValue();
50172   if (LHS.getOpcode() != ISD::ADD)
50173     return SDValue();
50174 
50175   // Detect a pattern of a + b + 1 where the order doesn't matter.
50176   SDValue Operands[3];
50177   Operands[0] = LHS.getOperand(0);
50178   Operands[1] = LHS.getOperand(1);
50179 
50180   auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
50181                        ArrayRef<SDValue> Ops) {
50182     return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
50183   };
50184 
50185   auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
50186     for (SDValue &Op : Ops)
50187       if (Op.getValueType() != VT)
50188         Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
50189     // Pad to a power-of-2 vector, split+apply and extract the original vector.
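          // e.g. a v6i8 average is padded to v8i8 with undef elements, handled as
          // a single AVGCEILU, and the original 6 elements are extracted afterwards.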
50190     unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
50191     EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
50192     if (NumElemsPow2 != NumElems) {
50193       for (SDValue &Op : Ops) {
50194         SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
50195         for (unsigned i = 0; i != NumElems; ++i) {
50196           SDValue Idx = DAG.getIntPtrConstant(i, DL);
50197           EltsOfOp[i] =
50198               DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
50199         }
50200         Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
50201       }
50202     }
50203     SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
50204     if (NumElemsPow2 == NumElems)
50205       return Res;
50206     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
50207                        DAG.getIntPtrConstant(0, DL));
50208   };
50209 
50210   // Take care of the case when one of the operands is a constant vector whose
50211   // elements are in the range [1, 256] (i8) or [1, 65536] (i16).
50212   if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
50213       IsZExtLike(Operands[0])) {
50214     // The pattern is detected. Subtract one from the constant vector, then
50215     // demote it and emit an ISD::AVGCEILU node.
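          // e.g. trunc(((zext %x) + 9) >> 1) becomes AVGCEILU(%x, 8), since
          // (x + 9) >> 1 == (x + 8 + 1) >> 1.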
50216     SDValue VecOnes = DAG.getConstant(1, DL, InVT);
50217     Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
50218     return AVGSplitter({Operands[0], Operands[1]});
50219   }
50220 
50221   // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
50222   // Match the 'or' case only if it's add-like, i.e. it can be replaced by an add.
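        // e.g. or(x << 4, y) with y known to be u< 16 shares no set bits with
        // x << 4, so it is equivalent to add(x << 4, y) for this match.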
50223   auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
50224     if (ISD::ADD == V.getOpcode()) {
50225       Op0 = V.getOperand(0);
50226       Op1 = V.getOperand(1);
50227       return true;
50228     }
50229     if (ISD::ZERO_EXTEND != V.getOpcode())
50230       return false;
50231     V = V.getOperand(0);
50232     if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
50233         !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
50234       return false;
50235     Op0 = V.getOperand(0);
50236     Op1 = V.getOperand(1);
50237     return true;
50238   };
50239 
50240   SDValue Op0, Op1;
50241   if (FindAddLike(Operands[0], Op0, Op1))
50242     std::swap(Operands[0], Operands[1]);
50243   else if (!FindAddLike(Operands[1], Op0, Op1))
50244     return SDValue();
50245   Operands[2] = Op0;
50246   Operands[1] = Op1;
50247 
50248   // Now we have three operands of two additions. Check that one of them is a
50249   // constant vector with ones, and the other two can be promoted from i8/i16.
50250   for (SDValue &Op : Operands) {
50251     if (!IsConstVectorInRange(Op, 1, 1))
50252       continue;
50253     std::swap(Op, Operands[2]);
50254 
50255     // Check if Operands[0] and Operands[1] are results of type promotion.
50256     for (int j = 0; j < 2; ++j)
50257       if (Operands[j].getValueType() != VT)
50258         if (!IsZExtLike(Operands[j]))
50259           return SDValue();
50260 
50261     // The pattern is detected; emit ISD::AVGCEILU node(s).
50262     return AVGSplitter({Operands[0], Operands[1]});
50263   }
50264 
50265   return SDValue();
50266 }
50267 
50268 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
50269                            TargetLowering::DAGCombinerInfo &DCI,
50270                            const X86Subtarget &Subtarget) {
50271   LoadSDNode *Ld = cast<LoadSDNode>(N);
50272   EVT RegVT = Ld->getValueType(0);
50273   EVT MemVT = Ld->getMemoryVT();
50274   SDLoc dl(Ld);
50275   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50276 
50277   // For chips with slow 32-byte unaligned loads, break the 32-byte operation
50278   // into two 16-byte operations. Also split non-temporal aligned loads on
50279   // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
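        // e.g. on a target with slow 32-byte unaligned loads, a single v8f32 load
        // may be rewritten (sketch) as:
        //   lo: v4f32 = load ptr
        //   hi: v4f32 = load ptr+16
        //   v8f32 = concat_vectors lo, hi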
50280   ISD::LoadExtType Ext = Ld->getExtensionType();
50281   unsigned Fast;
50282   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
50283       Ext == ISD::NON_EXTLOAD &&
50284       ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
50285         Ld->getAlign() >= Align(16)) ||
50286        (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
50287                                *Ld->getMemOperand(), &Fast) &&
50288         !Fast))) {
50289     unsigned NumElems = RegVT.getVectorNumElements();
50290     if (NumElems < 2)
50291       return SDValue();
50292 
50293     unsigned HalfOffset = 16;
50294     SDValue Ptr1 = Ld->getBasePtr();
50295     SDValue Ptr2 =
50296         DAG.getMemBasePlusOffset(Ptr1, TypeSize::Fixed(HalfOffset), dl);
50297     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
50298                                   NumElems / 2);
50299     SDValue Load1 =
50300         DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
50301                     Ld->getOriginalAlign(),
50302                     Ld->getMemOperand()->getFlags());
50303     SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
50304                                 Ld->getPointerInfo().getWithOffset(HalfOffset),
50305                                 Ld->getOriginalAlign(),
50306                                 Ld->getMemOperand()->getFlags());
50307     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
50308                              Load1.getValue(1), Load2.getValue(1));
50309 
50310     SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
50311     return DCI.CombineTo(N, NewVec, TF, true);
50312   }
50313 
50314   // Bool vector load - attempt to cast to an integer, as we have good
50315   // (vXiY *ext(vXi1 bitcast(iX))) handling.
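        // e.g. a v16i1 load is rewritten (sketch) as:
        //   t: i16 = load ptr
        //   v16i1  = bitcast t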
50316   if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
50317       RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
50318     unsigned NumElts = RegVT.getVectorNumElements();
50319     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
50320     if (TLI.isTypeLegal(IntVT)) {
50321       SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
50322                                     Ld->getPointerInfo(),
50323                                     Ld->getOriginalAlign(),
50324                                     Ld->getMemOperand()->getFlags());
50325       SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
50326       return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
50327     }
50328   }
50329 
50330   // If we also broadcast this load as a subvector to a wider type, just extract
50331   // the lowest subvector.
50332   if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
50333       (RegVT.is128BitVector() || RegVT.is256BitVector())) {
50334     SDValue Ptr = Ld->getBasePtr();
50335     SDValue Chain = Ld->getChain();
50336     for (SDNode *User : Ptr->uses()) {
50337       if (User != N && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
50338           cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
50339           cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
50340           cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
50341               MemVT.getSizeInBits() &&
50342           !User->hasAnyUseOfValue(1) &&
50343           User->getValueSizeInBits(0).getFixedValue() >
50344               RegVT.getFixedSizeInBits()) {
50345         SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
50346                                            RegVT.getSizeInBits());
50347         Extract = DAG.getBitcast(RegVT, Extract);
50348         return DCI.CombineTo(N, Extract, SDValue(User, 1));
50349       }
50350     }
50351   }
50352 
50353   // Cast ptr32 and ptr64 pointers to the default address space before a load.
50354   unsigned AddrSpace = Ld->getAddressSpace();
50355   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50356       AddrSpace == X86AS::PTR32_UPTR) {
50357     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50358     if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
50359       SDValue Cast =
50360           DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
50361       return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
50362                          Ld->getOriginalAlign(),
50363                          Ld->getMemOperand()->getFlags());
50364     }
50365   }
50366 
50367   return SDValue();
50368 }
50369 
50370 /// If V is a build vector of boolean constants and exactly one of those
50371 /// constants is true, return the operand index of that true element.
50372 /// Otherwise, return -1.
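      /// For example, <i1 0, i1 0, i1 1, i1 0> returns 2, while
      /// <i1 1, i1 1, i1 0, i1 0> and <i1 0, i1 0, i1 0, i1 0> return -1.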
50373 static int getOneTrueElt(SDValue V) {
50374   // This needs to be a build vector of booleans.
50375   // TODO: Checking for the i1 type matches the IR definition for the mask,
50376   // but the mask check could be loosened to i8 or other types. That might
50377   // also require checking more than 'allOnesValue'; eg, the x86 HW
50378   // instructions only require that the MSB is set for each mask element.
50379   // The ISD::MSTORE comments/definition do not specify how the mask operand
50380   // is formatted.
50381   auto *BV = dyn_cast<BuildVectorSDNode>(V);
50382   if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
50383     return -1;
50384 
50385   int TrueIndex = -1;
50386   unsigned NumElts = BV->getValueType(0).getVectorNumElements();
50387   for (unsigned i = 0; i < NumElts; ++i) {
50388     const SDValue &Op = BV->getOperand(i);
50389     if (Op.isUndef())
50390       continue;
50391     auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
50392     if (!ConstNode)
50393       return -1;
50394     if (ConstNode->getAPIntValue().countTrailingOnes() >= 1) {
50395       // If we already found a one, this is too many.
50396       if (TrueIndex >= 0)
50397         return -1;
50398       TrueIndex = i;
50399     }
50400   }
50401   return TrueIndex;
50402 }
50403 
50404 /// Given a masked memory load/store operation, return true if it has one mask
50405 /// bit set. If it has one mask bit set, then also return the memory address of
50406 /// the scalar element to load/store, the vector index to insert/extract that
50407 /// scalar element, and the alignment for the scalar memory access.
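      /// For example, a masked v4f32 access with mask <0,0,1,0> yields
      /// Addr = BasePtr + 8, Index = 2, Offset = 8, and an alignment of
      /// commonAlignment(OriginalAlign, 4).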
50408 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
50409                                          SelectionDAG &DAG, SDValue &Addr,
50410                                          SDValue &Index, Align &Alignment,
50411                                          unsigned &Offset) {
50412   int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
50413   if (TrueMaskElt < 0)
50414     return false;
50415 
50416   // Get the address of the one scalar element that is specified by the mask
50417   // using the appropriate offset from the base pointer.
50418   EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
50419   Offset = 0;
50420   Addr = MaskedOp->getBasePtr();
50421   if (TrueMaskElt != 0) {
50422     Offset = TrueMaskElt * EltVT.getStoreSize();
50423     Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::Fixed(Offset),
50424                                     SDLoc(MaskedOp));
50425   }
50426 
50427   Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
50428   Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
50429                               EltVT.getStoreSize());
50430   return true;
50431 }
50432 
50433 /// If exactly one element of the mask is set for a non-extending masked load,
50434 /// it can be replaced by a scalar load and a vector insert.
50435 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50436 /// mask have already been optimized in IR, so we don't bother with those here.
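      /// For example (sketch), a masked v4i32 load with mask <0,1,0,0> becomes:
      ///   s = load i32, ptr + 4
      ///   result = insertelement %passthru, s, 1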
50437 static SDValue
50438 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50439                              TargetLowering::DAGCombinerInfo &DCI,
50440                              const X86Subtarget &Subtarget) {
50441   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50442   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50443   // However, some target hooks may need to be added to know when the transform
50444   // is profitable. Endianness would also have to be considered.
50445 
50446   SDValue Addr, VecIndex;
50447   Align Alignment;
50448   unsigned Offset;
50449   if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
50450     return SDValue();
50451 
50452   // Load the one scalar element that is specified by the mask using the
50453   // appropriate offset from the base pointer.
50454   SDLoc DL(ML);
50455   EVT VT = ML->getValueType(0);
50456   EVT EltVT = VT.getVectorElementType();
50457 
50458   EVT CastVT = VT;
50459   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50460     EltVT = MVT::f64;
50461     CastVT = VT.changeVectorElementType(EltVT);
50462   }
50463 
50464   SDValue Load =
50465       DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
50466                   ML->getPointerInfo().getWithOffset(Offset),
50467                   Alignment, ML->getMemOperand()->getFlags());
50468 
50469   SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
50470 
50471   // Insert the loaded element into the appropriate place in the vector.
50472   SDValue Insert =
50473       DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
50474   Insert = DAG.getBitcast(VT, Insert);
50475   return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
50476 }
50477 
50478 static SDValue
50479 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50480                               TargetLowering::DAGCombinerInfo &DCI) {
50481   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50482   if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
50483     return SDValue();
50484 
50485   SDLoc DL(ML);
50486   EVT VT = ML->getValueType(0);
50487 
50488   // If we are loading the first and last elements of a vector, it is safe and
50489   // always faster to load the whole vector. Replace the masked load with a
50490   // vector load and select.
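        // e.g. with constant mask <1,0,0,1> the whole vector is loaded and then
        // blended with the pass-through value via a select on the original mask.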
50491   unsigned NumElts = VT.getVectorNumElements();
50492   BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
50493   bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
50494   bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
50495   if (LoadFirstElt && LoadLastElt) {
50496     SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
50497                                 ML->getMemOperand());
50498     SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
50499                                   ML->getPassThru());
50500     return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
50501   }
50502 
50503   // Convert a masked load with a constant mask into a masked load and a select.
50504   // This allows the select operation to use a faster kind of select instruction
50505   // (for example, vblendvps -> vblendps).
50506 
50507   // Don't try this if the pass-through operand is already undefined. That would
50508   // cause an infinite loop because that's what we're about to create.
50509   if (ML->getPassThru().isUndef())
50510     return SDValue();
50511 
50512   if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
50513     return SDValue();
50514 
50515   // The new masked load has an undef pass-through operand. The select uses the
50516   // original pass-through operand.
50517   SDValue NewML = DAG.getMaskedLoad(
50518       VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
50519       DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
50520       ML->getAddressingMode(), ML->getExtensionType());
50521   SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
50522                                 ML->getPassThru());
50523 
50524   return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
50525 }
50526 
50527 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
50528                                  TargetLowering::DAGCombinerInfo &DCI,
50529                                  const X86Subtarget &Subtarget) {
50530   auto *Mld = cast<MaskedLoadSDNode>(N);
50531 
50532   // TODO: An expanding load with a constant mask may be optimized as well.
50533   if (Mld->isExpandingLoad())
50534     return SDValue();
50535 
50536   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
50537     if (SDValue ScalarLoad =
50538             reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
50539       return ScalarLoad;
50540 
50541     // TODO: Do some AVX512 subsets benefit from this transform?
50542     if (!Subtarget.hasAVX512())
50543       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
50544         return Blend;
50545   }
50546 
50547   // If the mask value has been legalized to a non-boolean vector, try to
50548   // simplify ops leading up to it. We only demand the MSB of each lane.
50549   SDValue Mask = Mld->getMask();
50550   if (Mask.getScalarValueSizeInBits() != 1) {
50551     EVT VT = Mld->getValueType(0);
50552     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50553     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50554     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50555       if (N->getOpcode() != ISD::DELETED_NODE)
50556         DCI.AddToWorklist(N);
50557       return SDValue(N, 0);
50558     }
50559     if (SDValue NewMask =
50560             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50561       return DAG.getMaskedLoad(
50562           VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
50563           NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
50564           Mld->getAddressingMode(), Mld->getExtensionType());
50565   }
50566 
50567   return SDValue();
50568 }
50569 
50570 /// If exactly one element of the mask is set for a non-truncating masked store,
50571 /// it can be replaced by a vector extract and a scalar store.
50572 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50573 /// mask have already been optimized in IR, so we don't bother with those here.
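      /// For example (sketch), a masked v4i32 store with mask <0,0,1,0> becomes:
      ///   e = extractelement %val, 2
      ///   store i32 e, ptr + 8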
50574 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
50575                                               SelectionDAG &DAG,
50576                                               const X86Subtarget &Subtarget) {
50577   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50578   // However, some target hooks may need to be added to know when the transform
50579   // is profitable. Endianness would also have to be considered.
50580 
50581   SDValue Addr, VecIndex;
50582   Align Alignment;
50583   unsigned Offset;
50584   if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
50585     return SDValue();
50586 
50587   // Extract the one scalar element that is actually being stored.
50588   SDLoc DL(MS);
50589   SDValue Value = MS->getValue();
50590   EVT VT = Value.getValueType();
50591   EVT EltVT = VT.getVectorElementType();
50592   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50593     EltVT = MVT::f64;
50594     EVT CastVT = VT.changeVectorElementType(EltVT);
50595     Value = DAG.getBitcast(CastVT, Value);
50596   }
50597   SDValue Extract =
50598       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
50599 
50600   // Store that element at the appropriate offset from the base pointer.
50601   return DAG.getStore(MS->getChain(), DL, Extract, Addr,
50602                       MS->getPointerInfo().getWithOffset(Offset),
50603                       Alignment, MS->getMemOperand()->getFlags());
50604 }
50605 
50606 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
50607                                   TargetLowering::DAGCombinerInfo &DCI,
50608                                   const X86Subtarget &Subtarget) {
50609   MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
50610   if (Mst->isCompressingStore())
50611     return SDValue();
50612 
50613   EVT VT = Mst->getValue().getValueType();
50614   SDLoc dl(Mst);
50615   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50616 
50617   if (Mst->isTruncatingStore())
50618     return SDValue();
50619 
50620   if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
50621     return ScalarStore;
50622 
50623   // If the mask value has been legalized to a non-boolean vector, try to
50624   // simplify ops leading up to it. We only demand the MSB of each lane.
50625   SDValue Mask = Mst->getMask();
50626   if (Mask.getScalarValueSizeInBits() != 1) {
50627     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50628     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50629       if (N->getOpcode() != ISD::DELETED_NODE)
50630         DCI.AddToWorklist(N);
50631       return SDValue(N, 0);
50632     }
50633     if (SDValue NewMask =
50634             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50635       return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
50636                                 Mst->getBasePtr(), Mst->getOffset(), NewMask,
50637                                 Mst->getMemoryVT(), Mst->getMemOperand(),
50638                                 Mst->getAddressingMode());
50639   }
50640 
50641   SDValue Value = Mst->getValue();
50642   if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
50643       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
50644                             Mst->getMemoryVT())) {
50645     return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
50646                               Mst->getBasePtr(), Mst->getOffset(), Mask,
50647                               Mst->getMemoryVT(), Mst->getMemOperand(),
50648                               Mst->getAddressingMode(), true);
50649   }
50650 
50651   return SDValue();
50652 }
50653 
50654 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
50655                             TargetLowering::DAGCombinerInfo &DCI,
50656                             const X86Subtarget &Subtarget) {
50657   StoreSDNode *St = cast<StoreSDNode>(N);
50658   EVT StVT = St->getMemoryVT();
50659   SDLoc dl(St);
50660   SDValue StoredVal = St->getValue();
50661   EVT VT = StoredVal.getValueType();
50662   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50663 
50664   // Convert a store of vXi1 into a store of iX and a bitcast.
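        // e.g. store <8 x i1> %m, ptr %p  -->  store i8 (bitcast %m to i8), ptr %p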
50665   if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
50666       VT.getVectorElementType() == MVT::i1) {
50667 
50668     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
50669     StoredVal = DAG.getBitcast(NewVT, StoredVal);
50670 
50671     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50672                         St->getPointerInfo(), St->getOriginalAlign(),
50673                         St->getMemOperand()->getFlags());
50674   }
50675 
50676   // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
50677   // This will avoid a copy to k-register.
50678   if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
50679       StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
50680       StoredVal.getOperand(0).getValueType() == MVT::i8) {
50681     SDValue Val = StoredVal.getOperand(0);
50682     // We must store zeros to the unused bits.
50683     Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
50684     return DAG.getStore(St->getChain(), dl, Val,
50685                         St->getBasePtr(), St->getPointerInfo(),
50686                         St->getOriginalAlign(),
50687                         St->getMemOperand()->getFlags());
50688   }
50689 
50690   // Widen v2i1/v4i1 stores to v8i1.
50691   if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
50692       Subtarget.hasAVX512()) {
50693     unsigned NumConcats = 8 / VT.getVectorNumElements();
50694     // We must store zeros to the unused bits.
50695     SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
50696     Ops[0] = StoredVal;
50697     StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
50698     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50699                         St->getPointerInfo(), St->getOriginalAlign(),
50700                         St->getMemOperand()->getFlags());
50701   }
50702 
50703   // Turn vXi1 stores of constants into a scalar store.
50704   if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
50705        VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
50706       ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
50707     // If it's a v64i1 store without 64-bit support, we need two stores.
50708     if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
50709       SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
50710                                       StoredVal->ops().slice(0, 32));
50711       Lo = combinevXi1ConstantToInteger(Lo, DAG);
50712       SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
50713                                       StoredVal->ops().slice(32, 32));
50714       Hi = combinevXi1ConstantToInteger(Hi, DAG);
50715 
50716       SDValue Ptr0 = St->getBasePtr();
50717       SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(4), dl);
50718 
50719       SDValue Ch0 =
50720           DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
50721                        St->getOriginalAlign(),
50722                        St->getMemOperand()->getFlags());
50723       SDValue Ch1 =
50724           DAG.getStore(St->getChain(), dl, Hi, Ptr1,
50725                        St->getPointerInfo().getWithOffset(4),
50726                        St->getOriginalAlign(),
50727                        St->getMemOperand()->getFlags());
50728       return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
50729     }
50730 
50731     StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
50732     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50733                         St->getPointerInfo(), St->getOriginalAlign(),
50734                         St->getMemOperand()->getFlags());
50735   }
50736 
50737   // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
50738   // Sandy Bridge, perform two 16-byte stores.
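        // e.g. a 32-byte v8f32 store on such a target is split into two 16-byte
        // v4f32 stores at offsets 0 and 16 (see splitVectorStore).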
50739   unsigned Fast;
50740   if (VT.is256BitVector() && StVT == VT &&
50741       TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
50742                              *St->getMemOperand(), &Fast) &&
50743       !Fast) {
50744     unsigned NumElems = VT.getVectorNumElements();
50745     if (NumElems < 2)
50746       return SDValue();
50747 
50748     return splitVectorStore(St, DAG);
50749   }
50750 
50751   // Split under-aligned vector non-temporal stores.
50752   if (St->isNonTemporal() && StVT == VT &&
50753       St->getAlign().value() < VT.getStoreSize()) {
50754     // ZMM/YMM nt-stores - either it can be stored as a series of shorter
50755     // vectors or the legalizer can scalarize it to use MOVNTI.
50756     if (VT.is256BitVector() || VT.is512BitVector()) {
50757       unsigned NumElems = VT.getVectorNumElements();
50758       if (NumElems < 2)
50759         return SDValue();
50760       return splitVectorStore(St, DAG);
50761     }
50762 
50763     // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
50764     // to use MOVNTI.
50765     if (VT.is128BitVector() && Subtarget.hasSSE2()) {
50766       MVT NTVT = Subtarget.hasSSE4A()
50767                      ? MVT::v2f64
50768                      : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
50769       return scalarizeVectorStore(St, NTVT, DAG);
50770     }
50771   }
50772 
50773   // Try to optimize v16i16->v16i8 truncating stores when BWI is not
50774   // supported but AVX512F is, by extending to v16i32 and truncating.
50775   if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
50776       St->getValue().getOpcode() == ISD::TRUNCATE &&
50777       St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
50778       TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
50779       St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
50780     SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
50781                               St->getValue().getOperand(0));
50782     return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
50783                              MVT::v16i8, St->getMemOperand());
50784   }
50785 
50786   // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
50787   if (!St->isTruncatingStore() &&
50788       (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
50789        StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
50790       StoredVal.hasOneUse() &&
50791       TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
50792     bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
50793     return EmitTruncSStore(IsSigned, St->getChain(),
50794                            dl, StoredVal.getOperand(0), St->getBasePtr(),
50795                            VT, St->getMemOperand(), DAG);
50796   }
50797 
50798   // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
50799   if (!St->isTruncatingStore()) {
50800     auto IsExtractedElement = [](SDValue V) {
50801       if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
50802         V = V.getOperand(0);
50803       unsigned Opc = V.getOpcode();
50804       if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
50805           isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
50806           V.getOperand(0).hasOneUse())
50807         return V.getOperand(0);
50808       return SDValue();
50809     };
50810     if (SDValue Extract = IsExtractedElement(StoredVal)) {
50811       SDValue Trunc = peekThroughOneUseBitcasts(Extract);
50812       if (Trunc.getOpcode() == X86ISD::VTRUNC) {
50813         SDValue Src = Trunc.getOperand(0);
50814         MVT DstVT = Trunc.getSimpleValueType();
50815         MVT SrcVT = Src.getSimpleValueType();
50816         unsigned NumSrcElts = SrcVT.getVectorNumElements();
50817         unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
50818         MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
50819         if (NumTruncBits == VT.getSizeInBits() &&
50820             TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
50821           return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
50822                                    TruncVT, St->getMemOperand());
50823         }
50824       }
50825     }
50826   }
50827 
50828   // Optimize trunc store (of multiple scalars) to shuffle and store.
50829   // First, pack all of the elements in one place. Next, store to memory
50830   // in fewer chunks.
50831   if (St->isTruncatingStore() && VT.isVector()) {
50832     // Check if we can detect an AVG pattern from the truncation. If yes,
50833     // replace the trunc store with a normal store of the result of the AVG
50834     // node.
50835     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
50836       if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
50837                                          Subtarget, dl))
50838         return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
50839                             St->getPointerInfo(), St->getOriginalAlign(),
50840                             St->getMemOperand()->getFlags());
50841 
50842     if (TLI.isTruncStoreLegal(VT, StVT)) {
50843       if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
50844         return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
50845                                dl, Val, St->getBasePtr(),
50846                                St->getMemoryVT(), St->getMemOperand(), DAG);
50847       if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
50848                                           DAG, dl))
50849         return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
50850                                dl, Val, St->getBasePtr(),
50851                                St->getMemoryVT(), St->getMemOperand(), DAG);
50852     }
50853 
50854     return SDValue();
50855   }
50856 
50857   // Cast ptr32 and ptr64 pointers to the default address space before a store.
50858   unsigned AddrSpace = St->getAddressSpace();
50859   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50860       AddrSpace == X86AS::PTR32_UPTR) {
50861     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50862     if (PtrVT != St->getBasePtr().getSimpleValueType()) {
50863       SDValue Cast =
50864           DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
50865       return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
50866                           St->getPointerInfo(), St->getOriginalAlign(),
50867                           St->getMemOperand()->getFlags(), St->getAAInfo());
50868     }
50869   }
50870 
50871   // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
50872   // the FP state in cases where an emms may be missing.
50873   // A preferable solution to the general problem is to figure out the right
50874   // places to insert EMMS.  This qualifies as a quick hack.
50875 
50876   // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
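        // e.g. in 32-bit mode with SSE2, "i64 t = load p; store t, q" can be
        // emitted as a single f64 (movq-style) load/store pair instead of two
        // 32-bit GPR load/store pairs.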
50877   if (VT.getSizeInBits() != 64)
50878     return SDValue();
50879 
50880   const Function &F = DAG.getMachineFunction().getFunction();
50881   bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
50882   bool F64IsLegal =
50883       !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
50884   if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
50885       isa<LoadSDNode>(St->getValue()) &&
50886       cast<LoadSDNode>(St->getValue())->isSimple() &&
50887       St->getChain().hasOneUse() && St->isSimple()) {
50888     LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
50889 
50890     if (!ISD::isNormalLoad(Ld))
50891       return SDValue();
50892 
50893     // Avoid the transformation if there are multiple uses of the loaded value.
50894     if (!Ld->hasNUsesOfValue(1, 0))
50895       return SDValue();
50896 
50897     SDLoc LdDL(Ld);
50898     SDLoc StDL(N);
50899     // Lower to a single movq load/store pair.
50900     SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
50901                                 Ld->getBasePtr(), Ld->getMemOperand());
50902 
50903     // Make sure new load is placed in same chain order.
50904     DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
50905     return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
50906                         St->getMemOperand());
50907   }
50908 
50909   // This is similar to the above case, but here we handle a scalar 64-bit
50910   // integer store that is extracted from a vector on a 32-bit target.
50911   // If we have SSE2, then we can treat it like a floating-point double
50912   // to get past legalization. The execution dependencies fixup pass will
50913   // choose the optimal machine instruction for the store if this really is
50914   // an integer or v2f32 rather than an f64.
50915   if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
50916       St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
50917     SDValue OldExtract = St->getOperand(1);
50918     SDValue ExtOp0 = OldExtract.getOperand(0);
50919     unsigned VecSize = ExtOp0.getValueSizeInBits();
50920     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
50921     SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
50922     SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
50923                                      BitCast, OldExtract.getOperand(1));
50924     return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
50925                         St->getPointerInfo(), St->getOriginalAlign(),
50926                         St->getMemOperand()->getFlags());
50927   }
50928 
50929   return SDValue();
50930 }
50931 
50932 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
50933                                      TargetLowering::DAGCombinerInfo &DCI,
50934                                      const X86Subtarget &Subtarget) {
50935   auto *St = cast<MemIntrinsicSDNode>(N);
50936 
50937   SDValue StoredVal = N->getOperand(1);
50938   MVT VT = StoredVal.getSimpleValueType();
50939   EVT MemVT = St->getMemoryVT();
50940 
50941   // Figure out which elements we demand.
50942   unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
50943   APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
50944 
50945   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50946   if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
50947     if (N->getOpcode() != ISD::DELETED_NODE)
50948       DCI.AddToWorklist(N);
50949     return SDValue(N, 0);
50950   }
50951 
50952   return SDValue();
50953 }
50954 
50955 /// Return 'true' if this vector operation is "horizontal"
50956 /// and return the operands for the horizontal operation in LHS and RHS.  A
50957 /// horizontal operation performs the binary operation on successive elements
50958 /// of its first operand, then on successive elements of its second operand,
50959 /// returning the resulting values in a vector.  For example, if
50960 ///   A = < float a0, float a1, float a2, float a3 >
50961 /// and
50962 ///   B = < float b0, float b1, float b2, float b3 >
50963 /// then the result of doing a horizontal operation on A and B is
50964 ///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
50965 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
50966 /// A horizontal-op B, for some already available A and B, and if so then LHS is
50967 /// set to A, RHS to B, and the routine returns 'true'.
50968 static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
50969                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
50970                               bool IsCommutative,
50971                               SmallVectorImpl<int> &PostShuffleMask) {
50972   // If either operand is undef, bail out. The binop should be simplified.
50973   if (LHS.isUndef() || RHS.isUndef())
50974     return false;
50975 
50976   // Look for the following pattern:
50977   //   A = < float a0, float a1, float a2, float a3 >
50978   //   B = < float b0, float b1, float b2, float b3 >
50979   // and
50980   //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
50981   //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
50982   // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
50983   // which is A horizontal-op B.
50984 
50985   MVT VT = LHS.getSimpleValueType();
50986   assert((VT.is128BitVector() || VT.is256BitVector()) &&
50987          "Unsupported vector type for horizontal add/sub");
50988   unsigned NumElts = VT.getVectorNumElements();
50989 
50990   auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
50991                         SmallVectorImpl<int> &ShuffleMask) {
50992     bool UseSubVector = false;
50993     if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
50994         Op.getOperand(0).getValueType().is256BitVector() &&
50995         llvm::isNullConstant(Op.getOperand(1))) {
50996       Op = Op.getOperand(0);
50997       UseSubVector = true;
50998     }
50999     SmallVector<SDValue, 2> SrcOps;
51000     SmallVector<int, 16> SrcMask, ScaledMask;
51001     SDValue BC = peekThroughBitcasts(Op);
51002     if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
51003         !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
51004           return Op.getValueSizeInBits() == BC.getValueSizeInBits();
51005         })) {
51006       resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
51007       if (!UseSubVector && SrcOps.size() <= 2 &&
51008           scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
51009         N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
51010         N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
51011         ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
51012       }
51013       if (UseSubVector && SrcOps.size() == 1 &&
51014           scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
51015         std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
51016         ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
51017         ShuffleMask.assign(Mask.begin(), Mask.end());
51018       }
51019     }
51020   };
51021 
51022   // View LHS in the form
51023   //   LHS = VECTOR_SHUFFLE A, B, LMask
51024   // If LHS is not a shuffle, then pretend it is the identity shuffle:
51025   //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
51026   // NOTE: A default initialized SDValue represents an UNDEF of type VT.
51027   SDValue A, B;
51028   SmallVector<int, 16> LMask;
51029   GetShuffle(LHS, A, B, LMask);
51030 
51031   // Likewise, view RHS in the form
51032   //   RHS = VECTOR_SHUFFLE C, D, RMask
51033   SDValue C, D;
51034   SmallVector<int, 16> RMask;
51035   GetShuffle(RHS, C, D, RMask);
51036 
51037   // At least one of the operands should be a vector shuffle.
51038   unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
51039   if (NumShuffles == 0)
51040     return false;
51041 
51042   if (LMask.empty()) {
51043     A = LHS;
51044     for (unsigned i = 0; i != NumElts; ++i)
51045       LMask.push_back(i);
51046   }
51047 
51048   if (RMask.empty()) {
51049     C = RHS;
51050     for (unsigned i = 0; i != NumElts; ++i)
51051       RMask.push_back(i);
51052   }
51053 
51054   // If we have a unary mask, ensure the other op is set to null.
51055   if (isUndefOrInRange(LMask, 0, NumElts))
51056     B = SDValue();
51057   else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
51058     A = SDValue();
51059 
51060   if (isUndefOrInRange(RMask, 0, NumElts))
51061     D = SDValue();
51062   else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
51063     C = SDValue();
51064 
51065   // If A and B occur in reverse order in RHS, then canonicalize by commuting
51066   // RHS operands and shuffle mask.
51067   if (A != C) {
51068     std::swap(C, D);
51069     ShuffleVectorSDNode::commuteMask(RMask);
51070   }
51071   // Check that the shuffles are both shuffling the same vectors.
51072   if (!(A == C && B == D))
51073     return false;
51074 
51075   PostShuffleMask.clear();
51076   PostShuffleMask.append(NumElts, SM_SentinelUndef);
51077 
51078   // LHS and RHS are now:
51079   //   LHS = shuffle A, B, LMask
51080   //   RHS = shuffle A, B, RMask
51081   // Check that the masks correspond to performing a horizontal operation.
51082   // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
51083   // so we just repeat the inner loop if this is a 256-bit op.
51084   unsigned Num128BitChunks = VT.getSizeInBits() / 128;
51085   unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
51086   unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
51087   assert((NumEltsPer128BitChunk % 2 == 0) &&
51088          "Vector type should have an even number of elements in each lane");
51089   for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
51090     for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
51091       // Ignore undefined components.
51092       int LIdx = LMask[i + j], RIdx = RMask[i + j];
51093       if (LIdx < 0 || RIdx < 0 ||
51094           (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
51095           (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
51096         continue;
51097 
51098       // Check that successive odd/even elements are being operated on. If not,
51099       // this is not a horizontal operation.
51100       if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
51101           !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
51102         return false;
51103 
51104       // Compute the post-shuffle mask index based on where the element
51105       // is stored in the HOP result, and where it needs to be moved to.
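            // e.g. for v8i32 (4 elements per 128-bit chunk), LIdx == 6 gives
            // Base = 6 and Index = (6 % 4) / 2 + (6 & ~3) = 5, before the
            // B/high-half adjustment below.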
51106       int Base = LIdx & ~1u;
51107       int Index = ((Base % NumEltsPer128BitChunk) / 2) +
51108                   ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
51109 
51110       // The  low half of the 128-bit result must choose from A.
51111       // The high half of the 128-bit result must choose from B,
51112       // unless B is undef. In that case, we are always choosing from A.
51113       if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
51114         Index += NumEltsPer64BitChunk;
51115       PostShuffleMask[i + j] = Index;
51116     }
51117   }
51118 
51119   SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
51120   SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
51121 
51122   bool IsIdentityPostShuffle =
51123       isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
51124   if (IsIdentityPostShuffle)
51125     PostShuffleMask.clear();
51126 
51127   // Avoid 128-bit multi-lane shuffles if pre-AVX2 and FP (integer will split).
51128   if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
51129       isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
51130     return false;
51131 
51132   // If the source nodes are already used in HorizOps then always accept this.
51133   // Shuffle folding should merge these back together.
51134   bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
51135     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
51136   });
51137   bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
51138     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
51139   });
51140   bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
51141 
51142   // Assume a SingleSource HOP if we only shuffle one input and don't need to
51143   // shuffle the result.
51144   if (!ForceHorizOp &&
51145       !shouldUseHorizontalOp(NewLHS == NewRHS &&
51146                                  (NumShuffles < 2 || !IsIdentityPostShuffle),
51147                              DAG, Subtarget))
51148     return false;
51149 
51150   LHS = DAG.getBitcast(VT, NewLHS);
51151   RHS = DAG.getBitcast(VT, NewRHS);
51152   return true;
51153 }
51154 
51155 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
51156 static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
51157                                          const X86Subtarget &Subtarget) {
51158   EVT VT = N->getValueType(0);
51159   unsigned Opcode = N->getOpcode();
51160   bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
51161   SmallVector<int, 8> PostShuffleMask;
51162 
51163   switch (Opcode) {
51164   case ISD::FADD:
51165   case ISD::FSUB:
51166     if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
51167         (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
51168       SDValue LHS = N->getOperand(0);
51169       SDValue RHS = N->getOperand(1);
51170       auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
51171       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
51172                             PostShuffleMask)) {
51173         SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
51174         if (!PostShuffleMask.empty())
51175           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
51176                                             DAG.getUNDEF(VT), PostShuffleMask);
51177         return HorizBinOp;
51178       }
51179     }
51180     break;
51181   case ISD::ADD:
51182   case ISD::SUB:
51183     if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
51184                                  VT == MVT::v16i16 || VT == MVT::v8i32)) {
51185       SDValue LHS = N->getOperand(0);
51186       SDValue RHS = N->getOperand(1);
51187       auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
51188       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
51189                             PostShuffleMask)) {
51190         auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
51191                                         ArrayRef<SDValue> Ops) {
51192           return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
51193         };
51194         SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
51195                                               {LHS, RHS}, HOpBuilder);
51196         if (!PostShuffleMask.empty())
51197           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
51198                                             DAG.getUNDEF(VT), PostShuffleMask);
51199         return HorizBinOp;
51200       }
51201     }
51202     break;
51203   }
51204 
51205   return SDValue();
51206 }
51207 
51208 //  Try to combine the following nodes
51209 //  t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
51210 //    <i32 -2147483648[float -0.000000e+00]> 0
51211 //  t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
51212 //    <(load 4 from constant-pool)> t0, t29
51213 //  [t30: v16i32 = bitcast t27]
51214 //  t6: v16i32 = xor t7, t27[t30]
51215 //  t11: v16f32 = bitcast t6
51216 //  t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
51217 //  into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
51218 //  t22: v16f32 = bitcast t7
51219 //  t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
51220 //  t24: v32f16 = bitcast t23
51221 static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
51222                                   const X86Subtarget &Subtarget) {
51223   EVT VT = N->getValueType(0);
51224   SDValue LHS = N->getOperand(0);
51225   SDValue RHS = N->getOperand(1);
51226   int CombineOpcode =
51227       N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
51228   auto isConjugationConstant = [](const Constant *c) {
51229     if (const auto *CI = dyn_cast<ConstantInt>(c)) {
51230       APInt ConjugationInt32 = APInt(32, 0x80000000, true);
51231       APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
51232       switch (CI->getBitWidth()) {
51233       case 16:
51234         return false;
51235       case 32:
51236         return CI->getValue() == ConjugationInt32;
51237       case 64:
51238         return CI->getValue() == ConjugationInt64;
51239       default:
51240         llvm_unreachable("Unexpected bit width");
51241       }
51242     }
51243     if (const auto *CF = dyn_cast<ConstantFP>(c))
51244       return CF->isNegativeZeroValue();
51245     return false;
51246   };
51247   auto combineConjugation = [&](SDValue &r) {
51248     if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
51249       SDValue XOR = LHS.getOperand(0);
51250       if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
51251         SDValue XORRHS = XOR.getOperand(1);
51252         if (XORRHS.getOpcode() == ISD::BITCAST && XORRHS.hasOneUse())
51253           XORRHS = XORRHS.getOperand(0);
51254         if (XORRHS.getOpcode() == X86ISD::VBROADCAST_LOAD &&
51255             XORRHS.getOperand(1).getNumOperands()) {
51256           ConstantPoolSDNode *CP =
51257               dyn_cast<ConstantPoolSDNode>(XORRHS.getOperand(1).getOperand(0));
51258           if (CP && isConjugationConstant(CP->getConstVal())) {
51259             SelectionDAG::FlagInserter FlagsInserter(DAG, N);
51260             SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
51261             SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
51262             r = DAG.getBitcast(VT, FCMulC);
51263             return true;
51264           }
51265         }
51266       }
51267     }
51268     return false;
51269   };
51270   SDValue Res;
51271   if (combineConjugation(Res))
51272     return Res;
51273   std::swap(LHS, RHS);
51274   if (combineConjugation(Res))
51275     return Res;
51276   return Res;
51277 }
51278 
51279 //  Try to combine the following nodes:
51280 //  FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
51281 static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
51282                                 const X86Subtarget &Subtarget) {
51283   auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
51284     return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
51285            Flags.hasAllowContract();
51286   };
51287 
51288   auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
51289     return DAG.getTarget().Options.NoSignedZerosFPMath ||
51290            Flags.hasNoSignedZeros();
51291   };
51292   auto IsVectorAllNegativeZero = [](const SDNode *N) {
51293     if (N->getOpcode() != X86ISD::VBROADCAST_LOAD)
51294       return false;
51295     assert(N->getSimpleValueType(0).getScalarType() == MVT::f32 &&
51296            "Unexpected vector type!");
51297     if (ConstantPoolSDNode *CP =
51298             dyn_cast<ConstantPoolSDNode>(N->getOperand(1)->getOperand(0))) {
51299       APInt AI = APInt(32, 0x80008000, true);
51300       if (const auto *CI = dyn_cast<ConstantInt>(CP->getConstVal()))
51301         return CI->getValue() == AI;
51302       if (const auto *CF = dyn_cast<ConstantFP>(CP->getConstVal()))
51303         return CF->getValue() == APFloat(APFloat::IEEEsingle(), AI);
51304     }
51305     return false;
51306   };
51307 
51308   if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
51309       !AllowContract(N->getFlags()))
51310     return SDValue();
51311 
51312   EVT VT = N->getValueType(0);
51313   if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
51314     return SDValue();
51315 
51316   SDValue LHS = N->getOperand(0);
51317   SDValue RHS = N->getOperand(1);
51318   bool IsConj;
51319   SDValue FAddOp1, MulOp0, MulOp1;
51320   auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
51321                        &IsVectorAllNegativeZero,
51322                        &HasNoSignedZero](SDValue N) -> bool {
51323     if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
51324       return false;
51325     SDValue Op0 = N.getOperand(0);
51326     unsigned Opcode = Op0.getOpcode();
51327     if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
51328       if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
51329         MulOp0 = Op0.getOperand(0);
51330         MulOp1 = Op0.getOperand(1);
51331         IsConj = Opcode == X86ISD::VFCMULC;
51332         return true;
51333       }
51334       if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
51335           ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
51336             HasNoSignedZero(Op0->getFlags())) ||
51337            IsVectorAllNegativeZero(Op0->getOperand(2).getNode()))) {
51338         MulOp0 = Op0.getOperand(0);
51339         MulOp1 = Op0.getOperand(1);
51340         IsConj = Opcode == X86ISD::VFCMADDC;
51341         return true;
51342       }
51343     }
51344     return false;
51345   };
51346 
51347   if (GetCFmulFrom(LHS))
51348     FAddOp1 = RHS;
51349   else if (GetCFmulFrom(RHS))
51350     FAddOp1 = LHS;
51351   else
51352     return SDValue();
51353 
51354   MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
51355   FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
51356   unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
51357   // FIXME: How do we handle when fast math flags of FADD are different from
51358   // CFMUL's?
51359   SDValue CFmul =
51360       DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
51361   return DAG.getBitcast(VT, CFmul);
51362 }
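
// Note: the complex-FP16 nodes (VFMULC/VFCMULC/VFMADDC/VFCMADDC) operate on
// vectors of f32 lanes, each lane packing one complex fp16 value, which is why
// the combine above bitcasts the FADD operand to CVT (half the element count,
// at f32) before forming the fused VF[C]MADDC node.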
51363 
51364 /// Do target-specific dag combines on floating-point adds/subs.
51365 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
51366                                const X86Subtarget &Subtarget) {
51367   if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
51368     return HOp;
51369 
51370   if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
51371     return COp;
51372 
51373   return SDValue();
51374 }
51375 
51376 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
51377 /// the codegen.
51378 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
51379 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
51380 ///       anything that is guaranteed to be transformed by DAGCombiner.
51381 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
51382                                           const X86Subtarget &Subtarget,
51383                                           const SDLoc &DL) {
51384   assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
51385   SDValue Src = N->getOperand(0);
51386   unsigned SrcOpcode = Src.getOpcode();
51387   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51388 
51389   EVT VT = N->getValueType(0);
51390   EVT SrcVT = Src.getValueType();
51391 
51392   auto IsFreeTruncation = [VT](SDValue Op) {
51393     unsigned TruncSizeInBits = VT.getScalarSizeInBits();
51394 
51395     // See if this has been extended from a smaller/equal size to
51396     // the truncation size, allowing a truncation to combine with the extend.
51397     unsigned Opcode = Op.getOpcode();
51398     if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
51399          Opcode == ISD::ZERO_EXTEND) &&
51400         Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
51401       return true;
51402 
51403     // See if this is a single use constant which can be constant folded.
51404     // NOTE: We don't peek through bitcasts here because there is currently
51405     // no support for constant folding truncate+bitcast+vector_of_constants. So
51406     // we'll just end up with a truncate on both operands which will
51407     // get turned back into (truncate (binop)) causing an infinite loop.
51408     return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
51409   };
51410 
51411   auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
51412     SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
51413     SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
51414     return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
51415   };
51416 
51417   // Don't combine if the operation has other uses.
51418   if (!Src.hasOneUse())
51419     return SDValue();
51420 
51421   // Only support vector truncation for now.
51422   // TODO: i64 scalar math would benefit as well.
51423   if (!VT.isVector())
51424     return SDValue();
51425 
51426   // In most cases it's only worth pre-truncating if we're only facing the cost
51427   // of one truncation.
51428   // i.e. if one of the inputs will constant fold or the input is repeated.
51429   switch (SrcOpcode) {
51430   case ISD::MUL:
51431     // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
51432     // better to truncate if we have the chance.
51433     if (SrcVT.getScalarType() == MVT::i64 &&
51434         TLI.isOperationLegal(SrcOpcode, VT) &&
51435         !TLI.isOperationLegal(SrcOpcode, SrcVT))
51436       return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
51437     [[fallthrough]];
51438   case ISD::AND:
51439   case ISD::XOR:
51440   case ISD::OR:
51441   case ISD::ADD:
51442   case ISD::SUB: {
51443     SDValue Op0 = Src.getOperand(0);
51444     SDValue Op1 = Src.getOperand(1);
51445     if (TLI.isOperationLegal(SrcOpcode, VT) &&
51446         (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
51447       return TruncateArithmetic(Op0, Op1);
51448     break;
51449   }
51450   }
51451 
51452   return SDValue();
51453 }
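
// Illustrative example: for
//   trunc v4i32 (add v4i64 (zext v4i32 X), (zext v4i32 Y))
// both truncations are free because of the extends, so if the v4i32 ADD is
// legal this becomes
//   add v4i32 (trunc (zext X)), (trunc (zext Y))
// and the generic combiner folds the trunc(zext) pairs back to X and Y.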
51454 
51455 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
51456 /// e.g. trunc <8 x i32> X to <8 x i16> -->
51457 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
51458 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
51459 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
51460                                                  const X86Subtarget &Subtarget,
51461                                                  SelectionDAG &DAG) {
51462   SDValue In = N->getOperand(0);
51463   EVT InVT = In.getValueType();
51464   EVT OutVT = N->getValueType(0);
51465 
51466   APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
51467                                     OutVT.getScalarSizeInBits());
51468   In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
51469   return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
51470 }
51471 
51472 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
51473 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
51474                                                  const X86Subtarget &Subtarget,
51475                                                  SelectionDAG &DAG) {
51476   SDValue In = N->getOperand(0);
51477   EVT InVT = In.getValueType();
51478   EVT OutVT = N->getValueType(0);
51479   In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
51480                    DAG.getValueType(OutVT));
51481   return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
51482 }
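
// Sketch for v8i32 -> v8i16 on SSE2: the sign_extend_inreg above forces every
// lane into the signed i16 range, so the signed saturation of PACKSSDW
// reproduces a plain truncation when truncateVectorWithPACK packs the two
// v4i32 halves together.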
51483 
51484 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
51485 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
51486 /// legalization the truncation will be translated into a BUILD_VECTOR with each
51487 /// element that is extracted from a vector and then truncated, and it is
51488 /// difficult to do this optimization based on them.
51489 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
51490                                        const X86Subtarget &Subtarget) {
51491   EVT OutVT = N->getValueType(0);
51492   if (!OutVT.isVector())
51493     return SDValue();
51494 
51495   SDValue In = N->getOperand(0);
51496   if (!In.getValueType().isSimple())
51497     return SDValue();
51498 
51499   EVT InVT = In.getValueType();
51500   unsigned NumElems = OutVT.getVectorNumElements();
51501 
51502   // AVX512 provides fast truncate ops.
51503   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
51504     return SDValue();
51505 
51506   EVT OutSVT = OutVT.getVectorElementType();
51507   EVT InSVT = InVT.getVectorElementType();
51508   if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
51509         (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
51510         NumElems >= 8))
51511     return SDValue();
51512 
51513   // SSSE3's pshufb results in fewer instructions in the cases below.
51514   if (Subtarget.hasSSSE3() && NumElems == 8) {
51515     if (InSVT == MVT::i16)
51516       return SDValue();
51517     if (InSVT == MVT::i32 &&
51518         (OutSVT == MVT::i8 || !Subtarget.hasSSE41() || Subtarget.hasInt256()))
51519       return SDValue();
51520   }
51521 
51522   SDLoc DL(N);
51523   // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
51524   // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
51525   // truncate 2 x v4i32 to v8i16.
51526   if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
51527     return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
51528   if (InSVT == MVT::i32)
51529     return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
51530 
51531   return SDValue();
51532 }
51533 
51534 /// This function transforms vector truncation of 'extended sign-bits' or
51535 /// 'extended zero-bits' values.
51536 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
51537 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
51538                                                SelectionDAG &DAG,
51539                                                const X86Subtarget &Subtarget) {
51540   // Requires SSE2.
51541   if (!Subtarget.hasSSE2())
51542     return SDValue();
51543 
51544   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
51545     return SDValue();
51546 
51547   SDValue In = N->getOperand(0);
51548   if (!In.getValueType().isSimple())
51549     return SDValue();
51550 
51551   MVT VT = N->getValueType(0).getSimpleVT();
51552   MVT SVT = VT.getScalarType();
51553 
51554   MVT InVT = In.getValueType().getSimpleVT();
51555   MVT InSVT = InVT.getScalarType();
51556 
51557   // Check we have a truncation suited for PACKSS/PACKUS.
51558   if (!isPowerOf2_32(VT.getVectorNumElements()))
51559     return SDValue();
51560   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
51561     return SDValue();
51562   if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
51563     return SDValue();
51564 
51565   // Truncation to sub-128bit vXi32 can be better handled with shuffles.
51566   // Truncation to sub-128-bit vXi32 can be better handled with shuffles.
51567     return SDValue();
51568 
51569   // AVX512 has fast truncate, but if the input is already going to be split,
51570   // there's no harm in trying pack.
51571   if (Subtarget.hasAVX512() &&
51572       !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
51573         InVT.is512BitVector())) {
51574     // PACK should still be worth it for 128-bit vectors if the sources were
51575     // originally concatenated from subvectors.
51576     SmallVector<SDValue> ConcatOps;
51577     if (VT.getSizeInBits() > 128 ||
51578         !collectConcatOps(In.getNode(), ConcatOps, DAG))
51579       return SDValue();
51580   }
51581 
51582   unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
51583   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
51584 
51585   // Use PACKUS if the input has zero-bits that extend all the way to the
51586   // packed/truncated value. e.g. masks, zext_in_reg, etc.
51587   KnownBits Known = DAG.computeKnownBits(In);
51588   unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
51589   if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
51590     return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
51591 
51592   // Use PACKSS if the input has sign-bits that extend all the way to the
51593   // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
51594   unsigned NumSignBits = DAG.ComputeNumSignBits(In);
51595 
51596   // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
51597   // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
51598   // on and combines/simplifications can't then use it.
51599   if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
51600     return SDValue();
51601 
51602   unsigned MinSignBits = InSVT.getSizeInBits() - NumPackedSignBits;
51603   if (NumSignBits > MinSignBits)
51604     return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
51605 
51606   // If we have a srl that only generates signbits that we will discard in
51607   // the truncation then we can use PACKSS by converting the srl to a sra.
51608   // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
51609   if (In.getOpcode() == ISD::SRL && N->isOnlyUserOf(In.getNode()))
51610     if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
51611             In, APInt::getAllOnes(VT.getVectorNumElements()))) {
51612       if (*ShAmt == MinSignBits) {
51613         SDValue NewIn = DAG.getNode(ISD::SRA, DL, InVT, In->ops());
51614         return truncateVectorWithPACK(X86ISD::PACKSS, VT, NewIn, DL, DAG,
51615                                       Subtarget);
51616       }
51617     }
51618 
51619   return SDValue();
51620 }
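
// Illustrative example: a v8i32 SETCC result has all-zeros/all-ones lanes,
// i.e. 32 sign bits, so truncating it to v8i16 passes the PACKSS check above
// and can be emitted as a single PACKSSDW instead of a shuffle sequence.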
51621 
51622 // Try to form a MULHU or MULHS node by looking for
51623 // (trunc (srl (mul ext, ext), 16))
51624 // TODO: This is X86 specific because we want to be able to handle wide types
51625 // before type legalization. But we can only do it if the vector will be
51626 // legalized via widening/splitting. Type legalization can't handle promotion
51627 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
51628 // combiner.
51629 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
51630                             SelectionDAG &DAG, const X86Subtarget &Subtarget) {
51631   // First instruction should be a right shift of a multiply.
51632   if (Src.getOpcode() != ISD::SRL ||
51633       Src.getOperand(0).getOpcode() != ISD::MUL)
51634     return SDValue();
51635 
51636   if (!Subtarget.hasSSE2())
51637     return SDValue();
51638 
51639   // Only handle vXi16 types that are at least 128 bits unless they will be
51640   // widened.
51641   if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
51642     return SDValue();
51643 
51644   // Input type should be at least vXi32.
51645   EVT InVT = Src.getValueType();
51646   if (InVT.getVectorElementType().getSizeInBits() < 32)
51647     return SDValue();
51648 
51649   // Need a shift by 16.
51650   APInt ShiftAmt;
51651   if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
51652       ShiftAmt != 16)
51653     return SDValue();
51654 
51655   SDValue LHS = Src.getOperand(0).getOperand(0);
51656   SDValue RHS = Src.getOperand(0).getOperand(1);
51657 
51658   // Count leading sign/zero bits on both inputs - if there are enough then
51659   // truncation back to vXi16 will be cheap - either as a pack/shuffle
51660   // sequence or using AVX512 truncations. If the inputs are sext/zext then the
51661   // truncations may actually be free by peeking through to the ext source.
51662   auto IsSext = [&DAG](SDValue V) {
51663     return DAG.ComputeMaxSignificantBits(V) <= 16;
51664   };
51665   auto IsZext = [&DAG](SDValue V) {
51666     return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
51667   };
51668 
51669   bool IsSigned = IsSext(LHS) && IsSext(RHS);
51670   bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
51671   if (!IsSigned && !IsUnsigned)
51672     return SDValue();
51673 
51674   // Check if both inputs are extensions, which will be removed by truncation.
51675   bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
51676                          LHS.getOpcode() == ISD::ZERO_EXTEND) &&
51677                         (RHS.getOpcode() == ISD::SIGN_EXTEND ||
51678                          RHS.getOpcode() == ISD::ZERO_EXTEND) &&
51679                         LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
51680                         RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
51681 
51682   // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
51683   // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
51684   // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
51685   // will have to split anyway.
51686   unsigned InSizeInBits = InVT.getSizeInBits();
51687   if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
51688       !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
51689       (InSizeInBits % 16) == 0) {
51690     EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51691                                 InVT.getSizeInBits() / 16);
51692     SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
51693                               DAG.getBitcast(BCVT, RHS));
51694     return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
51695   }
51696 
51697   // Truncate back to source type.
51698   LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
51699   RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
51700 
51701   unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
51702   return DAG.getNode(Opc, DL, VT, LHS, RHS);
51703 }
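
// Illustrative example:
//   trunc v8i16 (srl (mul (sext v8i16 X to v8i32),
//                         (sext v8i16 Y to v8i32)), 16)
// has at most 16 significant bits on both multiply operands, so the combine
// above truncates them back to v8i16 and emits MULHS (PMULHW); the
// trunc(sext X) pairs then fold away, leaving just the high-half multiply.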
51704 
51705 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
51706 // from one vector with signed bytes from another vector, adds together
51707 // adjacent pairs of 16-bit products, and saturates the result before
51708 // truncating to 16 bits.
51709 //
51710 // Which looks something like this:
51711 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
51712 //                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
51713 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
51714                                const X86Subtarget &Subtarget,
51715                                const SDLoc &DL) {
51716   if (!VT.isVector() || !Subtarget.hasSSSE3())
51717     return SDValue();
51718 
51719   unsigned NumElems = VT.getVectorNumElements();
51720   EVT ScalarVT = VT.getVectorElementType();
51721   if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
51722     return SDValue();
51723 
51724   SDValue SSatVal = detectSSatPattern(In, VT);
51725   if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
51726     return SDValue();
51727 
51728   // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
51729   // of multiplies from even/odd elements.
51730   SDValue N0 = SSatVal.getOperand(0);
51731   SDValue N1 = SSatVal.getOperand(1);
51732 
51733   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
51734     return SDValue();
51735 
51736   SDValue N00 = N0.getOperand(0);
51737   SDValue N01 = N0.getOperand(1);
51738   SDValue N10 = N1.getOperand(0);
51739   SDValue N11 = N1.getOperand(1);
51740 
51741   // TODO: Handle constant vectors and use knownbits/computenumsignbits?
51742   // Canonicalize zero_extend to LHS.
51743   if (N01.getOpcode() == ISD::ZERO_EXTEND)
51744     std::swap(N00, N01);
51745   if (N11.getOpcode() == ISD::ZERO_EXTEND)
51746     std::swap(N10, N11);
51747 
51748   // Ensure we have a zero_extend and a sign_extend.
51749   if (N00.getOpcode() != ISD::ZERO_EXTEND ||
51750       N01.getOpcode() != ISD::SIGN_EXTEND ||
51751       N10.getOpcode() != ISD::ZERO_EXTEND ||
51752       N11.getOpcode() != ISD::SIGN_EXTEND)
51753     return SDValue();
51754 
51755   // Peek through the extends.
51756   N00 = N00.getOperand(0);
51757   N01 = N01.getOperand(0);
51758   N10 = N10.getOperand(0);
51759   N11 = N11.getOperand(0);
51760 
51761   // Ensure the extend is from vXi8.
51762   if (N00.getValueType().getVectorElementType() != MVT::i8 ||
51763       N01.getValueType().getVectorElementType() != MVT::i8 ||
51764       N10.getValueType().getVectorElementType() != MVT::i8 ||
51765       N11.getValueType().getVectorElementType() != MVT::i8)
51766     return SDValue();
51767 
51768   // All inputs should be build_vectors.
51769   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
51770       N01.getOpcode() != ISD::BUILD_VECTOR ||
51771       N10.getOpcode() != ISD::BUILD_VECTOR ||
51772       N11.getOpcode() != ISD::BUILD_VECTOR)
51773     return SDValue();
51774 
51775   // N00/N10 are zero extended. N01/N11 are sign extended.
51776 
51777   // For each element, we need to ensure we have an odd element from one vector
51778   // multiplied by the odd element of another vector and the even element from
51779   // one of the same vectors being multiplied by the even element from the
51780   // other vector. So we need to make sure for each element i, this operation
51781   // is being performed:
51782   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
51783   SDValue ZExtIn, SExtIn;
51784   for (unsigned i = 0; i != NumElems; ++i) {
51785     SDValue N00Elt = N00.getOperand(i);
51786     SDValue N01Elt = N01.getOperand(i);
51787     SDValue N10Elt = N10.getOperand(i);
51788     SDValue N11Elt = N11.getOperand(i);
51789     // TODO: Be more tolerant to undefs.
51790     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51791         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51792         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51793         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
51794       return SDValue();
51795     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
51796     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
51797     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
51798     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
51799     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
51800       return SDValue();
51801     unsigned IdxN00 = ConstN00Elt->getZExtValue();
51802     unsigned IdxN01 = ConstN01Elt->getZExtValue();
51803     unsigned IdxN10 = ConstN10Elt->getZExtValue();
51804     unsigned IdxN11 = ConstN11Elt->getZExtValue();
51805     // Add is commutative so indices can be reordered.
51806     if (IdxN00 > IdxN10) {
51807       std::swap(IdxN00, IdxN10);
51808       std::swap(IdxN01, IdxN11);
51809     }
51810     // N0 indices must be the even element. N1 indices must be the next odd element.
51811     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
51812         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
51813       return SDValue();
51814     SDValue N00In = N00Elt.getOperand(0);
51815     SDValue N01In = N01Elt.getOperand(0);
51816     SDValue N10In = N10Elt.getOperand(0);
51817     SDValue N11In = N11Elt.getOperand(0);
51818     // First time we find an input capture it.
51819     if (!ZExtIn) {
51820       ZExtIn = N00In;
51821       SExtIn = N01In;
51822     }
51823     if (ZExtIn != N00In || SExtIn != N01In ||
51824         ZExtIn != N10In || SExtIn != N11In)
51825       return SDValue();
51826   }
51827 
51828   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
51829                          ArrayRef<SDValue> Ops) {
51830     // Shrink by adding truncate nodes and let DAGCombine fold with the
51831     // sources.
51832     EVT InVT = Ops[0].getValueType();
51833     assert(InVT.getScalarType() == MVT::i8 &&
51834            "Unexpected scalar element type");
51835     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
51836     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51837                                  InVT.getVectorNumElements() / 2);
51838     return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
51839   };
51840   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
51841                           PMADDBuilder);
51842 }
51843 
51844 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
51845                                const X86Subtarget &Subtarget) {
51846   EVT VT = N->getValueType(0);
51847   SDValue Src = N->getOperand(0);
51848   SDLoc DL(N);
51849 
51850   // Attempt to pre-truncate inputs to arithmetic ops instead.
51851   if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
51852     return V;
51853 
51854   // Try to detect AVG pattern first.
51855   if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
51856     return Avg;
51857 
51858   // Try to detect PMADD
51859   if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
51860     return PMAdd;
51861 
51862   // Try to combine truncation with signed/unsigned saturation.
51863   if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
51864     return Val;
51865 
51866   // Try to combine PMULHUW/PMULHW for vXi16.
51867   if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
51868     return V;
51869 
51870   // The bitcast source is a direct mmx result. Detect a truncation to i32 of
51871   // an i64 bitcast from x86mmx and lower it to X86ISD::MMX_MOVD2W.
51872   if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
51873     SDValue BCSrc = Src.getOperand(0);
51874     if (BCSrc.getValueType() == MVT::x86mmx)
51875       return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
51876   }
51877 
51878   // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
51879   if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
51880     return V;
51881 
51882   return combineVectorTruncation(N, DAG, Subtarget);
51883 }
51884 
51885 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
51886                              TargetLowering::DAGCombinerInfo &DCI) {
51887   EVT VT = N->getValueType(0);
51888   SDValue In = N->getOperand(0);
51889   SDLoc DL(N);
51890 
51891   if (SDValue SSatVal = detectSSatPattern(In, VT))
51892     return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
51893   if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
51894     return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
51895 
51896   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51897   APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
51898   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51899     return SDValue(N, 0);
51900 
51901   return SDValue();
51902 }
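
// e.g. an AVX512 VTRUNC whose input was first clamped with smin/smax to the
// signed i16 range (for a vXi16 result) is matched by detectSSatPattern above
// and emitted as the saturating X86ISD::VTRUNCS (VPMOVSDW for a vXi32 source).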
51903 
51904 /// Returns the negated value if the node \p N flips sign of FP value.
51905 ///
51906 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
51907 /// or FSUB(0, x)
51908 /// AVX512F does not have FXOR, so FNEG is lowered as
51909 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
51910 /// In this case we go through all bitcasts.
51911 /// This also recognizes splat of a negated value and returns the splat of that
51912 /// value.
51913 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
51914   if (N->getOpcode() == ISD::FNEG)
51915     return N->getOperand(0);
51916 
51917   // Don't recurse exponentially.
51918   if (Depth > SelectionDAG::MaxRecursionDepth)
51919     return SDValue();
51920 
51921   unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
51922 
51923   SDValue Op = peekThroughBitcasts(SDValue(N, 0));
51924   EVT VT = Op->getValueType(0);
51925 
51926   // Make sure the element size doesn't change.
51927   if (VT.getScalarSizeInBits() != ScalarSize)
51928     return SDValue();
51929 
51930   unsigned Opc = Op.getOpcode();
51931   switch (Opc) {
51932   case ISD::VECTOR_SHUFFLE: {
51933     // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
51934     // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
51935     if (!Op.getOperand(1).isUndef())
51936       return SDValue();
51937     if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
51938       if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
51939         return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
51940                                     cast<ShuffleVectorSDNode>(Op)->getMask());
51941     break;
51942   }
51943   case ISD::INSERT_VECTOR_ELT: {
51944     // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
51945     // -V, INDEX).
51946     SDValue InsVector = Op.getOperand(0);
51947     SDValue InsVal = Op.getOperand(1);
51948     if (!InsVector.isUndef())
51949       return SDValue();
51950     if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
51951       if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
51952         return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
51953                            NegInsVal, Op.getOperand(2));
51954     break;
51955   }
51956   case ISD::FSUB:
51957   case ISD::XOR:
51958   case X86ISD::FXOR: {
51959     SDValue Op1 = Op.getOperand(1);
51960     SDValue Op0 = Op.getOperand(0);
51961 
51962     // For XOR and FXOR, we want to check if constant
51963     // bits of Op1 are sign bit masks. For FSUB, we
51964     // have to check if constant bits of Op0 are sign
51965     // bit masks and hence we swap the operands.
51966     if (Opc == ISD::FSUB)
51967       std::swap(Op0, Op1);
51968 
51969     APInt UndefElts;
51970     SmallVector<APInt, 16> EltBits;
51971     // Extract constant bits and see if they are all
51972     // sign bit masks. Ignore the undef elements.
51973     if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
51974                                       /* AllowWholeUndefs */ true,
51975                                       /* AllowPartialUndefs */ false)) {
51976       for (unsigned I = 0, E = EltBits.size(); I < E; I++)
51977         if (!UndefElts[I] && !EltBits[I].isSignMask())
51978           return SDValue();
51979 
51980       // Only allow bitcast from correctly-sized constant.
51981       Op0 = peekThroughBitcasts(Op0);
51982       if (Op0.getScalarValueSizeInBits() == ScalarSize)
51983         return Op0;
51984     }
51985     break;
51986   } // case
51987   } // switch
51988 
51989   return SDValue();
51990 }
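
// Illustrative example (the AVX512 lowering of fneg): the DAG
//   v16f32 = bitcast (xor v16i32 (bitcast X), (splat 0x80000000))
// is recognised by the XOR case above because every constant lane is a
// sign-bit mask of the 32-bit scalar width, and X is returned as the negated
// value.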
51991 
51992 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
51993                                 bool NegRes) {
51994   if (NegMul) {
51995     switch (Opcode) {
51996     default: llvm_unreachable("Unexpected opcode");
51997     case ISD::FMA:              Opcode = X86ISD::FNMADD;        break;
51998     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FNMADD; break;
51999     case X86ISD::FMADD_RND:     Opcode = X86ISD::FNMADD_RND;    break;
52000     case X86ISD::FMSUB:         Opcode = X86ISD::FNMSUB;        break;
52001     case X86ISD::STRICT_FMSUB:  Opcode = X86ISD::STRICT_FNMSUB; break;
52002     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FNMSUB_RND;    break;
52003     case X86ISD::FNMADD:        Opcode = ISD::FMA;              break;
52004     case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA;       break;
52005     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FMADD_RND;     break;
52006     case X86ISD::FNMSUB:        Opcode = X86ISD::FMSUB;         break;
52007     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB;  break;
52008     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FMSUB_RND;     break;
52009     }
52010   }
52011 
52012   if (NegAcc) {
52013     switch (Opcode) {
52014     default: llvm_unreachable("Unexpected opcode");
52015     case ISD::FMA:              Opcode = X86ISD::FMSUB;         break;
52016     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FMSUB;  break;
52017     case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;     break;
52018     case X86ISD::FMSUB:         Opcode = ISD::FMA;              break;
52019     case X86ISD::STRICT_FMSUB:  Opcode = ISD::STRICT_FMA;       break;
52020     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;     break;
52021     case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;        break;
52022     case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
52023     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;    break;
52024     case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;        break;
52025     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
52026     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;    break;
52027     case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;      break;
52028     case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND;  break;
52029     case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;      break;
52030     case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND;  break;
52031     }
52032   }
52033 
52034   if (NegRes) {
52035     switch (Opcode) {
52036     // For accuracy reasons, we never combine fneg and fma under strict FP.
52037     default: llvm_unreachable("Unexpected opcode");
52038     case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
52039     case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
52040     case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
52041     case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
52042     case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
52043     case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
52044     case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
52045     case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
52046     }
52047   }
52048 
52049   return Opcode;
52050 }
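
// e.g. negateFMAOpcode(ISD::FMA, /*NegMul=*/true, /*NegAcc=*/false,
//                      /*NegRes=*/false) returns X86ISD::FNMADD, reflecting
// that fma(-a, b, c) == -(a*b) + c == fnmadd(a, b, c).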
52051 
52052 /// Do target-specific dag combines on floating point negations.
52053 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
52054                            TargetLowering::DAGCombinerInfo &DCI,
52055                            const X86Subtarget &Subtarget) {
52056   EVT OrigVT = N->getValueType(0);
52057   SDValue Arg = isFNEG(DAG, N);
52058   if (!Arg)
52059     return SDValue();
52060 
52061   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52062   EVT VT = Arg.getValueType();
52063   EVT SVT = VT.getScalarType();
52064   SDLoc DL(N);
52065 
52066   // Let legalize expand this if it isn't a legal type yet.
52067   if (!TLI.isTypeLegal(VT))
52068     return SDValue();
52069 
52070   // If we're negating a FMUL node on a target with FMA, then we can avoid the
52071   // use of a constant by performing (-0 - A*B) instead.
52072   // FIXME: Check rounding control flags as well once it becomes available.
52073   if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
52074       Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
52075     SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
52076     SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
52077                                   Arg.getOperand(1), Zero);
52078     return DAG.getBitcast(OrigVT, NewNode);
52079   }
52080 
52081   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52082   bool LegalOperations = !DCI.isBeforeLegalizeOps();
52083   if (SDValue NegArg =
52084           TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
52085     return DAG.getBitcast(OrigVT, NegArg);
52086 
52087   return SDValue();
52088 }
52089 
52090 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
52091                                                 bool LegalOperations,
52092                                                 bool ForCodeSize,
52093                                                 NegatibleCost &Cost,
52094                                                 unsigned Depth) const {
52095   // fneg patterns are removable even if they have multiple uses.
52096   if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
52097     Cost = NegatibleCost::Cheaper;
52098     return DAG.getBitcast(Op.getValueType(), Arg);
52099   }
52100 
52101   EVT VT = Op.getValueType();
52102   EVT SVT = VT.getScalarType();
52103   unsigned Opc = Op.getOpcode();
52104   SDNodeFlags Flags = Op.getNode()->getFlags();
52105   switch (Opc) {
52106   case ISD::FMA:
52107   case X86ISD::FMSUB:
52108   case X86ISD::FNMADD:
52109   case X86ISD::FNMSUB:
52110   case X86ISD::FMADD_RND:
52111   case X86ISD::FMSUB_RND:
52112   case X86ISD::FNMADD_RND:
52113   case X86ISD::FNMSUB_RND: {
52114     if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
52115         !(SVT == MVT::f32 || SVT == MVT::f64) ||
52116         !isOperationLegal(ISD::FMA, VT))
52117       break;
52118 
52119     // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
52120     // if it may have signed zeros.
52121     if (!Flags.hasNoSignedZeros())
52122       break;
52123 
52124     // This is always negatible for free but we might be able to remove some
52125     // extra operand negations as well.
52126     SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
52127     for (int i = 0; i != 3; ++i)
52128       NewOps[i] = getCheaperNegatedExpression(
52129           Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
52130 
52131     bool NegA = !!NewOps[0];
52132     bool NegB = !!NewOps[1];
52133     bool NegC = !!NewOps[2];
52134     unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
52135 
52136     Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
52137                                   : NegatibleCost::Neutral;
52138 
52139     // Fill in the non-negated ops with the original values.
52140     for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
52141       if (!NewOps[i])
52142         NewOps[i] = Op.getOperand(i);
52143     return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
52144   }
52145   case X86ISD::FRCP:
52146     if (SDValue NegOp0 =
52147             getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
52148                                  ForCodeSize, Cost, Depth + 1))
52149       return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
52150     break;
52151   }
52152 
52153   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
52154                                               ForCodeSize, Cost, Depth);
52155 }
52156 
52157 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
52158                                  const X86Subtarget &Subtarget) {
52159   MVT VT = N->getSimpleValueType(0);
52160   // If we have integer vector types available, use the integer opcodes.
52161   if (!VT.isVector() || !Subtarget.hasSSE2())
52162     return SDValue();
52163 
52164   SDLoc dl(N);
52165 
52166   unsigned IntBits = VT.getScalarSizeInBits();
52167   MVT IntSVT = MVT::getIntegerVT(IntBits);
52168   MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
52169 
52170   SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
52171   SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
52172   unsigned IntOpcode;
52173   switch (N->getOpcode()) {
52174   default: llvm_unreachable("Unexpected FP logic op");
52175   case X86ISD::FOR:   IntOpcode = ISD::OR; break;
52176   case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
52177   case X86ISD::FAND:  IntOpcode = ISD::AND; break;
52178   case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
52179   }
52180   SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
52181   return DAG.getBitcast(VT, IntOp);
52182 }
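
// Illustrative example: with SSE2 available, X86ISD::FXOR v4f32 A, B is
// rebuilt above as bitcast (xor v4i32 (bitcast A), (bitcast B)), so it can be
// selected as an integer PXOR and seen by the integer logic combines.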
52183 
52184 
52185 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
52186 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
52187   if (N->getOpcode() != ISD::XOR)
52188     return SDValue();
52189 
52190   SDValue LHS = N->getOperand(0);
52191   if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
52192     return SDValue();
52193 
52194   X86::CondCode NewCC = X86::GetOppositeBranchCondition(
52195       X86::CondCode(LHS->getConstantOperandVal(0)));
52196   SDLoc DL(N);
52197   return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
52198 }
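
// e.g. xor (X86ISD::SETCC E, EFLAGS), 1 becomes X86ISD::SETCC NE, EFLAGS:
// inverting the condition code is free, so the explicit xor disappears.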
52199 
52200 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
52201                           TargetLowering::DAGCombinerInfo &DCI,
52202                           const X86Subtarget &Subtarget) {
52203   SDValue N0 = N->getOperand(0);
52204   SDValue N1 = N->getOperand(1);
52205   EVT VT = N->getValueType(0);
52206 
52207   // If this is SSE1 only convert to FXOR to avoid scalarization.
52208   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
52209     return DAG.getBitcast(MVT::v4i32,
52210                           DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
52211                                       DAG.getBitcast(MVT::v4f32, N0),
52212                                       DAG.getBitcast(MVT::v4f32, N1)));
52213   }
52214 
52215   if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
52216     return Cmp;
52217 
52218   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
52219     return R;
52220 
52221   if (SDValue R = combineBitOpWithShift(N, DAG))
52222     return R;
52223 
52224   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
52225     return FPLogic;
52226 
52227   if (DCI.isBeforeLegalizeOps())
52228     return SDValue();
52229 
52230   if (SDValue SetCC = foldXor1SetCC(N, DAG))
52231     return SetCC;
52232 
52233   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
52234     return RV;
52235 
52236   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
52237   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52238   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
52239       N0.getOperand(0).getValueType().isVector() &&
52240       N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
52241       TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
52242     return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
52243                                          N0.getOperand(0).getValueType()));
52244   }
52245 
52246   // Handle AVX512 mask widening.
52247   // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
52248   if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
52249       VT.getVectorElementType() == MVT::i1 &&
52250       N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
52251       TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
52252     return DAG.getNode(
52253         ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
52254         DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
52255         N0.getOperand(2));
52256   }
52257 
52258   // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
52259   // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
52260   // TODO: Under what circumstances could this be performed in DAGCombine?
52261   if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
52262       N0.getOperand(0).getOpcode() == N->getOpcode()) {
52263     SDValue TruncExtSrc = N0.getOperand(0);
52264     auto *N1C = dyn_cast<ConstantSDNode>(N1);
52265     auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
52266     if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
52267       SDLoc DL(N);
52268       SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
52269       SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
52270       return DAG.getNode(ISD::XOR, DL, VT, LHS,
52271                          DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
52272     }
52273   }
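
  // Illustrative example of the fold above:
  //   xor (zext i32 (xor i8 X, 0x80)), 0x7F
  // becomes
  //   xor (zext i32 X), (xor i32 (zext i32 0x80), 0x7F) == xor (zext X), 0xFF
  // so only a single variable xor survives after constant folding.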
52274 
52275   return combineFneg(N, DAG, DCI, Subtarget);
52276 }
52277 
52278 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
52279                             TargetLowering::DAGCombinerInfo &DCI,
52280                             const X86Subtarget &Subtarget) {
52281   EVT VT = N->getValueType(0);
52282   unsigned NumBits = VT.getSizeInBits();
52283 
52284   // TODO - Constant Folding.
52285 
52286   // Simplify the inputs.
52287   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52288   APInt DemandedMask(APInt::getAllOnes(NumBits));
52289   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
52290     return SDValue(N, 0);
52291 
52292   return SDValue();
52293 }
52294 
52295 static bool isNullFPScalarOrVectorConst(SDValue V) {
52296   return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
52297 }
52298 
52299 /// If a value is a scalar FP zero or a vector FP zero (potentially including
52300 /// undefined elements), return a zero constant that may be used to fold away
52301 /// that value. In the case of a vector, the returned constant will not contain
52302 /// undefined elements even if the input parameter does. This makes it suitable
52303 /// to be used as a replacement operand with operations (eg, bitwise-and) where
52304 /// to be used as a replacement operand with operations (e.g., bitwise-and) where
52305 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
52306                                         const X86Subtarget &Subtarget) {
52307   if (!isNullFPScalarOrVectorConst(V))
52308     return SDValue();
52309 
52310   if (V.getValueType().isVector())
52311     return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
52312 
52313   return V;
52314 }
52315 
52316 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
52317                                       const X86Subtarget &Subtarget) {
52318   SDValue N0 = N->getOperand(0);
52319   SDValue N1 = N->getOperand(1);
52320   EVT VT = N->getValueType(0);
52321   SDLoc DL(N);
52322 
52323   // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
52324   if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
52325         (VT == MVT::f64 && Subtarget.hasSSE2()) ||
52326         (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
52327     return SDValue();
52328 
52329   auto isAllOnesConstantFP = [](SDValue V) {
52330     if (V.getSimpleValueType().isVector())
52331       return ISD::isBuildVectorAllOnes(V.getNode());
52332     auto *C = dyn_cast<ConstantFPSDNode>(V);
52333     return C && C->getConstantFPValue()->isAllOnesValue();
52334   };
52335 
52336   // fand (fxor X, -1), Y --> fandn X, Y
52337   if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
52338     return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
52339 
52340   // fand X, (fxor Y, -1) --> fandn Y, X
52341   if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
52342     return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
52343 
52344   return SDValue();
52345 }
52346 
52347 /// Do target-specific dag combines on X86ISD::FAND nodes.
52348 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
52349                            const X86Subtarget &Subtarget) {
52350   // FAND(0.0, x) -> 0.0
52351   if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
52352     return V;
52353 
52354   // FAND(x, 0.0) -> 0.0
52355   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
52356     return V;
52357 
52358   if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
52359     return V;
52360 
52361   return lowerX86FPLogicOp(N, DAG, Subtarget);
52362 }
52363 
52364 /// Do target-specific dag combines on X86ISD::FANDN nodes.
52365 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
52366                             const X86Subtarget &Subtarget) {
52367   // FANDN(0.0, x) -> x
52368   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52369     return N->getOperand(1);
52370 
52371   // FANDN(x, 0.0) -> 0.0
52372   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
52373     return V;
52374 
52375   return lowerX86FPLogicOp(N, DAG, Subtarget);
52376 }
52377 
52378 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
52379 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
52380                           TargetLowering::DAGCombinerInfo &DCI,
52381                           const X86Subtarget &Subtarget) {
52382   assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
52383 
52384   // F[X]OR(0.0, x) -> x
52385   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52386     return N->getOperand(1);
52387 
52388   // F[X]OR(x, 0.0) -> x
52389   if (isNullFPScalarOrVectorConst(N->getOperand(1)))
52390     return N->getOperand(0);
52391 
52392   if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
52393     return NewVal;
52394 
52395   return lowerX86FPLogicOp(N, DAG, Subtarget);
52396 }
52397 
52398 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
52399 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
52400   assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
52401 
52402   // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
52403   if (!DAG.getTarget().Options.NoNaNsFPMath ||
52404       !DAG.getTarget().Options.NoSignedZerosFPMath)
52405     return SDValue();
52406 
52407   // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
52408   // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
52409   // into FMAXC and FMINC, which are commutative operations.
52410   switch (N->getOpcode()) {
52411     default: llvm_unreachable("unknown opcode");
52412     case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
52413     case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
52414   }
52415 
52416   return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
52417                      N->getOperand(0), N->getOperand(1));
52418 }
52419 
52420 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
52421                                      const X86Subtarget &Subtarget) {
52422   EVT VT = N->getValueType(0);
52423   if (Subtarget.useSoftFloat() || isSoftFP16(VT, Subtarget))
52424     return SDValue();
52425 
52426   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52427 
52428   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
52429         (Subtarget.hasSSE2() && VT == MVT::f64) ||
52430         (Subtarget.hasFP16() && VT == MVT::f16) ||
52431         (VT.isVector() && TLI.isTypeLegal(VT))))
52432     return SDValue();
52433 
52434   SDValue Op0 = N->getOperand(0);
52435   SDValue Op1 = N->getOperand(1);
52436   SDLoc DL(N);
52437   auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
52438 
52439   // If we don't have to respect NaN inputs, this is a direct translation to x86
52440   // min/max instructions.
52441   if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
52442     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52443 
52444   // If one of the operands is known non-NaN, use the native min/max
52445   // instructions with the non-NaN input as the second operand.
52446   if (DAG.isKnownNeverNaN(Op1))
52447     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52448   if (DAG.isKnownNeverNaN(Op0))
52449     return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
52450 
52451   // If we have to respect NaN inputs, this takes at least 3 instructions.
52452   // Favor a library call when operating on a scalar and minimizing code size.
52453   if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
52454     return SDValue();
52455 
52456   EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
52457                                          VT);
52458 
52459   // There are 4 possibilities involving NaN inputs, and these are the required
52460   // outputs:
52461   //                   Op1
52462   //               Num     NaN
52463   //            ----------------
52464   //       Num  |  Max  |  Op0 |
52465   // Op0        ----------------
52466   //       NaN  |  Op1  |  NaN |
52467   //            ----------------
52468   //
52469   // The SSE FP max/min instructions were not designed for this case, but rather
52470   // to implement:
52471   //   Min = Op1 < Op0 ? Op1 : Op0
52472   //   Max = Op1 > Op0 ? Op1 : Op0
52473   //
52474   // So they always return Op0 if either input is a NaN. However, we can still
52475   // use those instructions for fmaxnum by selecting away a NaN input.
52476 
52477   // If either operand is NaN, the 2nd source operand (Op0) is passed through.
52478   SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
52479   SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
52480 
52481   // If Op0 is a NaN, select Op1. Otherwise, select the min/max result. If both
52482   // operands are NaN, the NaN value of Op1 is the result.
52483   return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
52484 }
52485 
52486 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
52487                                    TargetLowering::DAGCombinerInfo &DCI) {
52488   EVT VT = N->getValueType(0);
52489   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52490 
52491   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
52492   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
52493     return SDValue(N, 0);
52494 
52495   // Convert a full vector load into vzload when not all bits are needed.
52496   SDValue In = N->getOperand(0);
52497   MVT InVT = In.getSimpleValueType();
52498   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52499       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52500     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52501     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
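      // Only as many source elements as there are result elements are used
      // (e.g. the low 64 bits of a v4i32 load for a v2f64 result), so narrow
      // the load accordingly and let the vzload zero the upper lanes.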
52502     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52503     MVT MemVT = MVT::getIntegerVT(NumBits);
52504     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52505     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52506       SDLoc dl(N);
52507       SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
52508                                     DAG.getBitcast(InVT, VZLoad));
52509       DCI.CombineTo(N, Convert);
52510       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52511       DCI.recursivelyDeleteUnusedNodes(LN);
52512       return SDValue(N, 0);
52513     }
52514   }
52515 
52516   return SDValue();
52517 }
52518 
52519 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
52520                                      TargetLowering::DAGCombinerInfo &DCI) {
52521   bool IsStrict = N->isTargetStrictFPOpcode();
52522   EVT VT = N->getValueType(0);
52523 
52524   // Convert a full vector load into vzload when not all bits are needed.
52525   SDValue In = N->getOperand(IsStrict ? 1 : 0);
52526   MVT InVT = In.getSimpleValueType();
52527   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52528       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52529     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52530     LoadSDNode *LN = cast<LoadSDNode>(In);
52531     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52532     MVT MemVT = MVT::getFloatingPointVT(NumBits);
52533     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52534     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52535       SDLoc dl(N);
52536       if (IsStrict) {
52537         SDValue Convert =
52538             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
52539                         {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
52540         DCI.CombineTo(N, Convert, Convert.getValue(1));
52541       } else {
52542         SDValue Convert =
52543             DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
52544         DCI.CombineTo(N, Convert);
52545       }
52546       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52547       DCI.recursivelyDeleteUnusedNodes(LN);
52548       return SDValue(N, 0);
52549     }
52550   }
52551 
52552   return SDValue();
52553 }
52554 
52555 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
52556 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
52557                             TargetLowering::DAGCombinerInfo &DCI,
52558                             const X86Subtarget &Subtarget) {
52559   SDValue N0 = N->getOperand(0);
52560   SDValue N1 = N->getOperand(1);
52561   MVT VT = N->getSimpleValueType(0);
52562   int NumElts = VT.getVectorNumElements();
52563   unsigned EltSizeInBits = VT.getScalarSizeInBits();
52564 
52565   // ANDNP(undef, x) -> 0
52566   // ANDNP(x, undef) -> 0
52567   if (N0.isUndef() || N1.isUndef())
52568     return DAG.getConstant(0, SDLoc(N), VT);
52569 
52570   // ANDNP(0, x) -> x
52571   if (ISD::isBuildVectorAllZeros(N0.getNode()))
52572     return N1;
52573 
52574   // ANDNP(x, 0) -> 0
52575   if (ISD::isBuildVectorAllZeros(N1.getNode()))
52576     return DAG.getConstant(0, SDLoc(N), VT);
52577 
52578   // Turn ANDNP back to AND if input is inverted.
52579   if (SDValue Not = IsNOT(N0, DAG))
52580     return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not), N1);
52581 
52582   // Constant Folding
52583   APInt Undefs0, Undefs1;
52584   SmallVector<APInt> EltBits0, EltBits1;
52585   if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
52586     SDLoc DL(N);
52587     APInt ResultUndefs = APInt::getZero(NumElts);
52588 
52589     if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
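      // ANDNP computes (~Op0 & Op1), so fold the two constant operands
      // element by element.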
52590       SmallVector<APInt> ResultBits;
52591       for (int I = 0; I != NumElts; ++I)
52592         ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
52593       return getConstVector(ResultBits, ResultUndefs, VT, DAG, DL);
52594     }
52595 
52596     // Constant fold NOT(N0) to allow us to use AND.
52597     // Ensure this is only performed if we can confirm that the bitcasted source
52598     // has one use to prevent an infinite loop with canonicalizeBitSelect.
52599     if (N0->hasOneUse()) {
52600       SDValue BC0 = peekThroughOneUseBitcasts(N0);
52601       if (BC0.getOpcode() != ISD::BITCAST) {
52602         for (APInt &Elt : EltBits0)
52603           Elt = ~Elt;
52604         SDValue Not = getConstVector(EltBits0, ResultUndefs, VT, DAG, DL);
52605         return DAG.getNode(ISD::AND, DL, VT, Not, N1);
52606       }
52607     }
52608   }
52609 
52610   // Attempt to recursively combine a bitmask ANDNP with shuffles.
52611   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
52612     SDValue Op(N, 0);
52613     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
52614       return Res;
52615 
52616     // If either operand is a constant mask, then only the elements that aren't
52617     // zero are actually demanded by the other operand.
52618     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
52619       APInt UndefElts;
52620       SmallVector<APInt> EltBits;
52621       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
52622       APInt DemandedElts = APInt::getAllOnes(NumElts);
52623       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
52624                                         EltBits)) {
52625         DemandedBits.clearAllBits();
52626         DemandedElts.clearAllBits();
52627         for (int I = 0; I != NumElts; ++I) {
52628           if (UndefElts[I]) {
52629             // We can't assume an undef src element gives an undef dst - the
52630             // other src might be zero.
52631             DemandedBits.setAllBits();
52632             DemandedElts.setBit(I);
52633           } else if ((Invert && !EltBits[I].isAllOnes()) ||
52634                      (!Invert && !EltBits[I].isZero())) {
52635             DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
52636             DemandedElts.setBit(I);
52637           }
52638         }
52639       }
52640       return std::make_pair(DemandedBits, DemandedElts);
52641     };
52642     APInt Bits0, Elts0;
52643     APInt Bits1, Elts1;
52644     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
52645     std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
52646 
52647     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52648     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
52649         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
52650         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
52651         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
52652       if (N->getOpcode() != ISD::DELETED_NODE)
52653         DCI.AddToWorklist(N);
52654       return SDValue(N, 0);
52655     }
52656   }
52657 
52658   return SDValue();
52659 }
52660 
52661 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
52662                          TargetLowering::DAGCombinerInfo &DCI) {
52663   SDValue N1 = N->getOperand(1);
52664 
52665   // BT ignores high bits in the bit index operand.
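  // E.g. for a 64-bit index operand only the low 6 bits are demanded.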
52666   unsigned BitWidth = N1.getValueSizeInBits();
52667   APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
52668   if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
52669     if (N->getOpcode() != ISD::DELETED_NODE)
52670       DCI.AddToWorklist(N);
52671     return SDValue(N, 0);
52672   }
52673 
52674   return SDValue();
52675 }
52676 
52677 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
52678                                TargetLowering::DAGCombinerInfo &DCI) {
52679   bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
52680   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
52681 
52682   if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
52683     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
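    // Only the low 4 of the 8 source f16 elements feed the v4f32 result.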
52684     APInt DemandedElts = APInt::getLowBitsSet(8, 4);
52685     if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
52686       if (N->getOpcode() != ISD::DELETED_NODE)
52687         DCI.AddToWorklist(N);
52688       return SDValue(N, 0);
52689     }
52690 
52691     // Convert a full vector load into vzload when not all bits are needed.
52692     if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
52693       LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
52694       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
52695         SDLoc dl(N);
52696         if (IsStrict) {
52697           SDValue Convert = DAG.getNode(
52698               N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
52699               {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
52700           DCI.CombineTo(N, Convert, Convert.getValue(1));
52701         } else {
52702           SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
52703                                         DAG.getBitcast(MVT::v8i16, VZLoad));
52704           DCI.CombineTo(N, Convert);
52705         }
52706 
52707         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52708         DCI.recursivelyDeleteUnusedNodes(LN);
52709         return SDValue(N, 0);
52710       }
52711     }
52712   }
52713 
52714   return SDValue();
52715 }
52716 
52717 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
52718 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
52719   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52720 
52721   EVT DstVT = N->getValueType(0);
52722 
52723   SDValue N0 = N->getOperand(0);
52724   SDValue N1 = N->getOperand(1);
52725   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52726 
52727   if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
52728     return SDValue();
52729 
52730   // Look through single use any_extends / truncs.
52731   SDValue IntermediateBitwidthOp;
52732   if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
52733       N0.hasOneUse()) {
52734     IntermediateBitwidthOp = N0;
52735     N0 = N0.getOperand(0);
52736   }
52737 
52738   // See if we have a single use cmov.
52739   if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
52740     return SDValue();
52741 
52742   SDValue CMovOp0 = N0.getOperand(0);
52743   SDValue CMovOp1 = N0.getOperand(1);
52744 
52745   // Make sure both operands are constants.
52746   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52747       !isa<ConstantSDNode>(CMovOp1.getNode()))
52748     return SDValue();
52749 
52750   SDLoc DL(N);
52751 
52752   // If we looked through an any_extend/trunc above, apply it to the constants.
52753   if (IntermediateBitwidthOp) {
52754     unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
52755     CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
52756     CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
52757   }
52758 
52759   CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
52760   CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
52761 
52762   EVT CMovVT = DstVT;
52763   // We do not want i16 CMOVs. Promote to i32 and truncate afterwards.
52764   if (DstVT == MVT::i16) {
52765     CMovVT = MVT::i32;
52766     CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
52767     CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
52768   }
52769 
52770   SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
52771                              N0.getOperand(2), N0.getOperand(3));
52772 
52773   if (CMovVT != DstVT)
52774     CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
52775 
52776   return CMov;
52777 }
52778 
52779 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
52780                                       const X86Subtarget &Subtarget) {
52781   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52782 
52783   if (SDValue V = combineSextInRegCmov(N, DAG))
52784     return V;
52785 
52786   EVT VT = N->getValueType(0);
52787   SDValue N0 = N->getOperand(0);
52788   SDValue N1 = N->getOperand(1);
52789   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52790   SDLoc dl(N);
52791 
52792   // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
52793   // AVX2 since there is no sign-extended shift right operation on a vector
52794   // with 64-bit elements.
52795   // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
52796   //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
52797   if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
52798                            N0.getOpcode() == ISD::SIGN_EXTEND)) {
52799     SDValue N00 = N0.getOperand(0);
52800 
52801     // EXTLOAD has a better solution on AVX2,
52802     // it may be replaced with X86ISD::VSEXT node.
52803     if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
52804       if (!ISD::isNormalLoad(N00.getNode()))
52805         return SDValue();
52806 
52807     // Attempt to promote any comparison mask ops before the
52808     // SIGN_EXTEND_INREG gets in the way.
52809     if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
52810       return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
52811 
52812     if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
52813       SDValue Tmp =
52814           DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
52815       return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
52816     }
52817   }
52818   return SDValue();
52819 }
52820 
52821 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
52822 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
52823 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
52824 /// opportunities to combine math ops, use an LEA, or use a complex addressing
52825 /// mode. This can eliminate extend, add, and shift instructions.
52826 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
52827                                    const X86Subtarget &Subtarget) {
52828   if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
52829       Ext->getOpcode() != ISD::ZERO_EXTEND)
52830     return SDValue();
52831 
52832   // TODO: This should be valid for other integer types.
52833   EVT VT = Ext->getValueType(0);
52834   if (VT != MVT::i64)
52835     return SDValue();
52836 
52837   SDValue Add = Ext->getOperand(0);
52838   if (Add.getOpcode() != ISD::ADD)
52839     return SDValue();
52840 
52841   bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
52842   bool NSW = Add->getFlags().hasNoSignedWrap();
52843   bool NUW = Add->getFlags().hasNoUnsignedWrap();
52844 
52845   // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
52846   // into the 'zext'.
52847   if ((Sext && !NSW) || (!Sext && !NUW))
52848     return SDValue();
52849 
52850   // Having a constant operand to the 'add' ensures that we are not increasing
52851   // the instruction count because the constant is extended for free below.
52852   // A constant operand can also become the displacement field of an LEA.
52853   auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
52854   if (!AddOp1)
52855     return SDValue();
52856 
52857   // Don't make the 'add' bigger if there's no hope of combining it with some
52858   // other 'add' or 'shl' instruction.
52859   // TODO: It may be profitable to generate simpler LEA instructions in place
52860   // of single 'add' instructions, but the cost model for selecting an LEA
52861   // currently has a high threshold.
52862   bool HasLEAPotential = false;
52863   for (auto *User : Ext->uses()) {
52864     if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
52865       HasLEAPotential = true;
52866       break;
52867     }
52868   }
52869   if (!HasLEAPotential)
52870     return SDValue();
52871 
52872   // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
52873   int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
52874   SDValue AddOp0 = Add.getOperand(0);
52875   SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
52876   SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
52877 
52878   // The wider add is guaranteed not to wrap because both operands are
52879   // sign- or zero-extended.
52880   SDNodeFlags Flags;
52881   Flags.setNoSignedWrap(NSW);
52882   Flags.setNoUnsignedWrap(NUW);
52883   return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
52884 }
52885 
52886 // If we encounter an {ANY,SIGN,ZERO}_EXTEND applied to a CMOV with constant
52887 // operands and the result of the CMOV is not used anywhere else, promote the
52888 // CMOV itself instead of promoting its result. This can be beneficial because:
52889 //     1) X86TargetLowering::EmitLoweredSelect can later merge two (or more)
52890 //        pseudo-CMOVs only when they come one after another, and getting rid
52891 //        of the result extension code after the CMOV helps that.
52892 //     2) Promotion of constant CMOV arguments is free, hence the
52893 //        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
52894 //     3) A 16-bit CMOV encoding is 4 bytes and a 32-bit CMOV is 3 bytes, so
52895 //        this promotion is also good in terms of code size.
52896 //        (A 64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
52897 //         promotion).
52898 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
52899   SDValue CMovN = Extend->getOperand(0);
52900   if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
52901     return SDValue();
52902 
52903   EVT TargetVT = Extend->getValueType(0);
52904   unsigned ExtendOpcode = Extend->getOpcode();
52905   SDLoc DL(Extend);
52906 
52907   EVT VT = CMovN.getValueType();
52908   SDValue CMovOp0 = CMovN.getOperand(0);
52909   SDValue CMovOp1 = CMovN.getOperand(1);
52910 
52911   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52912       !isa<ConstantSDNode>(CMovOp1.getNode()))
52913     return SDValue();
52914 
52915   // Only extend to i32 or i64.
52916   if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
52917     return SDValue();
52918 
52919   // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
52920   // are free.
52921   if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
52922     return SDValue();
52923 
52924   // If this is a zero extend to i64, we should only extend to i32 and use a free
52925   // zero extend to finish.
52926   EVT ExtendVT = TargetVT;
52927   if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
52928     ExtendVT = MVT::i32;
52929 
52930   CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
52931   CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
52932 
52933   SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
52934                             CMovN.getOperand(2), CMovN.getOperand(3));
52935 
52936   // Finish extending if needed.
52937   if (ExtendVT != TargetVT)
52938     Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
52939 
52940   return Res;
52941 }
52942 
52943 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
52944 // result type.
52945 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
52946                                const X86Subtarget &Subtarget) {
52947   SDValue N0 = N->getOperand(0);
52948   EVT VT = N->getValueType(0);
52949   SDLoc dl(N);
52950 
52951   // Only do this combine with AVX512 for vector extends.
52952   if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
52953     return SDValue();
52954 
52955   // Only combine legal element types.
52956   EVT SVT = VT.getVectorElementType();
52957   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
52958       SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
52959     return SDValue();
52960 
52961   // We don't have a CMPP instruction for vXf16.
52962   if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
52963     return SDValue();
52964   // We can only do this if the vector size is 256 bits or less.
52965   unsigned Size = VT.getSizeInBits();
52966   if (Size > 256 && Subtarget.useAVX512Regs())
52967     return SDValue();
52968 
52969   // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
52970   // those are the only integer compares we have.
52971   ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
52972   if (ISD::isUnsignedIntSetCC(CC))
52973     return SDValue();
52974 
52975   // Only do this combine if the extension will be fully consumed by the setcc.
52976   EVT N00VT = N0.getOperand(0).getValueType();
52977   EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
52978   if (Size != MatchingVecType.getSizeInBits())
52979     return SDValue();
52980 
52981   SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
52982 
52983   if (N->getOpcode() == ISD::ZERO_EXTEND)
52984     Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
52985 
52986   return Res;
52987 }
52988 
52989 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
52990                            TargetLowering::DAGCombinerInfo &DCI,
52991                            const X86Subtarget &Subtarget) {
52992   SDValue N0 = N->getOperand(0);
52993   EVT VT = N->getValueType(0);
52994   SDLoc DL(N);
52995 
52996   // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52997   if (!DCI.isBeforeLegalizeOps() &&
52998       N0.getOpcode() == X86ISD::SETCC_CARRY) {
52999     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
53000                                  N0->getOperand(1));
53001     bool ReplaceOtherUses = !N0.hasOneUse();
53002     DCI.CombineTo(N, Setcc);
53003     // Replace other uses with a truncate of the widened setcc_carry.
53004     if (ReplaceOtherUses) {
53005       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
53006                                   N0.getValueType(), Setcc);
53007       DCI.CombineTo(N0.getNode(), Trunc);
53008     }
53009 
53010     return SDValue(N, 0);
53011   }
53012 
53013   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
53014     return NewCMov;
53015 
53016   if (!DCI.isBeforeLegalizeOps())
53017     return SDValue();
53018 
53019   if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
53020     return V;
53021 
53022   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
53023                                                  DAG, DCI, Subtarget))
53024     return V;
53025 
53026   if (VT.isVector()) {
53027     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
53028       return R;
53029 
53030     if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
53031       return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
53032   }
53033 
53034   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
53035     return NewAdd;
53036 
53037   return SDValue();
53038 }
53039 
53040 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
53041                           TargetLowering::DAGCombinerInfo &DCI,
53042                           const X86Subtarget &Subtarget) {
53043   SDLoc dl(N);
53044   EVT VT = N->getValueType(0);
53045   bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
53046 
53047   // Let legalize expand this if it isn't a legal type yet.
53048   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53049   if (!TLI.isTypeLegal(VT))
53050     return SDValue();
53051 
53052   SDValue A = N->getOperand(IsStrict ? 1 : 0);
53053   SDValue B = N->getOperand(IsStrict ? 2 : 1);
53054   SDValue C = N->getOperand(IsStrict ? 3 : 2);
53055 
53056   // If the operation allows fast-math and the target does not support FMA,
53057   // split this into mul+add to avoid libcall(s).
53058   SDNodeFlags Flags = N->getFlags();
53059   if (!IsStrict && Flags.hasAllowReassociation() &&
53060       TLI.isOperationExpand(ISD::FMA, VT)) {
53061     SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
53062     return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
53063   }
53064 
53065   EVT ScalarVT = VT.getScalarType();
53066   if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
53067        !Subtarget.hasAnyFMA()) &&
53068       !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
53069     return SDValue();
53070 
53071   auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
53072     bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
53073     bool LegalOperations = !DCI.isBeforeLegalizeOps();
53074     if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
53075                                                        CodeSize)) {
53076       V = NegV;
53077       return true;
53078     }
53079     // Look through extract_vector_elts. If it comes from an FNEG, create a
53080     // new extract from the FNEG input.
53081     if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
53082         isNullConstant(V.getOperand(1))) {
53083       SDValue Vec = V.getOperand(0);
53084       if (SDValue NegV = TLI.getCheaperNegatedExpression(
53085               Vec, DAG, LegalOperations, CodeSize)) {
53086         V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
53087                         NegV, V.getOperand(1));
53088         return true;
53089       }
53090     }
53091 
53092     return false;
53093   };
53094 
53095   // Do not convert the passthru input of scalar intrinsics.
53096   // FIXME: We could allow negations of the lower element only.
53097   bool NegA = invertIfNegative(A);
53098   bool NegB = invertIfNegative(B);
53099   bool NegC = invertIfNegative(C);
53100 
53101   if (!NegA && !NegB && !NegC)
53102     return SDValue();
53103 
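  // Negating both multiplicands cancels out, so only flip the multiply when
  // exactly one of A/B was negated; NegC independently flips the addend.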
53104   unsigned NewOpcode =
53105       negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
53106 
53107   // Propagate fast-math-flags to new FMA node.
53108   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
53109   if (IsStrict) {
53110     assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
53111     return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
53112                        {N->getOperand(0), A, B, C});
53113   } else {
53114     if (N->getNumOperands() == 4)
53115       return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
53116     return DAG.getNode(NewOpcode, dl, VT, A, B, C);
53117   }
53118 }
53119 
53120 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
53121 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
53122 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
53123                                TargetLowering::DAGCombinerInfo &DCI) {
53124   SDLoc dl(N);
53125   EVT VT = N->getValueType(0);
53126   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53127   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
53128   bool LegalOperations = !DCI.isBeforeLegalizeOps();
53129 
53130   SDValue N2 = N->getOperand(2);
53131 
53132   SDValue NegN2 =
53133       TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
53134   if (!NegN2)
53135     return SDValue();
53136   unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
53137 
53138   if (N->getNumOperands() == 4)
53139     return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
53140                        NegN2, N->getOperand(3));
53141   return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
53142                      NegN2);
53143 }
53144 
53145 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
53146                            TargetLowering::DAGCombinerInfo &DCI,
53147                            const X86Subtarget &Subtarget) {
53148   SDLoc dl(N);
53149   SDValue N0 = N->getOperand(0);
53150   EVT VT = N->getValueType(0);
53151 
53152   // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
53153   // FIXME: Is this needed? We don't seem to have any tests for it.
53154   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
53155       N0.getOpcode() == X86ISD::SETCC_CARRY) {
53156     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
53157                                  N0->getOperand(1));
53158     bool ReplaceOtherUses = !N0.hasOneUse();
53159     DCI.CombineTo(N, Setcc);
53160     // Replace other uses with a truncate of the widened setcc_carry.
53161     if (ReplaceOtherUses) {
53162       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
53163                                   N0.getValueType(), Setcc);
53164       DCI.CombineTo(N0.getNode(), Trunc);
53165     }
53166 
53167     return SDValue(N, 0);
53168   }
53169 
53170   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
53171     return NewCMov;
53172 
53173   if (DCI.isBeforeLegalizeOps())
53174     if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
53175       return V;
53176 
53177   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
53178                                                  DAG, DCI, Subtarget))
53179     return V;
53180 
53181   if (VT.isVector())
53182     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
53183       return R;
53184 
53185   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
53186     return NewAdd;
53187 
53188   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
53189     return R;
53190 
53191   // TODO: Combine with any target/faux shuffle.
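  // An extend of a 128-bit PACKUS whose inputs already have zero upper halves
  // simply reproduces those inputs, so emit their concatenation directly.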
53192   if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
53193       VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
53194     SDValue N00 = N0.getOperand(0);
53195     SDValue N01 = N0.getOperand(1);
53196     unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
53197     APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
53198     if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
53199         (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
53200       return concatSubVectors(N00, N01, DAG, dl);
53201     }
53202   }
53203 
53204   return SDValue();
53205 }
53206 
53207 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
53208 /// recognizable memcmp expansion.
53209 static bool isOrXorXorTree(SDValue X, bool Root = true) {
53210   if (X.getOpcode() == ISD::OR)
53211     return isOrXorXorTree(X.getOperand(0), false) &&
53212            isOrXorXorTree(X.getOperand(1), false);
53213   if (Root)
53214     return false;
53215   return X.getOpcode() == ISD::XOR;
53216 }
53217 
53218 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
53219 /// expansion.
53220 template <typename F>
53221 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
53222                                 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
53223   SDValue Op0 = X.getOperand(0);
53224   SDValue Op1 = X.getOperand(1);
53225   if (X.getOpcode() == ISD::OR) {
53226     SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
53227     SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
53228     if (VecVT != CmpVT)
53229       return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
53230     if (HasPT)
53231       return DAG.getNode(ISD::OR, DL, VecVT, A, B);
53232     return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
53233   }
53234   if (X.getOpcode() == ISD::XOR) {
53235     SDValue A = SToV(Op0);
53236     SDValue B = SToV(Op1);
53237     if (VecVT != CmpVT)
53238       return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
53239     if (HasPT)
53240       return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
53241     return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
53242   }
53243   llvm_unreachable("Impossible");
53244 }
53245 
53246 /// Try to map a 128-bit or larger integer comparison to vector instructions
53247 /// before type legalization splits it up into chunks.
53248 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
53249                                                const X86Subtarget &Subtarget) {
53250   ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
53251   assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
53252 
53253   // We're looking for an oversized integer equality comparison.
53254   SDValue X = SetCC->getOperand(0);
53255   SDValue Y = SetCC->getOperand(1);
53256   EVT OpVT = X.getValueType();
53257   unsigned OpSize = OpVT.getSizeInBits();
53258   if (!OpVT.isScalarInteger() || OpSize < 128)
53259     return SDValue();
53260 
53261   // Ignore a comparison with zero because that gets special treatment in
53262   // EmitTest(). But make an exception for the special case of a pair of
53263   // logically-combined vector-sized operands compared to zero. This pattern may
53264   // be generated by the memcmp expansion pass with oversized integer compares
53265   // (see PR33325).
53266   bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
53267   if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
53268     return SDValue();
53269 
53270   // Don't perform this combine if constructing the vector will be expensive.
53271   auto IsVectorBitCastCheap = [](SDValue X) {
53272     X = peekThroughBitcasts(X);
53273     return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
53274            X.getOpcode() == ISD::LOAD;
53275   };
53276   if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
53277       !IsOrXorXorTreeCCZero)
53278     return SDValue();
53279 
53280   EVT VT = SetCC->getValueType(0);
53281   SDLoc DL(SetCC);
53282 
53283   // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
53284   // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
53285   // Otherwise use PCMPEQ (plus AND) and mask testing.
53286   bool NoImplicitFloatOps =
53287       DAG.getMachineFunction().getFunction().hasFnAttribute(
53288           Attribute::NoImplicitFloat);
53289   if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
53290       ((OpSize == 128 && Subtarget.hasSSE2()) ||
53291        (OpSize == 256 && Subtarget.hasAVX()) ||
53292        (OpSize == 512 && Subtarget.useAVX512Regs()))) {
53293     bool HasPT = Subtarget.hasSSE41();
53294 
53295     // PTEST and MOVMSK are slow on Knights Landing and Knights Mill, and
53296     // widened vector registers are essentially free. (Technically, widening
53297     // registers prevents load folding, but the tradeoff is worth it.)
53298     bool PreferKOT = Subtarget.preferMaskRegisters();
53299     bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
53300 
53301     EVT VecVT = MVT::v16i8;
53302     EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
53303     if (OpSize == 256) {
53304       VecVT = MVT::v32i8;
53305       CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
53306     }
53307     EVT CastVT = VecVT;
53308     bool NeedsAVX512FCast = false;
53309     if (OpSize == 512 || NeedZExt) {
53310       if (Subtarget.hasBWI()) {
53311         VecVT = MVT::v64i8;
53312         CmpVT = MVT::v64i1;
53313         if (OpSize == 512)
53314           CastVT = VecVT;
53315       } else {
53316         VecVT = MVT::v16i32;
53317         CmpVT = MVT::v16i1;
53318         CastVT = OpSize == 512 ? VecVT :
53319                  OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
53320         NeedsAVX512FCast = true;
53321       }
53322     }
53323 
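    // Bitcast each scalar operand into a vector, peeling a zero_extend so a
    // narrower source can be inserted into a zeroed wider vector if needed.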
53324     auto ScalarToVector = [&](SDValue X) -> SDValue {
53325       bool TmpZext = false;
53326       EVT TmpCastVT = CastVT;
53327       if (X.getOpcode() == ISD::ZERO_EXTEND) {
53328         SDValue OrigX = X.getOperand(0);
53329         unsigned OrigSize = OrigX.getScalarValueSizeInBits();
53330         if (OrigSize < OpSize) {
53331           if (OrigSize == 128) {
53332             TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
53333             X = OrigX;
53334             TmpZext = true;
53335           } else if (OrigSize == 256) {
53336             TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
53337             X = OrigX;
53338             TmpZext = true;
53339           }
53340         }
53341       }
53342       X = DAG.getBitcast(TmpCastVT, X);
53343       if (!NeedZExt && !TmpZext)
53344         return X;
53345       return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
53346                          DAG.getConstant(0, DL, VecVT), X,
53347                          DAG.getVectorIdxConstant(0, DL));
53348     };
53349 
53350     SDValue Cmp;
53351     if (IsOrXorXorTreeCCZero) {
53352       // This is a bitwise-combined equality comparison of 2 pairs of vectors:
53353       // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
53354       // Use 2 vector equality compares and 'and' the results before doing a
53355       // MOVMSK.
53356       Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
53357     } else {
53358       SDValue VecX = ScalarToVector(X);
53359       SDValue VecY = ScalarToVector(Y);
53360       if (VecVT != CmpVT) {
53361         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
53362       } else if (HasPT) {
53363         Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
53364       } else {
53365         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
53366       }
53367     }
53368     // AVX512 should emit a setcc that will lower to kortest.
53369     if (VecVT != CmpVT) {
53370       EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
53371                    CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
53372       return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
53373                           DAG.getConstant(0, DL, KRegVT), CC);
53374     }
53375     if (HasPT) {
53376       SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
53377                                      Cmp);
53378       SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
53379       X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
53380       SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
53381       return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
53382     }
53383     // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
53384     // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
53385     // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
53386     assert(Cmp.getValueType() == MVT::v16i8 &&
53387            "Non 128-bit vector on pre-SSE41 target");
53388     SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
53389     SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
53390     return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
53391   }
53392 
53393   return SDValue();
53394 }
53395 
53396 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
53397                             TargetLowering::DAGCombinerInfo &DCI,
53398                             const X86Subtarget &Subtarget) {
53399   const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
53400   const SDValue LHS = N->getOperand(0);
53401   const SDValue RHS = N->getOperand(1);
53402   EVT VT = N->getValueType(0);
53403   EVT OpVT = LHS.getValueType();
53404   SDLoc DL(N);
53405 
53406   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
53407     if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
53408       return V;
53409 
53410     if (VT == MVT::i1 && isNullConstant(RHS)) {
53411       SDValue X86CC;
53412       if (SDValue V =
53413               MatchVectorAllZeroTest(LHS, CC, DL, Subtarget, DAG, X86CC))
53414         return DAG.getNode(ISD::TRUNCATE, DL, VT,
53415                            DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
53416     }
53417 
53418     if (OpVT.isScalarInteger()) {
53419       // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
53420       // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
53421       auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
53422         if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
53423           if (N0.getOperand(0) == N1)
53424             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
53425                                N0.getOperand(1));
53426           if (N0.getOperand(1) == N1)
53427             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
53428                                N0.getOperand(0));
53429         }
53430         return SDValue();
53431       };
53432       if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
53433         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53434       if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
53435         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53436 
53437       // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
53438       // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
53439       auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
53440         if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
53441           if (N0.getOperand(0) == N1)
53442             return DAG.getNode(ISD::AND, DL, OpVT, N1,
53443                                DAG.getNOT(DL, N0.getOperand(1), OpVT));
53444           if (N0.getOperand(1) == N1)
53445             return DAG.getNode(ISD::AND, DL, OpVT, N1,
53446                                DAG.getNOT(DL, N0.getOperand(0), OpVT));
53447         }
53448         return SDValue();
53449       };
53450       if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
53451         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53452       if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
53453         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53454 
53455       // cmpeq(trunc(x),0) --> cmpeq(x,0)
53456       // cmpne(trunc(x),0) --> cmpne(x,0)
53457       // iff x upper bits are zero.
53458       // TODO: Add support for RHS to be truncate as well?
53459       if (LHS.getOpcode() == ISD::TRUNCATE &&
53460           LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
53461           isNullConstant(RHS) && !DCI.isBeforeLegalize()) {
53462         EVT SrcVT = LHS.getOperand(0).getValueType();
53463         APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
53464                                                 OpVT.getScalarSizeInBits());
53465         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53466         if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
53467             TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
53468           return DAG.getSetCC(DL, VT, LHS.getOperand(0),
53469                               DAG.getConstant(0, DL, SrcVT), CC);
53470       }
53471     }
53472   }
53473 
53474   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
53475       (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
53476     // Using temporaries to avoid messing up operand ordering for later
53477     // transformations if this doesn't work.
53478     SDValue Op0 = LHS;
53479     SDValue Op1 = RHS;
53480     ISD::CondCode TmpCC = CC;
53481     // Put build_vector on the right.
53482     if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
53483       std::swap(Op0, Op1);
53484       TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
53485     }
53486 
53487     bool IsSEXT0 =
53488         (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
53489         (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
53490     bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
53491 
53492     if (IsSEXT0 && IsVZero1) {
53493       assert(VT == Op0.getOperand(0).getValueType() &&
53494              "Unexpected operand type");
53495       if (TmpCC == ISD::SETGT)
53496         return DAG.getConstant(0, DL, VT);
53497       if (TmpCC == ISD::SETLE)
53498         return DAG.getConstant(1, DL, VT);
53499       if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
53500         return DAG.getNOT(DL, Op0.getOperand(0), VT);
53501 
53502       assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
53503              "Unexpected condition code!");
53504       return Op0.getOperand(0);
53505     }
53506   }
53507 
53508   // If we have AVX512 but not BWI, and this is a vXi16/vXi8 setcc, just
53509   // pre-promote its result type since vXi1 vectors don't get promoted
53510   // during type legalization.
53511   // NOTE: The element count check is to ignore operand types that need to
53512   // go through type promotion to a 128-bit vector.
53513   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
53514       VT.getVectorElementType() == MVT::i1 &&
53515       (OpVT.getVectorElementType() == MVT::i8 ||
53516        OpVT.getVectorElementType() == MVT::i16)) {
53517     SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
53518     return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
53519   }
53520 
53521   // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
53522   // to avoid scalarization via legalization because v4i32 is not a legal type.
53523   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
53524       LHS.getValueType() == MVT::v4f32)
53525     return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
53526 
53527   // X pred 0.0 --> X pred -X
53528   // If the negation of X already exists, use it in the comparison. This removes
53529   // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
53530   // instructions in patterns with a 'select' node.
53531   if (isNullFPScalarOrVectorConst(RHS)) {
53532     SDVTList FNegVT = DAG.getVTList(OpVT);
53533     if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
53534       return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
53535   }
53536 
53537   return SDValue();
53538 }
53539 
53540 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
53541                              TargetLowering::DAGCombinerInfo &DCI,
53542                              const X86Subtarget &Subtarget) {
53543   SDValue Src = N->getOperand(0);
53544   MVT SrcVT = Src.getSimpleValueType();
53545   MVT VT = N->getSimpleValueType(0);
53546   unsigned NumBits = VT.getScalarSizeInBits();
53547   unsigned NumElts = SrcVT.getVectorNumElements();
53548   unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
53549   assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
53550 
53551   // Perform constant folding.
53552   APInt UndefElts;
53553   SmallVector<APInt, 32> EltBits;
53554   if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
53555     APInt Imm(32, 0);
53556     for (unsigned Idx = 0; Idx != NumElts; ++Idx)
53557       if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53558         Imm.setBit(Idx);
53559 
53560     return DAG.getConstant(Imm, SDLoc(N), VT);
53561   }
53562 
53563   // Look through int->fp bitcasts that don't change the element width.
53564   unsigned EltWidth = SrcVT.getScalarSizeInBits();
53565   if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
53566       Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
53567     return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
53568 
53569   // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
53570   // with scalar comparisons.
53571   if (SDValue NotSrc = IsNOT(Src, DAG)) {
53572     SDLoc DL(N);
53573     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53574     NotSrc = DAG.getBitcast(SrcVT, NotSrc);
53575     return DAG.getNode(ISD::XOR, DL, VT,
53576                        DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
53577                        DAG.getConstant(NotMask, DL, VT));
53578   }
53579 
53580   // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
53581   // results with scalar comparisons.
53582   if (Src.getOpcode() == X86ISD::PCMPGT &&
53583       ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
53584     SDLoc DL(N);
53585     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53586     return DAG.getNode(ISD::XOR, DL, VT,
53587                        DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
53588                        DAG.getConstant(NotMask, DL, VT));
53589   }
53590 
53591   // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
53592   // iff pow2splat(c1).
53593   if (Src.getOpcode() == X86ISD::PCMPEQ &&
53594       Src.getOperand(0).getOpcode() == ISD::AND &&
53595       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
53596     SDValue LHS = Src.getOperand(0).getOperand(0);
53597     SDValue RHS = Src.getOperand(0).getOperand(1);
53598     KnownBits KnownRHS = DAG.computeKnownBits(RHS);
53599     if (KnownRHS.isConstant() && KnownRHS.getConstant().isPowerOf2()) {
53600       SDLoc DL(N);
53601       MVT ShiftVT = SrcVT;
53602       if (ShiftVT.getScalarType() == MVT::i8) {
53603         // vXi8 shifts - we only care about the sign bit, so we can use PSLLW.
53604         ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
53605         LHS = DAG.getBitcast(ShiftVT, LHS);
53606       }
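      // Shift the tested power-of-2 bit up into the sign bit; the NOT below
      // accounts for the compare-equal-to-zero inversion.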
53607       unsigned ShiftAmt = KnownRHS.getConstant().countLeadingZeros();
53608       LHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT, LHS,
53609                                        ShiftAmt, DAG);
53610       LHS = DAG.getNOT(DL, DAG.getBitcast(SrcVT, LHS), SrcVT);
53611       return DAG.getNode(X86ISD::MOVMSK, DL, VT, LHS);
53612     }
53613   }
53614 
53615   // Simplify the inputs.
53616   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53617   APInt DemandedMask(APInt::getAllOnes(NumBits));
53618   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53619     return SDValue(N, 0);
53620 
53621   return SDValue();
53622 }
53623 
53624 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
53625                                        TargetLowering::DAGCombinerInfo &DCI,
53626                                        const X86Subtarget &Subtarget) {
53627   auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
53628   SDValue BasePtr = MemOp->getBasePtr();
53629   SDValue Index = MemOp->getIndex();
53630   SDValue Scale = MemOp->getScale();
53631   SDValue Mask = MemOp->getMask();
53632 
53633   // Attempt to fold an index scale into the scale value directly.
53634   // For smaller indices, implicit sext is performed BEFORE scale, preventing
53635   // this fold under most circumstances.
53636   // TODO: Move this into X86DAGToDAGISel::matchVectorAddressRecursively?
53637   if ((Index.getOpcode() == X86ISD::VSHLI ||
53638        (Index.getOpcode() == ISD::ADD &&
53639         Index.getOperand(0) == Index.getOperand(1))) &&
53640       isa<ConstantSDNode>(Scale) &&
53641       BasePtr.getScalarValueSizeInBits() == Index.getScalarValueSizeInBits()) {
53642     unsigned ShiftAmt =
53643         Index.getOpcode() == ISD::ADD ? 1 : Index.getConstantOperandVal(1);
53644     uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
53645     uint64_t NewScaleAmt = ScaleAmt * (1ULL << ShiftAmt);
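    // x86 addressing modes only support scale factors of 1, 2, 4 or 8.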
53646     if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
53647       SDValue NewIndex = Index.getOperand(0);
53648       SDValue NewScale =
53649           DAG.getTargetConstant(NewScaleAmt, SDLoc(N), Scale.getValueType());
53650       if (N->getOpcode() == X86ISD::MGATHER)
53651         return getAVX2GatherNode(N->getOpcode(), SDValue(N, 0), DAG,
53652                                  MemOp->getOperand(1), Mask,
53653                                  MemOp->getBasePtr(), NewIndex, NewScale,
53654                                  MemOp->getChain(), Subtarget);
53655       if (N->getOpcode() == X86ISD::MSCATTER)
53656         return getScatterNode(N->getOpcode(), SDValue(N, 0), DAG,
53657                               MemOp->getOperand(1), Mask, MemOp->getBasePtr(),
53658                               NewIndex, NewScale, MemOp->getChain(), Subtarget);
53659     }
53660   }
53661 
53662   // With vector masks we only demand the upper bit of the mask.
53663   if (Mask.getScalarValueSizeInBits() != 1) {
53664     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53665     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53666     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53667       if (N->getOpcode() != ISD::DELETED_NODE)
53668         DCI.AddToWorklist(N);
53669       return SDValue(N, 0);
53670     }
53671   }
53672 
53673   return SDValue();
53674 }
53675 
53676 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
53677                                     SDValue Index, SDValue Base, SDValue Scale,
53678                                     SelectionDAG &DAG) {
53679   SDLoc DL(GorS);
53680 
53681   if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
53682     SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
53683                       Gather->getMask(), Base, Index, Scale } ;
53684     return DAG.getMaskedGather(Gather->getVTList(),
53685                                Gather->getMemoryVT(), DL, Ops,
53686                                Gather->getMemOperand(),
53687                                Gather->getIndexType(),
53688                                Gather->getExtensionType());
53689   }
53690   auto *Scatter = cast<MaskedScatterSDNode>(GorS);
53691   SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
53692                     Scatter->getMask(), Base, Index, Scale };
53693   return DAG.getMaskedScatter(Scatter->getVTList(),
53694                               Scatter->getMemoryVT(), DL,
53695                               Ops, Scatter->getMemOperand(),
53696                               Scatter->getIndexType(),
53697                               Scatter->isTruncatingStore());
53698 }
53699 
53700 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
53701                                     TargetLowering::DAGCombinerInfo &DCI) {
53702   SDLoc DL(N);
53703   auto *GorS = cast<MaskedGatherScatterSDNode>(N);
53704   SDValue Index = GorS->getIndex();
53705   SDValue Base = GorS->getBasePtr();
53706   SDValue Scale = GorS->getScale();
53707 
53708   if (DCI.isBeforeLegalize()) {
53709     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53710 
53711     // Shrink constant indices if they are larger than 32-bits.
53712     // Only do this before legalize types since v2i64 could become v2i32.
53713     // FIXME: We could check that the type is legal if we're after legalize
53714     // types, but then we would need to construct test cases where that happens.
53715     // FIXME: We could support more than just constant vectors, but we need to
53716     // be careful with costing. A truncate that can be optimized out would be fine.
53717     // Otherwise we might only want to create a truncate if it avoids a split.
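    // Worked example (illustrative): a v2i64 constant index <16, 32> has far
    // more than 32 sign bits per element, so it can be truncated to a v2i32
    // index <16, 32> before type legalization without changing any address.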
53718     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
53719       if (BV->isConstant() && IndexWidth > 32 &&
53720           DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53721         EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53722         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53723         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53724       }
53725     }
53726 
53727     // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
53728     // there are sufficient sign bits. Only do this before legalize types to
53729     // avoid creating illegal types in truncate.
53730     if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
53731          Index.getOpcode() == ISD::ZERO_EXTEND) &&
53732         IndexWidth > 32 &&
53733         Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
53734         DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53735       EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53736       Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53737       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53738     }
53739   }
53740 
53741   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53742   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53743   // Try to move splat constant adders from the index operand to the base
53744   // pointer operand. Taking care to multiply by the scale. We can only do
53745   // this when index element type is the same as the pointer type.
53746   // Otherwise we need to be sure the math doesn't wrap before the scale.
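  // Worked example (illustrative): Index = add(X, splat(16)) with Scale = 4
  // addresses Base + 4*X + 64, so the splat folds away by rewriting it as
  // Base' = Base + 64 with Index = X and the same scale.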
53747   if (Index.getOpcode() == ISD::ADD &&
53748       Index.getValueType().getVectorElementType() == PtrVT &&
53749       isa<ConstantSDNode>(Scale)) {
53750     uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
53751     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
53752       BitVector UndefElts;
53753       if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
53754         // FIXME: Allow non-constant?
53755         if (UndefElts.none()) {
53756           // Apply the scale.
53757           APInt Adder = C->getAPIntValue() * ScaleAmt;
53758           // Add it to the existing base.
53759           Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
53760                              DAG.getConstant(Adder, DL, PtrVT));
53761           Index = Index.getOperand(0);
53762           return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53763         }
53764       }
53765 
53766       // It's also possible base is just a constant. In that case, just
53767       // replace it with 0 and move the displacement into the index.
53768       if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
53769           isOneConstant(Scale)) {
53770         SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
53771         // Combine the constant build_vector and the constant base.
53772         Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53773                             Index.getOperand(1), Splat);
53774         // Add to the LHS of the original Index add.
53775         Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53776                             Index.getOperand(0), Splat);
53777         Base = DAG.getConstant(0, DL, Base.getValueType());
53778         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53779       }
53780     }
53781   }
53782 
53783   if (DCI.isBeforeLegalizeOps()) {
53784     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53785 
53786     // Make sure the index is either i32 or i64
53787     if (IndexWidth != 32 && IndexWidth != 64) {
53788       MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
53789       EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
53790       Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
53791       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53792     }
53793   }
53794 
53795   // With vector masks we only demand the upper bit of the mask.
53796   SDValue Mask = GorS->getMask();
53797   if (Mask.getScalarValueSizeInBits() != 1) {
53798     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53799     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53800     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53801       if (N->getOpcode() != ISD::DELETED_NODE)
53802         DCI.AddToWorklist(N);
53803       return SDValue(N, 0);
53804     }
53805   }
53806 
53807   return SDValue();
53808 }
53809 
53810 // Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
53811 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
53812                                const X86Subtarget &Subtarget) {
53813   SDLoc DL(N);
53814   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
53815   SDValue EFLAGS = N->getOperand(1);
53816 
53817   // Try to simplify the EFLAGS and condition code operands.
53818   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
53819     return getSETCC(CC, Flags, DL, DAG);
53820 
53821   return SDValue();
53822 }
53823 
53824 /// Optimize branch condition evaluation.
53825 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
53826                              const X86Subtarget &Subtarget) {
53827   SDLoc DL(N);
53828   SDValue EFLAGS = N->getOperand(3);
53829   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
53830 
53831   // Try to simplify the EFLAGS and condition code operands.
53832   // Make sure to not keep references to operands, as combineSetCCEFLAGS can
53833   // RAUW them under us.
53834   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
53835     SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
53836     return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
53837                        N->getOperand(1), Cond, Flags);
53838   }
53839 
53840   return SDValue();
53841 }
53842 
53843 // TODO: Could we move this to DAGCombine?
53844 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
53845                                                   SelectionDAG &DAG) {
53846   // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
53847   // to optimize away the operation when it's from a constant.
53848   //
53849   // The general transformation is:
53850   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
53851   //       AND(VECTOR_CMP(x,y), constant2)
53852   //    constant2 = UNARYOP(constant)
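  // Worked example (illustrative):
  //    sint_to_fp(and(vector_cmp(x,y), <i32 1,1,1,1>)) -->
  //       and(vector_cmp(x,y), bitcast(<float 1.0,1.0,1.0,1.0>))
  // Each compare lane is all-ones or all-zeros, so the AND either keeps the
  // pre-converted constant or produces the 0.0 bit pattern.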
53853 
53854   // Early exit if this isn't a vector operation, the operand of the
53855   // unary operation isn't a bitwise AND, or if the sizes of the operations
53856   // aren't the same.
53857   EVT VT = N->getValueType(0);
53858   bool IsStrict = N->isStrictFPOpcode();
53859   unsigned NumEltBits = VT.getScalarSizeInBits();
53860   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53861   if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
53862       DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
53863       VT.getSizeInBits() != Op0.getValueSizeInBits())
53864     return SDValue();
53865 
53866   // Now check that the other operand of the AND is a constant. We could
53867   // make the transformation for non-constant splats as well, but it's unclear
53868   // that would be a benefit as it would not eliminate any operations, just
53869   // perform one more step in scalar code before moving to the vector unit.
53870   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
53871     // Bail out if the vector isn't a constant.
53872     if (!BV->isConstant())
53873       return SDValue();
53874 
53875     // Everything checks out. Build up the new and improved node.
53876     SDLoc DL(N);
53877     EVT IntVT = BV->getValueType(0);
53878     // Create a new constant of the appropriate type for the transformed
53879     // DAG.
53880     SDValue SourceConst;
53881     if (IsStrict)
53882       SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
53883                                 {N->getOperand(0), SDValue(BV, 0)});
53884     else
53885       SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
53886     // The AND node needs bitcasts to/from an integer vector type around it.
53887     SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
53888     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
53889                                  MaskConst);
53890     SDValue Res = DAG.getBitcast(VT, NewAnd);
53891     if (IsStrict)
53892       return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
53893     return Res;
53894   }
53895 
53896   return SDValue();
53897 }
53898 
53899 /// If we are converting a value to floating-point, try to replace scalar
53900 /// truncate of an extracted vector element with a bitcast. This tries to keep
53901 /// the sequence on XMM registers rather than moving between vector and GPRs.
53902 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
53903   // TODO: This is currently only used by combineSIntToFP, but it is generalized
53904   //       to allow being called by any similar cast opcode.
53905   // TODO: Consider merging this into lowering: vectorizeExtractedCast().
53906   SDValue Trunc = N->getOperand(0);
53907   if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
53908     return SDValue();
53909 
53910   SDValue ExtElt = Trunc.getOperand(0);
53911   if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53912       !isNullConstant(ExtElt.getOperand(1)))
53913     return SDValue();
53914 
53915   EVT TruncVT = Trunc.getValueType();
53916   EVT SrcVT = ExtElt.getValueType();
53917   unsigned DestWidth = TruncVT.getSizeInBits();
53918   unsigned SrcWidth = SrcVT.getSizeInBits();
53919   if (SrcWidth % DestWidth != 0)
53920     return SDValue();
53921 
53922   // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
53923   EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
53924   unsigned VecWidth = SrcVecVT.getSizeInBits();
53925   unsigned NumElts = VecWidth / DestWidth;
53926   EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
53927   SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
53928   SDLoc DL(N);
53929   SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
53930                                   BitcastVec, ExtElt.getOperand(1));
53931   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
53932 }
53933 
53934 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
53935                                const X86Subtarget &Subtarget) {
53936   bool IsStrict = N->isStrictFPOpcode();
53937   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53938   EVT VT = N->getValueType(0);
53939   EVT InVT = Op0.getValueType();
53940 
53941   // UINT_TO_FP(vXi1~15)  -> UINT_TO_FP(ZEXT(vXi1~15  to vXi16))
53942   // UINT_TO_FP(vXi17~31) -> UINT_TO_FP(ZEXT(vXi17~31 to vXi32))
53943   // UINT_TO_FP(vXi33~63) -> UINT_TO_FP(ZEXT(vXi33~63 to vXi64))
53944   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53945     unsigned ScalarSize = InVT.getScalarSizeInBits();
53946     if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
53947       return SDValue();
53948     SDLoc dl(N);
53949     EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
53950                                  ScalarSize < 16   ? MVT::i16
53951                                  : ScalarSize < 32 ? MVT::i32
53952                                                    : MVT::i64,
53953                                  InVT.getVectorNumElements());
53954     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53955     if (IsStrict)
53956       return DAG.getNode(ISD::STRICT_UINT_TO_FP, dl, {VT, MVT::Other},
53957                          {N->getOperand(0), P});
53958     return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
53959   }
53960 
53961   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
53962   // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
53963   // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
53964   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53965       VT.getScalarType() != MVT::f16) {
53966     SDLoc dl(N);
53967     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53968     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53969 
53970     // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
53971     if (IsStrict)
53972       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53973                          {N->getOperand(0), P});
53974     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53975   }
53976 
53977   // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
53978   // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
53979   // the optimization here.
53980   if (DAG.SignBitIsZero(Op0)) {
53981     if (IsStrict)
53982       return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
53983                          {N->getOperand(0), Op0});
53984     return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
53985   }
53986 
53987   return SDValue();
53988 }
53989 
53990 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
53991                                TargetLowering::DAGCombinerInfo &DCI,
53992                                const X86Subtarget &Subtarget) {
53993   // First try to optimize away the conversion entirely when it's
53994   // conditionally from a constant. Vectors only.
53995   bool IsStrict = N->isStrictFPOpcode();
53996   if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
53997     return Res;
53998 
53999   // Now move on to more general possibilities.
54000   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
54001   EVT VT = N->getValueType(0);
54002   EVT InVT = Op0.getValueType();
54003 
54004   // SINT_TO_FP(vXi1~15)  -> SINT_TO_FP(SEXT(vXi1~15  to vXi16))
54005   // SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
54006   // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
54007   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
54008     unsigned ScalarSize = InVT.getScalarSizeInBits();
54009     if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
54010       return SDValue();
54011     SDLoc dl(N);
54012     EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
54013                                  ScalarSize < 16   ? MVT::i16
54014                                  : ScalarSize < 32 ? MVT::i32
54015                                                    : MVT::i64,
54016                                  InVT.getVectorNumElements());
54017     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
54018     if (IsStrict)
54019       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54020                          {N->getOperand(0), P});
54021     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
54022   }
54023 
54024   // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
54025   // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
54026   // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
54027   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
54028       VT.getScalarType() != MVT::f16) {
54029     SDLoc dl(N);
54030     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
54031     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
54032     if (IsStrict)
54033       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54034                          {N->getOperand(0), P});
54035     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
54036   }
54037 
54038   // Without AVX512DQ we only support i64 to float scalar conversion. For both
54039   // vectors and scalars, see if we know that the upper bits are all the sign
54040   // bit, in which case we can truncate the input to i32 and convert from that.
54041   if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
54042     unsigned BitWidth = InVT.getScalarSizeInBits();
54043     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
54044     if (NumSignBits >= (BitWidth - 31)) {
54045       EVT TruncVT = MVT::i32;
54046       if (InVT.isVector())
54047         TruncVT = InVT.changeVectorElementType(TruncVT);
54048       SDLoc dl(N);
54049       if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
54050         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
54051         if (IsStrict)
54052           return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54053                              {N->getOperand(0), Trunc});
54054         return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
54055       }
54056       // If we're after legalize and the type is v2i32 we need to shuffle and
54057       // use CVTSI2P.
54058       assert(InVT == MVT::v2i64 && "Unexpected VT!");
54059       SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
54060       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
54061                                           { 0, 2, -1, -1 });
54062       if (IsStrict)
54063         return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
54064                            {N->getOperand(0), Shuf});
54065       return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
54066     }
54067   }
54068 
54069   // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
54070   // a 32-bit target where SSE doesn't support i64->FP operations.
54071   if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
54072       Op0.getOpcode() == ISD::LOAD) {
54073     LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
54074 
54075     // This transformation is not supported if the result type is f16 or f128.
54076     if (VT == MVT::f16 || VT == MVT::f128)
54077       return SDValue();
54078 
54079     // If we have AVX512DQ we can use packed conversion instructions unless
54080     // the VT is f80.
54081     if (Subtarget.hasDQI() && VT != MVT::f80)
54082       return SDValue();
54083 
54084     if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
54085         Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
54086       std::pair<SDValue, SDValue> Tmp =
54087           Subtarget.getTargetLowering()->BuildFILD(
54088               VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
54089               Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
54090       DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
54091       return Tmp.first;
54092     }
54093   }
54094 
54095   if (IsStrict)
54096     return SDValue();
54097 
54098   if (SDValue V = combineToFPTruncExtElt(N, DAG))
54099     return V;
54100 
54101   return SDValue();
54102 }
54103 
54104 static bool needCarryOrOverflowFlag(SDValue Flags) {
54105   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
54106 
54107   for (const SDNode *User : Flags->uses()) {
54108     X86::CondCode CC;
54109     switch (User->getOpcode()) {
54110     default:
54111       // Be conservative.
54112       return true;
54113     case X86ISD::SETCC:
54114     case X86ISD::SETCC_CARRY:
54115       CC = (X86::CondCode)User->getConstantOperandVal(0);
54116       break;
54117     case X86ISD::BRCOND:
54118     case X86ISD::CMOV:
54119       CC = (X86::CondCode)User->getConstantOperandVal(2);
54120       break;
54121     }
54122 
54123     switch (CC) {
54124     default: break;
54125     case X86::COND_A: case X86::COND_AE:
54126     case X86::COND_B: case X86::COND_BE:
54127     case X86::COND_O: case X86::COND_NO:
54128     case X86::COND_G: case X86::COND_GE:
54129     case X86::COND_L: case X86::COND_LE:
54130       return true;
54131     }
54132   }
54133 
54134   return false;
54135 }
54136 
54137 static bool onlyZeroFlagUsed(SDValue Flags) {
54138   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
54139 
54140   for (const SDNode *User : Flags->uses()) {
54141     unsigned CCOpNo;
54142     switch (User->getOpcode()) {
54143     default:
54144       // Be conservative.
54145       return false;
54146     case X86ISD::SETCC:
54147     case X86ISD::SETCC_CARRY:
54148       CCOpNo = 0;
54149       break;
54150     case X86ISD::BRCOND:
54151     case X86ISD::CMOV:
54152       CCOpNo = 2;
54153       break;
54154     }
54155 
54156     X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
54157     if (CC != X86::COND_E && CC != X86::COND_NE)
54158       return false;
54159   }
54160 
54161   return true;
54162 }
54163 
54164 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
54165 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
54166 /// with CMP+{ADC, SBB}.
54167 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
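/// Worked example: "add X, (setb (cmp a, b))" becomes "adc X, 0, (cmp a, b)"
/// and "sub X, (setb (cmp a, b))" becomes "sbb X, 0, (cmp a, b)", so the
/// boolean never needs to be materialized in a register.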
54168 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
54169                                          SDValue X, SDValue Y,
54170                                          SelectionDAG &DAG,
54171                                          bool ZeroSecondOpOnly = false) {
54172   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
54173     return SDValue();
54174 
54175   // Look through a one-use zext.
54176   if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
54177     Y = Y.getOperand(0);
54178 
54179   X86::CondCode CC;
54180   SDValue EFLAGS;
54181   if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
54182     CC = (X86::CondCode)Y.getConstantOperandVal(0);
54183     EFLAGS = Y.getOperand(1);
54184   } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
54185              Y.hasOneUse()) {
54186     EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
54187   }
54188 
54189   if (!EFLAGS)
54190     return SDValue();
54191 
54192   // If X is -1 or 0, then we have an opportunity to avoid constants required in
54193   // the general case below.
54194   auto *ConstantX = dyn_cast<ConstantSDNode>(X);
54195   if (ConstantX && !ZeroSecondOpOnly) {
54196     if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
54197         (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
54198       // This is a complicated way to get -1 or 0 from the carry flag:
54199       // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
54200       //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
54201       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54202                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54203                          EFLAGS);
54204     }
54205 
54206     if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
54207         (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
54208       if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
54209           EFLAGS.getValueType().isInteger() &&
54210           !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54211         // Swap the operands of a SUB, and we have the same pattern as above.
54212         // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
54213         //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
54214         SDValue NewSub = DAG.getNode(
54215             X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54216             EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54217         SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
54218         return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54219                            DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54220                            NewEFLAGS);
54221       }
54222     }
54223   }
54224 
54225   if (CC == X86::COND_B) {
54226     // X + SETB Z --> adc X, 0
54227     // X - SETB Z --> sbb X, 0
54228     return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
54229                        DAG.getVTList(VT, MVT::i32), X,
54230                        DAG.getConstant(0, DL, VT), EFLAGS);
54231   }
54232 
54233   if (ZeroSecondOpOnly)
54234     return SDValue();
54235 
54236   if (CC == X86::COND_A) {
54237     // Try to convert COND_A into COND_B in an attempt to facilitate
54238     // materializing "setb reg".
54239     //
54240     // Do not flip "e > c", where "c" is a constant, because Cmp instruction
54241     // cannot take an immediate as its first operand.
54242     //
54243     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
54244         EFLAGS.getValueType().isInteger() &&
54245         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54246       SDValue NewSub =
54247           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54248                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54249       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
54250       return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
54251                          DAG.getVTList(VT, MVT::i32), X,
54252                          DAG.getConstant(0, DL, VT), NewEFLAGS);
54253     }
54254   }
54255 
54256   if (CC == X86::COND_AE) {
54257     // X + SETAE --> sbb X, -1
54258     // X - SETAE --> adc X, -1
54259     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
54260                        DAG.getVTList(VT, MVT::i32), X,
54261                        DAG.getConstant(-1, DL, VT), EFLAGS);
54262   }
54263 
54264   if (CC == X86::COND_BE) {
54265     // X + SETBE --> sbb X, -1
54266     // X - SETBE --> adc X, -1
54267     // Try to convert COND_BE into COND_AE in an attempt to facilitate
54268     // materializing "setae reg".
54269     //
54270     // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
54271     // cannot take an immediate as its first operand.
54272     //
54273     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
54274         EFLAGS.getValueType().isInteger() &&
54275         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54276       SDValue NewSub =
54277           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54278                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54279       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
54280       return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
54281                          DAG.getVTList(VT, MVT::i32), X,
54282                          DAG.getConstant(-1, DL, VT), NewEFLAGS);
54283     }
54284   }
54285 
54286   if (CC != X86::COND_E && CC != X86::COND_NE)
54287     return SDValue();
54288 
54289   if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
54290       !X86::isZeroNode(EFLAGS.getOperand(1)) ||
54291       !EFLAGS.getOperand(0).getValueType().isInteger())
54292     return SDValue();
54293 
54294   SDValue Z = EFLAGS.getOperand(0);
54295   EVT ZVT = Z.getValueType();
54296 
54297   // If X is -1 or 0, then we have an opportunity to avoid constants required in
54298   // the general case below.
54299   if (ConstantX) {
54300     // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
54301     // fake operands:
54302     //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
54303     // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
54304     if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
54305         (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
54306       SDValue Zero = DAG.getConstant(0, DL, ZVT);
54307       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54308       SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
54309       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54310                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54311                          SDValue(Neg.getNode(), 1));
54312     }
54313 
54314     // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
54315     // with fake operands:
54316     //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
54317     // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
54318     if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
54319         (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
54320       SDValue One = DAG.getConstant(1, DL, ZVT);
54321       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54322       SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
54323       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54324                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54325                          Cmp1.getValue(1));
54326     }
54327   }
54328 
54329   // (cmp Z, 1) sets the carry flag if Z is 0.
54330   SDValue One = DAG.getConstant(1, DL, ZVT);
54331   SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54332   SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
54333 
54334   // Add the flags type for ADC/SBB nodes.
54335   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54336 
54337   // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
54338   // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
54339   if (CC == X86::COND_NE)
54340     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
54341                        DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
54342 
54343   // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
54344   // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
54345   return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
54346                      DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
54347 }
54348 
54349 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
54350 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
54351 /// with CMP+{ADC, SBB}.
54352 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
54353   bool IsSub = N->getOpcode() == ISD::SUB;
54354   SDValue X = N->getOperand(0);
54355   SDValue Y = N->getOperand(1);
54356   EVT VT = N->getValueType(0);
54357   SDLoc DL(N);
54358 
54359   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
54360     return ADCOrSBB;
54361 
54362   // Commute and try again (negate the result for subtracts).
54363   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
54364     if (IsSub)
54365       ADCOrSBB =
54366           DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
54367     return ADCOrSBB;
54368   }
54369 
54370   return SDValue();
54371 }
54372 
54373 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
54374   // Only handle test patterns.
54375   if (!isNullConstant(N->getOperand(1)))
54376     return SDValue();
54377 
54378   // If we have a CMP of a truncated binop, see if we can make a smaller binop
54379   // and use its flags directly.
54380   // TODO: Maybe we should try promoting compares that only use the zero flag
54381   // first if we can prove the upper bits with computeKnownBits?
54382   SDLoc dl(N);
54383   SDValue Op = N->getOperand(0);
54384   EVT VT = Op.getValueType();
54385 
54386   // If we have a constant logical shift that's only used in a comparison
54387   // against zero turn it into an equivalent AND. This allows turning it into
54388   // a TEST instruction later.
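  // Worked example (illustrative): "cmp (srl X, 8), 0" on an i32 X only needs
  // the Z flag, so it becomes "cmp (and X, 0xFFFFFF00), 0", which isel can
  // later select as a single TEST instruction.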
54389   if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
54390       Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
54391       onlyZeroFlagUsed(SDValue(N, 0))) {
54392     unsigned BitWidth = VT.getSizeInBits();
54393     const APInt &ShAmt = Op.getConstantOperandAPInt(1);
54394     if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
54395       unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
54396       APInt Mask = Op.getOpcode() == ISD::SRL
54397                        ? APInt::getHighBitsSet(BitWidth, MaskBits)
54398                        : APInt::getLowBitsSet(BitWidth, MaskBits);
54399       if (Mask.isSignedIntN(32)) {
54400         Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
54401                          DAG.getConstant(Mask, dl, VT));
54402         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54403                            DAG.getConstant(0, dl, VT));
54404       }
54405     }
54406   }
54407 
54408   // Peek through any zero-extend if we're only testing for a zero result.
54409   if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
54410     SDValue Src = Op.getOperand(0);
54411     EVT SrcVT = Src.getValueType();
54412     if (SrcVT.getScalarSizeInBits() >= 8 &&
54413         DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
54414       return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
54415                          DAG.getConstant(0, dl, SrcVT));
54416   }
54417 
54418   // Look for a truncate.
54419   if (Op.getOpcode() != ISD::TRUNCATE)
54420     return SDValue();
54421 
54422   SDValue Trunc = Op;
54423   Op = Op.getOperand(0);
54424 
54425   // See if we can compare with zero against the truncation source,
54426   // which should help using the Z flag from many ops. Only do this for
54427   // i32 truncated op to prevent partial-reg compares of promoted ops.
54428   EVT OpVT = Op.getValueType();
54429   APInt UpperBits =
54430       APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
54431   if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
54432       onlyZeroFlagUsed(SDValue(N, 0))) {
54433     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54434                        DAG.getConstant(0, dl, OpVT));
54435   }
54436 
54437   // After this the truncate and arithmetic op must have a single use.
54438   if (!Trunc.hasOneUse() || !Op.hasOneUse())
54439       return SDValue();
54440 
54441   unsigned NewOpc;
54442   switch (Op.getOpcode()) {
54443   default: return SDValue();
54444   case ISD::AND:
54445     // Skip AND with a constant. We have special handling for AND with an
54446     // immediate during isel to generate test instructions.
54447     if (isa<ConstantSDNode>(Op.getOperand(1)))
54448       return SDValue();
54449     NewOpc = X86ISD::AND;
54450     break;
54451   case ISD::OR:  NewOpc = X86ISD::OR;  break;
54452   case ISD::XOR: NewOpc = X86ISD::XOR; break;
54453   case ISD::ADD:
54454     // If the carry or overflow flag is used, we can't truncate.
54455     if (needCarryOrOverflowFlag(SDValue(N, 0)))
54456       return SDValue();
54457     NewOpc = X86ISD::ADD;
54458     break;
54459   case ISD::SUB:
54460     // If the carry or overflow flag is used, we can't truncate.
54461     if (needCarryOrOverflowFlag(SDValue(N, 0)))
54462       return SDValue();
54463     NewOpc = X86ISD::SUB;
54464     break;
54465   }
54466 
54467   // We found an op we can narrow. Truncate its inputs.
54468   SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
54469   SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
54470 
54471   // Use a X86 specific opcode to avoid DAG combine messing with it.
54472   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54473   Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
54474 
54475   // For AND, keep a CMP so that we can match the test pattern.
54476   if (NewOpc == X86ISD::AND)
54477     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54478                        DAG.getConstant(0, dl, VT));
54479 
54480   // Return the flags.
54481   return Op.getValue(1);
54482 }
54483 
54484 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
54485                                 TargetLowering::DAGCombinerInfo &DCI) {
54486   assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
54487          "Expected X86ISD::ADD or X86ISD::SUB");
54488 
54489   SDLoc DL(N);
54490   SDValue LHS = N->getOperand(0);
54491   SDValue RHS = N->getOperand(1);
54492   MVT VT = LHS.getSimpleValueType();
54493   bool IsSub = X86ISD::SUB == N->getOpcode();
54494   unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
54495 
54496   // If we don't use the flag result, simplify back to a generic ADD/SUB.
54497   if (!N->hasAnyUseOfValue(1)) {
54498     SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
54499     return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
54500   }
54501 
54502   // Fold any similar generic ADD/SUB opcodes to reuse this node.
54503   auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
54504     SDValue Ops[] = {N0, N1};
54505     SDVTList VTs = DAG.getVTList(N->getValueType(0));
54506     if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
54507       SDValue Op(N, 0);
54508       if (Negate)
54509         Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
54510       DCI.CombineTo(GenericAddSub, Op);
54511     }
54512   };
54513   MatchGeneric(LHS, RHS, false);
54514   MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
54515 
54516   // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
54517   // EFLAGS result doesn't change.
54518   return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
54519                                    /*ZeroSecondOpOnly*/ true);
54520 }
54521 
54522 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
54523   SDValue LHS = N->getOperand(0);
54524   SDValue RHS = N->getOperand(1);
54525   SDValue BorrowIn = N->getOperand(2);
54526 
54527   if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
54528     MVT VT = N->getSimpleValueType(0);
54529     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54530     return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
54531   }
54532 
54533   // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
54534   // iff the flag result is dead.
54535   if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
54536       !N->hasAnyUseOfValue(1))
54537     return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54538                        LHS.getOperand(1), BorrowIn);
54539 
54540   return SDValue();
54541 }
54542 
54543 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
54544 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
54545                           TargetLowering::DAGCombinerInfo &DCI) {
54546   SDValue LHS = N->getOperand(0);
54547   SDValue RHS = N->getOperand(1);
54548   SDValue CarryIn = N->getOperand(2);
54549   auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
54550   auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
54551 
54552   // Canonicalize constant to RHS.
54553   if (LHSC && !RHSC)
54554     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
54555                        CarryIn);
54556 
54557   // If the LHS and RHS of the ADC node are zero, then it can't overflow and
54558   // the result is either zero or one (depending on the input carry bit).
54559   // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
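  // Worked example (illustrative): ADC(0, 0, CarryIn) with a dead flag result
  // is just the zero-extended carry bit, i.e. AND(SETCC_CARRY(COND_B, CarryIn), 1),
  // since SETCC_CARRY is all-ones when the carry is set and zero otherwise.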
54560   if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
54561       // We don't have a good way to replace an EFLAGS use, so only do this when
54562       // dead right now.
54563       SDValue(N, 1).use_empty()) {
54564     SDLoc DL(N);
54565     EVT VT = N->getValueType(0);
54566     SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
54567     SDValue Res1 = DAG.getNode(
54568         ISD::AND, DL, VT,
54569         DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54570                     DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
54571         DAG.getConstant(1, DL, VT));
54572     return DCI.CombineTo(N, Res1, CarryOut);
54573   }
54574 
54575   // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
54576   // iff the flag result is dead.
54577   // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
54578   if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
54579     SDLoc DL(N);
54580     APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
54581     return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
54582                        DAG.getConstant(0, DL, LHS.getValueType()),
54583                        DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
54584   }
54585 
54586   if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
54587     MVT VT = N->getSimpleValueType(0);
54588     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54589     return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
54590   }
54591 
54592   // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
54593   // iff the flag result is dead.
54594   if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
54595       !N->hasAnyUseOfValue(1))
54596     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54597                        LHS.getOperand(1), CarryIn);
54598 
54599   return SDValue();
54600 }
54601 
54602 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
54603                             const SDLoc &DL, EVT VT,
54604                             const X86Subtarget &Subtarget) {
54605   // Example of pattern we try to detect:
54606   // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
54607   //(add (build_vector (extract_elt t, 0),
54608   //                   (extract_elt t, 2),
54609   //                   (extract_elt t, 4),
54610   //                   (extract_elt t, 6)),
54611   //     (build_vector (extract_elt t, 1),
54612   //                   (extract_elt t, 3),
54613   //                   (extract_elt t, 5),
54614   //                   (extract_elt t, 7)))
54615 
54616   if (!Subtarget.hasSSE2())
54617     return SDValue();
54618 
54619   if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
54620       Op1.getOpcode() != ISD::BUILD_VECTOR)
54621     return SDValue();
54622 
54623   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54624       VT.getVectorNumElements() < 4 ||
54625       !isPowerOf2_32(VT.getVectorNumElements()))
54626     return SDValue();
54627 
54628   // Check if one of Op0,Op1 is of the form:
54629   // (build_vector (extract_elt Mul, 0),
54630   //               (extract_elt Mul, 2),
54631   //               (extract_elt Mul, 4),
54632   //                   ...
54633   // the other is of the form:
54634   // (build_vector (extract_elt Mul, 1),
54635   //               (extract_elt Mul, 3),
54636   //               (extract_elt Mul, 5),
54637   //                   ...
54638   // and identify Mul.
54639   SDValue Mul;
54640   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
54641     SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
54642             Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
54643     // TODO: Be more tolerant to undefs.
54644     if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54645         Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54646         Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54647         Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54648       return SDValue();
54649     auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
54650     auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
54651     auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
54652     auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
54653     if (!Const0L || !Const1L || !Const0H || !Const1H)
54654       return SDValue();
54655     unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
54656              Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
54657     // Commutativity of mul allows factors of a product to reorder.
54658     if (Idx0L > Idx1L)
54659       std::swap(Idx0L, Idx1L);
54660     if (Idx0H > Idx1H)
54661       std::swap(Idx0H, Idx1H);
54662     // Commutativity of add allows pairs of factors to reorder.
54663     if (Idx0L > Idx0H) {
54664       std::swap(Idx0L, Idx0H);
54665       std::swap(Idx1L, Idx1H);
54666     }
54667     if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
54668         Idx1H != 2 * i + 3)
54669       return SDValue();
54670     if (!Mul) {
54671       // First time an extract_elt's source vector is visited. Must be a MUL
54672       // with 2X the number of vector elements of the BUILD_VECTOR.
54673       // Both extracts must be from same MUL.
54674       Mul = Op0L->getOperand(0);
54675       if (Mul->getOpcode() != ISD::MUL ||
54676           Mul.getValueType().getVectorNumElements() != 2 * e)
54677         return SDValue();
54678     }
54679     // Check that the extract is from the same MUL previously seen.
54680     if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
54681         Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
54682       return SDValue();
54683   }
54684 
54685   // Check if the Mul source can be safely shrunk.
54686   ShrinkMode Mode;
54687   if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
54688       Mode == ShrinkMode::MULU16)
54689     return SDValue();
54690 
54691   EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54692                                  VT.getVectorNumElements() * 2);
54693   SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
54694   SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
54695 
54696   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54697                          ArrayRef<SDValue> Ops) {
54698     EVT InVT = Ops[0].getValueType();
54699     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
54700     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54701                                  InVT.getVectorNumElements() / 2);
54702     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54703   };
54704   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
54705 }
54706 
54707 // Attempt to turn this pattern into PMADDWD.
54708 // (add (mul (sext (build_vector)), (sext (build_vector))),
54709 //      (mul (sext (build_vector)), (sext (build_vector)))
54710 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
54711                               const SDLoc &DL, EVT VT,
54712                               const X86Subtarget &Subtarget) {
54713   if (!Subtarget.hasSSE2())
54714     return SDValue();
54715 
54716   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
54717     return SDValue();
54718 
54719   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54720       VT.getVectorNumElements() < 4 ||
54721       !isPowerOf2_32(VT.getVectorNumElements()))
54722     return SDValue();
54723 
54724   SDValue N00 = N0.getOperand(0);
54725   SDValue N01 = N0.getOperand(1);
54726   SDValue N10 = N1.getOperand(0);
54727   SDValue N11 = N1.getOperand(1);
54728 
54729   // All inputs need to be sign extends.
54730   // TODO: Support ZERO_EXTEND from known positive?
54731   if (N00.getOpcode() != ISD::SIGN_EXTEND ||
54732       N01.getOpcode() != ISD::SIGN_EXTEND ||
54733       N10.getOpcode() != ISD::SIGN_EXTEND ||
54734       N11.getOpcode() != ISD::SIGN_EXTEND)
54735     return SDValue();
54736 
54737   // Peek through the extends.
54738   N00 = N00.getOperand(0);
54739   N01 = N01.getOperand(0);
54740   N10 = N10.getOperand(0);
54741   N11 = N11.getOperand(0);
54742 
54743   // Must be extending from vXi16.
54744   EVT InVT = N00.getValueType();
54745   if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
54746       N10.getValueType() != InVT || N11.getValueType() != InVT)
54747     return SDValue();
54748 
54749   // All inputs should be build_vectors.
54750   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
54751       N01.getOpcode() != ISD::BUILD_VECTOR ||
54752       N10.getOpcode() != ISD::BUILD_VECTOR ||
54753       N11.getOpcode() != ISD::BUILD_VECTOR)
54754     return SDValue();
54755 
54756   // For each element, we need to ensure we have an odd element from one vector
54757   // multiplied by the odd element of another vector and the even element from
54758   // one of the same vectors being multiplied by the even element from the
54759   // other vector. So we need to make sure for each element i, this operation
54760   // is being performed:
54761   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
54762   SDValue In0, In1;
54763   for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
54764     SDValue N00Elt = N00.getOperand(i);
54765     SDValue N01Elt = N01.getOperand(i);
54766     SDValue N10Elt = N10.getOperand(i);
54767     SDValue N11Elt = N11.getOperand(i);
54768     // TODO: Be more tolerant to undefs.
54769     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54770         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54771         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54772         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54773       return SDValue();
54774     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
54775     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
54776     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
54777     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
54778     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
54779       return SDValue();
54780     unsigned IdxN00 = ConstN00Elt->getZExtValue();
54781     unsigned IdxN01 = ConstN01Elt->getZExtValue();
54782     unsigned IdxN10 = ConstN10Elt->getZExtValue();
54783     unsigned IdxN11 = ConstN11Elt->getZExtValue();
54784     // Add is commutative so indices can be reordered.
54785     if (IdxN00 > IdxN10) {
54786       std::swap(IdxN00, IdxN10);
54787       std::swap(IdxN01, IdxN11);
54788     }
54789     // N0 indices must be the even element. N1 indices must be the next odd element.
54790     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
54791         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
54792       return SDValue();
54793     SDValue N00In = N00Elt.getOperand(0);
54794     SDValue N01In = N01Elt.getOperand(0);
54795     SDValue N10In = N10Elt.getOperand(0);
54796     SDValue N11In = N11Elt.getOperand(0);
54797 
54798     // First time we find an input capture it.
54799     if (!In0) {
54800       In0 = N00In;
54801       In1 = N01In;
54802 
54803       // The input vectors must be at least as wide as the output.
54804       // If they are larger than the output, we extract subvector below.
54805       if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
54806           In1.getValueSizeInBits() < VT.getSizeInBits())
54807         return SDValue();
54808     }
54809     // Mul is commutative so the input vectors can be in any order.
54810     // Canonicalize to make the compares easier.
54811     if (In0 != N00In)
54812       std::swap(N00In, N01In);
54813     if (In0 != N10In)
54814       std::swap(N10In, N11In);
54815     if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
54816       return SDValue();
54817   }
54818 
54819   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54820                          ArrayRef<SDValue> Ops) {
54821     EVT OpVT = Ops[0].getValueType();
54822     assert(OpVT.getScalarType() == MVT::i16 &&
54823            "Unexpected scalar element type");
54824     assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
54825     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54826                                  OpVT.getVectorNumElements() / 2);
54827     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54828   };
54829 
54830   // If the output is narrower than an input, extract the low part of the input
54831   // vector.
54832   EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54833                                VT.getVectorNumElements() * 2);
54834   if (OutVT16.bitsLT(In0.getValueType())) {
54835     In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
54836                       DAG.getIntPtrConstant(0, DL));
54837   }
54838   if (OutVT16.bitsLT(In1.getValueType())) {
54839     In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
54840                       DAG.getIntPtrConstant(0, DL));
54841   }
54842   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
54843                           PMADDBuilder);
54844 }
54845 
54846 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
54847 // If the upper element in each pair of both VPMADDWD operands is zero, we can
54848 // merge the operand elements and use the implicit add of VPMADDWD.
54849 // TODO: Add support for VPMADDUBSW (which isn't commutable).
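      // Illustrative sketch (hypothetical operands): if the odd (upper) i16 element
      // of each pair in Y is known zero, VPMADDWD(X,Y) lane i is just X[2i]*Y[2i];
      // likewise for VPMADDWD(Z,W). Interleaving the even elements via SHUFFLE(X,Z)
      // and SHUFFLE(Y,W) then lets a single VPMADDWD compute
      // X[2i]*Y[2i] + Z[2i]*W[2i] through its implicit pairwise add.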
54850 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
54851                                    const SDLoc &DL, EVT VT) {
54852   if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
54853     return SDValue();
54854 
54855   // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
54856   if (VT.getSizeInBits() > 128)
54857     return SDValue();
54858 
54859   unsigned NumElts = VT.getVectorNumElements();
54860   MVT OpVT = N0.getOperand(0).getSimpleValueType();
54861   APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
54862   APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
54863 
54864   bool Op0HiZero =
54865       DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
54866       DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
54867   bool Op1HiZero =
54868       DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
54869       DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
54870 
54871   // TODO: Check for zero lower elements once we have actual codegen that
54872   // creates them.
54873   if (!Op0HiZero || !Op1HiZero)
54874     return SDValue();
54875 
54876   // Create a shuffle mask packing the lower elements from each VPMADDWD.
54877   SmallVector<int> Mask;
54878   for (int i = 0; i != (int)NumElts; ++i) {
54879     Mask.push_back(2 * i);
54880     Mask.push_back(2 * (i + NumElts));
54881   }
54882 
54883   SDValue LHS =
54884       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
54885   SDValue RHS =
54886       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
54887   return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
54888 }
54889 
54890 /// CMOV of constants requires materializing constant operands in registers.
54891 /// Try to fold those constants into an 'add' instruction to reduce instruction
54892 /// count. We do this with CMOV rather than the generic 'select' because there are
54893 /// earlier folds that may be used to turn select-of-constants into logic hacks.
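      /// Illustrative sketch (hypothetical i32 values, x in a register):
      ///   add (cmov 10, 20), x --> cmov (add x, 10), (add x, 20)
      /// i.e. two constant moves plus an add become two LEA-style adds feeding the
      /// CMOV.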
54894 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
54895                                        const X86Subtarget &Subtarget) {
54896   // If an operand is zero, add-of-0 gets simplified away, so that's clearly
54897   // better because we eliminate 1-2 instructions. This transform is still
54898   // an improvement without zero operands because we trade 2 move constants and
54899   // 1 add for 2 adds (LEA) as long as the constants can be represented as
54900   // immediate asm operands (fit in 32-bits).
54901   auto isSuitableCmov = [](SDValue V) {
54902     if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
54903       return false;
54904     if (!isa<ConstantSDNode>(V.getOperand(0)) ||
54905         !isa<ConstantSDNode>(V.getOperand(1)))
54906       return false;
54907     return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
54908            (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
54909             V.getConstantOperandAPInt(1).isSignedIntN(32));
54910   };
54911 
54912   // Match an appropriate CMOV as the first operand of the add.
54913   SDValue Cmov = N->getOperand(0);
54914   SDValue OtherOp = N->getOperand(1);
54915   if (!isSuitableCmov(Cmov))
54916     std::swap(Cmov, OtherOp);
54917   if (!isSuitableCmov(Cmov))
54918     return SDValue();
54919 
54920   // Don't remove a load folding opportunity for the add. That would neutralize
54921   // any improvements from removing constant materializations.
54922   if (X86::mayFoldLoad(OtherOp, Subtarget))
54923     return SDValue();
54924 
54925   EVT VT = N->getValueType(0);
54926   SDLoc DL(N);
54927   SDValue FalseOp = Cmov.getOperand(0);
54928   SDValue TrueOp = Cmov.getOperand(1);
54929 
54930   // We will push the add through the select, but we can potentially do better
54931   // if we know there is another add in the sequence and this is pointer math.
54932   // In that case, we can absorb an add into the trailing memory op and avoid
54933   // a 3-operand LEA which is likely slower than a 2-operand LEA.
54934   // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
54935   if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
54936       !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
54937       all_of(N->uses(), [&](SDNode *Use) {
54938         auto *MemNode = dyn_cast<MemSDNode>(Use);
54939         return MemNode && MemNode->getBasePtr().getNode() == N;
54940       })) {
54941     // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
54942     // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
54943     //       it is possible that choosing op1 might be better.
54944     SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
54945     FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
54946     TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
54947     Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
54948                        Cmov.getOperand(2), Cmov.getOperand(3));
54949     return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
54950   }
54951 
54952   // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
54953   FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
54954   TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
54955   return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
54956                      Cmov.getOperand(3));
54957 }
54958 
54959 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
54960                           TargetLowering::DAGCombinerInfo &DCI,
54961                           const X86Subtarget &Subtarget) {
54962   EVT VT = N->getValueType(0);
54963   SDValue Op0 = N->getOperand(0);
54964   SDValue Op1 = N->getOperand(1);
54965   SDLoc DL(N);
54966 
54967   if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
54968     return Select;
54969 
54970   if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
54971     return MAdd;
54972   if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
54973     return MAdd;
54974   if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
54975     return MAdd;
54976 
54977   // Try to synthesize horizontal adds from adds of shuffles.
54978   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54979     return V;
54980 
54981   // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
54982   // (sub Y, (sext (vXi1 X))).
54983   // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
54984   // generic DAG combine without a legal type check, but adding this there
54985   // caused regressions.
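        // Illustrative derivation: for an i1 element x, sext(x) == -zext(x)
        // (0 -> 0, 1 -> -1), so (add (zext x), Y) == (sub Y, (sext x)).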
54986   if (VT.isVector()) {
54987     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54988     if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
54989         Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54990         TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
54991       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
54992       return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
54993     }
54994 
54995     if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
54996         Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54997         TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
54998       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
54999       return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
55000     }
55001   }
55002 
55003   // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
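        // Illustrative derivation: ADC(Y,0,W) == Y + W (W is the carry-in), so
        // X + (Y + W) == ADC(X,Y,W), reusing the carry input directly.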
55004   if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
55005       X86::isZeroNode(Op0.getOperand(1))) {
55006     assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
55007     return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
55008                        Op0.getOperand(0), Op0.getOperand(2));
55009   }
55010 
55011   return combineAddOrSubToADCOrSBB(N, DAG);
55012 }
55013 
55014 // Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
55015 // condition comes from the subtract node that produced -X. This matches the
55016 // cmov expansion for absolute value. By swapping the operands we convert abs
55017 // to nabs.
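      // Illustrative sketch (hypothetical value X = -5): the abs expansion computes
      // NegX = 0 - X = 5 and cmovs between X and NegX on the sign of that subtract,
      // giving abs(X) = 5. Swapping the cmov operands yields nabs(X) = -5, so
      // (sub Y, abs(X)) becomes (add Y, nabs(X)) under the same condition.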
55018 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
55019   SDValue N0 = N->getOperand(0);
55020   SDValue N1 = N->getOperand(1);
55021 
55022   if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
55023     return SDValue();
55024 
55025   X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
55026   if (CC != X86::COND_S && CC != X86::COND_NS)
55027     return SDValue();
55028 
55029   // Condition should come from a negate operation.
55030   SDValue Cond = N1.getOperand(3);
55031   if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
55032     return SDValue();
55033   assert(Cond.getResNo() == 1 && "Unexpected result number");
55034 
55035   // Get the X and -X from the negate.
55036   SDValue NegX = Cond.getValue(0);
55037   SDValue X = Cond.getOperand(1);
55038 
55039   SDValue FalseOp = N1.getOperand(0);
55040   SDValue TrueOp = N1.getOperand(1);
55041 
55042   // Cmov operands should be X and NegX. Order doesn't matter.
55043   if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
55044     return SDValue();
55045 
55046   // Build a new CMOV with the operands swapped.
55047   SDLoc DL(N);
55048   MVT VT = N->getSimpleValueType(0);
55049   SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
55050                              N1.getOperand(2), Cond);
55051   // Convert sub to add.
55052   return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
55053 }
55054 
55055 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
55056                           TargetLowering::DAGCombinerInfo &DCI,
55057                           const X86Subtarget &Subtarget) {
55058   SDValue Op0 = N->getOperand(0);
55059   SDValue Op1 = N->getOperand(1);
55060 
55061   // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
55062   auto IsNonOpaqueConstant = [&](SDValue Op) {
55063     if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
55064       if (auto *Cst = dyn_cast<ConstantSDNode>(C))
55065         return !Cst->isOpaque();
55066       return true;
55067     }
55068     return false;
55069   };
55070 
55071   // X86 can't encode an immediate LHS of a sub. See if we can push the
55072   // negation into a preceding instruction. If the RHS of the sub is an XOR with
55073   // one use and a constant, invert the immediate, saving one register.
55074   // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
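        // Illustrative derivation: since -a == ~a + 1 and ~(X ^ C2) == X ^ ~C2,
        //   C1 - (X ^ C2) == C1 + ~(X ^ C2) + 1 == xor(X, ~C2) + (C1 + 1),
        // which is the add/xor form built below.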
55075   if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
55076       IsNonOpaqueConstant(Op1.getOperand(1)) && Op1->hasOneUse()) {
55077     SDLoc DL(N);
55078     EVT VT = Op0.getValueType();
55079     SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
55080                                  DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
55081     SDValue NewAdd =
55082         DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
55083     return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
55084   }
55085 
55086   if (SDValue V = combineSubABS(N, DAG))
55087     return V;
55088 
55089   // Try to synthesize horizontal subs from subs of shuffles.
55090   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
55091     return V;
55092 
55093   // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
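        // Illustrative derivation: ADC(Y,0,W) == Y + W (W is the carry-in), so
        // X - (Y + W) == X - Y - W == SBB(X,Y,W).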
55094   if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
55095       X86::isZeroNode(Op1.getOperand(1))) {
55096     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
55097     return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
55098                        Op1.getOperand(0), Op1.getOperand(2));
55099   }
55100 
55101   // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
55102   // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
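        // Illustrative derivation: SBB(Y,Z,W) == Y - Z - W (W is the borrow-in), so
        // X - (Y - Z - W) == (X + Z + W) - Y == SUB(ADC(X,Z,W), Y).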
55103   if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
55104       !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
55105     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
55106     SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
55107                               Op1.getOperand(1), Op1.getOperand(2));
55108     return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
55109                        Op1.getOperand(0));
55110   }
55111 
55112   return combineAddOrSubToADCOrSBB(N, DAG);
55113 }
55114 
55115 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
55116                                     const X86Subtarget &Subtarget) {
55117   MVT VT = N->getSimpleValueType(0);
55118   SDLoc DL(N);
55119 
55120   if (N->getOperand(0) == N->getOperand(1)) {
55121     if (N->getOpcode() == X86ISD::PCMPEQ)
55122       return DAG.getConstant(-1, DL, VT);
55123     if (N->getOpcode() == X86ISD::PCMPGT)
55124       return DAG.getConstant(0, DL, VT);
55125   }
55126 
55127   return SDValue();
55128 }
55129 
55130 /// Helper that combines an array of subvector ops as if they were the operands
55131 /// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
55132 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
55133 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
55134                                       ArrayRef<SDValue> Ops, SelectionDAG &DAG,
55135                                       TargetLowering::DAGCombinerInfo &DCI,
55136                                       const X86Subtarget &Subtarget) {
55137   assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
55138   unsigned EltSizeInBits = VT.getScalarSizeInBits();
55139 
55140   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
55141     return DAG.getUNDEF(VT);
55142 
55143   if (llvm::all_of(Ops, [](SDValue Op) {
55144         return ISD::isBuildVectorAllZeros(Op.getNode());
55145       }))
55146     return getZeroVector(VT, Subtarget, DAG, DL);
55147 
55148   SDValue Op0 = Ops[0];
55149   bool IsSplat = llvm::all_equal(Ops);
55150 
55151   // Repeated subvectors.
55152   if (IsSplat &&
55153       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
55154     // If this broadcast is inserted into both halves, use a larger broadcast.
55155     if (Op0.getOpcode() == X86ISD::VBROADCAST)
55156       return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
55157 
55158     // If this simple subvector load or scalar/subvector broadcast_load is
55159     // inserted into both halves, use a larger broadcast_load. Update other
55160     // uses to use an extracted subvector.
55161     if (ISD::isNormalLoad(Op0.getNode()) ||
55162         Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55163         Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
55164       auto *Mem = cast<MemSDNode>(Op0);
55165       unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
55166                          ? X86ISD::VBROADCAST_LOAD
55167                          : X86ISD::SUBV_BROADCAST_LOAD;
55168       if (SDValue BcastLd =
55169               getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
55170         SDValue BcastSrc =
55171             extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
55172         DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
55173         return BcastLd;
55174       }
55175     }
55176 
55177     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
55178     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
55179         (Subtarget.hasAVX2() ||
55180          X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
55181                                               VT.getScalarType(), Subtarget)))
55182       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
55183                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
55184                                      Op0.getOperand(0),
55185                                      DAG.getIntPtrConstant(0, DL)));
55186 
55187     // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
55188     if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
55189         (Subtarget.hasAVX2() ||
55190          (EltSizeInBits >= 32 &&
55191           X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
55192         Op0.getOperand(0).getValueType() == VT.getScalarType())
55193       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
55194 
55195     // concat_vectors(extract_subvector(broadcast(x)),
55196     //                extract_subvector(broadcast(x))) -> broadcast(x)
55197     if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55198         Op0.getOperand(0).getValueType() == VT) {
55199       if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
55200           Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
55201         return Op0.getOperand(0);
55202     }
55203   }
55204 
55205   // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
55206   // Only concat of subvector high halves which vperm2x128 is best at.
55207   // TODO: This should go in combineX86ShufflesRecursively eventually.
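        // Illustrative sketch: for 256-bit sources A and B, concatenating their high
        // 128-bit halves is VPERM2X128 A, B, 0x31 (imm bits [1:0] = 1 select A's
        // high half for the low lane, bits [5:4] = 3 select B's high half for the
        // high lane).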
55208   if (VT.is256BitVector() && Ops.size() == 2) {
55209     SDValue Src0 = peekThroughBitcasts(Ops[0]);
55210     SDValue Src1 = peekThroughBitcasts(Ops[1]);
55211     if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55212         Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
55213       EVT SrcVT0 = Src0.getOperand(0).getValueType();
55214       EVT SrcVT1 = Src1.getOperand(0).getValueType();
55215       unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
55216       unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
55217       if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
55218           Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
55219           Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
55220         return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
55221                            DAG.getBitcast(VT, Src0.getOperand(0)),
55222                            DAG.getBitcast(VT, Src1.getOperand(0)),
55223                            DAG.getTargetConstant(0x31, DL, MVT::i8));
55224       }
55225     }
55226   }
55227 
55228   // Repeated opcode.
55229   // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
55230   // but it currently struggles with different vector widths.
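        // Illustrative sketch (hypothetical horizontal-add ops): for
        // Ops = {HADD(a0,b0), HADD(a1,b1)} the cases below rebuild the wide node as
        // HADD(concat(a0,a1), concat(b0,b1)) when the subtarget supports the wider
        // opcode; the ConcatSubOperand() helper builds those concatenated operands.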
55231   if (llvm::all_of(Ops, [Op0](SDValue Op) {
55232         return Op.getOpcode() == Op0.getOpcode();
55233       })) {
55234     auto ConcatSubOperand = [&](MVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
55235       SmallVector<SDValue> Subs;
55236       for (SDValue SubOp : SubOps)
55237         Subs.push_back(SubOp.getOperand(I));
55238       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
55239     };
55240     auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
55241       for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
55242         SDValue Sub = SubOps[I].getOperand(Op);
55243         unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
55244         if (Sub.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
55245             Sub.getOperand(0).getValueType() != VT ||
55246             Sub.getConstantOperandAPInt(1) != (I * NumSubElts))
55247           return false;
55248       }
55249       return true;
55250     };
55251 
55252     unsigned NumOps = Ops.size();
55253     switch (Op0.getOpcode()) {
55254     case X86ISD::VBROADCAST: {
55255       if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
55256             return Op.getOperand(0).getValueType().is128BitVector();
55257           })) {
55258         if (VT == MVT::v4f64 || VT == MVT::v4i64)
55259           return DAG.getNode(X86ISD::UNPCKL, DL, VT,
55260                              ConcatSubOperand(VT, Ops, 0),
55261                              ConcatSubOperand(VT, Ops, 0));
55262         // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
55263         if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
55264           return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
55265                                               : X86ISD::PSHUFD,
55266                              DL, VT, ConcatSubOperand(VT, Ops, 0),
55267                              getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
55268       }
55269       break;
55270     }
55271     case X86ISD::MOVDDUP:
55272     case X86ISD::MOVSHDUP:
55273     case X86ISD::MOVSLDUP: {
55274       if (!IsSplat)
55275         return DAG.getNode(Op0.getOpcode(), DL, VT,
55276                            ConcatSubOperand(VT, Ops, 0));
55277       break;
55278     }
55279     case X86ISD::SHUFP: {
55280       // Add SHUFPD support if/when necessary.
55281       if (!IsSplat && VT.getScalarType() == MVT::f32 &&
55282           llvm::all_of(Ops, [Op0](SDValue Op) {
55283             return Op.getOperand(2) == Op0.getOperand(2);
55284           })) {
55285         return DAG.getNode(Op0.getOpcode(), DL, VT,
55286                            ConcatSubOperand(VT, Ops, 0),
55287                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55288       }
55289       break;
55290     }
55291     case X86ISD::PSHUFHW:
55292     case X86ISD::PSHUFLW:
55293     case X86ISD::PSHUFD:
55294       if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
55295           Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
55296         return DAG.getNode(Op0.getOpcode(), DL, VT,
55297                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55298       }
55299       [[fallthrough]];
55300     case X86ISD::VPERMILPI:
55301       if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
55302           Op0.getOperand(1) == Ops[1].getOperand(1)) {
55303         SDValue Res = DAG.getBitcast(MVT::v8f32, ConcatSubOperand(VT, Ops, 0));
55304         Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
55305                           Op0.getOperand(1));
55306         return DAG.getBitcast(VT, Res);
55307       }
55308       if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
55309         uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
55310         uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
55311         uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
55312         return DAG.getNode(Op0.getOpcode(), DL, VT,
55313                            ConcatSubOperand(VT, Ops, 0),
55314                            DAG.getTargetConstant(Idx, DL, MVT::i8));
55315       }
55316       break;
55317     case X86ISD::PSHUFB:
55318       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55319                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55320         return DAG.getNode(Op0.getOpcode(), DL, VT,
55321                            ConcatSubOperand(VT, Ops, 0),
55322                            ConcatSubOperand(VT, Ops, 1));
55323       }
55324       break;
55325     case X86ISD::VPERMV3:
55326       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
55327         MVT OpVT = Op0.getSimpleValueType();
55328         int NumSrcElts = OpVT.getVectorNumElements();
55329         SmallVector<int, 64> ConcatMask;
55330         for (unsigned i = 0; i != NumOps; ++i) {
55331           SmallVector<int, 64> SubMask;
55332           SmallVector<SDValue, 2> SubOps;
55333           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
55334                                     SubMask))
55335             break;
55336           for (int M : SubMask) {
55337             if (0 <= M) {
55338               M += M < NumSrcElts ? 0 : NumSrcElts;
55339               M += i * NumSrcElts;
55340             }
55341             ConcatMask.push_back(M);
55342           }
55343         }
55344         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
55345           SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
55346                                           Ops[1].getOperand(0), DAG, DL);
55347           SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
55348                                           Ops[1].getOperand(2), DAG, DL);
55349           MVT IntMaskSVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
55350           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
55351           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
55352           return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
55353         }
55354       }
55355       break;
55356     case X86ISD::VSHLI:
55357     case X86ISD::VSRLI:
55358       // Special case: SHL/SRL AVX1 V4i64 by 32-bits can lower as a shuffle.
55359       // TODO: Move this to LowerShiftByScalarImmediate?
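            // Illustrative sketch: viewing v4i64 as v8i32, shifting each 64-bit
            // element left by 32 moves its low dword into the high dword and zeroes
            // the low dword, i.e. the {8,0,8,2,...} shuffle below where index 8
            // selects a zero element; the logical right shift by 32 is the mirrored
            // {1,8,3,8,...} mask.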
55360       if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
55361           llvm::all_of(Ops, [](SDValue Op) {
55362             return Op.getConstantOperandAPInt(1) == 32;
55363           })) {
55364         SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
55365         SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
55366         if (Op0.getOpcode() == X86ISD::VSHLI) {
55367           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
55368                                      {8, 0, 8, 2, 8, 4, 8, 6});
55369         } else {
55370           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
55371                                      {1, 8, 3, 8, 5, 8, 7, 8});
55372         }
55373         return DAG.getBitcast(VT, Res);
55374       }
55375       [[fallthrough]];
55376     case X86ISD::VSRAI:
55377     case X86ISD::VSHL:
55378     case X86ISD::VSRL:
55379     case X86ISD::VSRA:
55380       if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
55381            (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55382             (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
55383           llvm::all_of(Ops, [Op0](SDValue Op) {
55384             return Op0.getOperand(1) == Op.getOperand(1);
55385           })) {
55386         return DAG.getNode(Op0.getOpcode(), DL, VT,
55387                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55388       }
55389       break;
55390     case X86ISD::VPERMI:
55391     case X86ISD::VROTLI:
55392     case X86ISD::VROTRI:
55393       if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55394           llvm::all_of(Ops, [Op0](SDValue Op) {
55395             return Op0.getOperand(1) == Op.getOperand(1);
55396           })) {
55397         return DAG.getNode(Op0.getOpcode(), DL, VT,
55398                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55399       }
55400       break;
55401     case ISD::AND:
55402     case ISD::OR:
55403     case ISD::XOR:
55404     case X86ISD::ANDNP:
55405       // TODO: Add 256-bit support.
55406       if (!IsSplat && VT.is512BitVector()) {
55407         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55408         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55409                                  NumOps * SrcVT.getVectorNumElements());
55410         return DAG.getNode(Op0.getOpcode(), DL, VT,
55411                            ConcatSubOperand(SrcVT, Ops, 0),
55412                            ConcatSubOperand(SrcVT, Ops, 1));
55413       }
55414       break;
55415     case X86ISD::GF2P8AFFINEQB:
55416       if (!IsSplat &&
55417           (VT.is256BitVector() ||
55418            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55419           llvm::all_of(Ops, [Op0](SDValue Op) {
55420             return Op0.getOperand(2) == Op.getOperand(2);
55421           })) {
55422         return DAG.getNode(Op0.getOpcode(), DL, VT,
55423                            ConcatSubOperand(VT, Ops, 0),
55424                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55425       }
55426       break;
55427     case X86ISD::HADD:
55428     case X86ISD::HSUB:
55429     case X86ISD::FHADD:
55430     case X86ISD::FHSUB:
55431     case X86ISD::PACKSS:
55432     case X86ISD::PACKUS:
55433       if (!IsSplat && VT.is256BitVector() &&
55434           (VT.isFloatingPoint() || Subtarget.hasInt256())) {
55435         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55436         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55437                                  NumOps * SrcVT.getVectorNumElements());
55438         return DAG.getNode(Op0.getOpcode(), DL, VT,
55439                            ConcatSubOperand(SrcVT, Ops, 0),
55440                            ConcatSubOperand(SrcVT, Ops, 1));
55441       }
55442       break;
55443     case X86ISD::PALIGNR:
55444       if (!IsSplat &&
55445           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55446            (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
55447           llvm::all_of(Ops, [Op0](SDValue Op) {
55448             return Op0.getOperand(2) == Op.getOperand(2);
55449           })) {
55450         return DAG.getNode(Op0.getOpcode(), DL, VT,
55451                            ConcatSubOperand(VT, Ops, 0),
55452                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55453       }
55454       break;
55455     case ISD::VSELECT:
55456     case X86ISD::BLENDV:
55457       if (!IsSplat && VT.is256BitVector() && Ops.size() == 2 &&
55458           (VT.getScalarSizeInBits() >= 32 || Subtarget.hasInt256()) &&
55459           IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
55460         EVT SelVT = Ops[0].getOperand(0).getValueType();
55461         SelVT = SelVT.getDoubleNumVectorElementsVT(*DAG.getContext());
55462         if (DAG.getTargetLoweringInfo().isTypeLegal(SelVT))
55463           return DAG.getNode(Op0.getOpcode(), DL, VT,
55464                              ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55465                              ConcatSubOperand(VT, Ops, 1),
55466                              ConcatSubOperand(VT, Ops, 2));
55467       }
55468       break;
55469     }
55470   }
55471 
55472   // Fold subvector loads into one.
55473   // If needed, look through bitcasts to get to the load.
55474   if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
55475     unsigned Fast;
55476     const X86TargetLowering *TLI = Subtarget.getTargetLowering();
55477     if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
55478                                 *FirstLd->getMemOperand(), &Fast) &&
55479         Fast) {
55480       if (SDValue Ld =
55481               EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
55482         return Ld;
55483     }
55484   }
55485 
55486   // Attempt to fold target constant loads.
55487   if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
55488     SmallVector<APInt> EltBits;
55489     APInt UndefElts = APInt::getNullValue(VT.getVectorNumElements());
55490     for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
55491       APInt OpUndefElts;
55492       SmallVector<APInt> OpEltBits;
55493       if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
55494                                          OpEltBits, true, false))
55495         break;
55496       EltBits.append(OpEltBits);
55497       UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
55498     }
55499     if (EltBits.size() == VT.getVectorNumElements())
55500       return getConstVector(EltBits, UndefElts, VT, DAG, DL);
55501   }
55502 
55503   return SDValue();
55504 }
55505 
55506 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
55507                                      TargetLowering::DAGCombinerInfo &DCI,
55508                                      const X86Subtarget &Subtarget) {
55509   EVT VT = N->getValueType(0);
55510   EVT SrcVT = N->getOperand(0).getValueType();
55511   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55512 
55513   // Don't do anything for i1 vectors.
55514   if (VT.getVectorElementType() == MVT::i1)
55515     return SDValue();
55516 
55517   if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
55518     SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
55519     if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
55520                                            DCI, Subtarget))
55521       return R;
55522   }
55523 
55524   return SDValue();
55525 }
55526 
55527 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55528                                        TargetLowering::DAGCombinerInfo &DCI,
55529                                        const X86Subtarget &Subtarget) {
55530   if (DCI.isBeforeLegalizeOps())
55531     return SDValue();
55532 
55533   MVT OpVT = N->getSimpleValueType(0);
55534 
55535   bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
55536 
55537   SDLoc dl(N);
55538   SDValue Vec = N->getOperand(0);
55539   SDValue SubVec = N->getOperand(1);
55540 
55541   uint64_t IdxVal = N->getConstantOperandVal(2);
55542   MVT SubVecVT = SubVec.getSimpleValueType();
55543 
55544   if (Vec.isUndef() && SubVec.isUndef())
55545     return DAG.getUNDEF(OpVT);
55546 
55547   // Inserting undefs/zeros into zeros/undefs is a zero vector.
55548   if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
55549       (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
55550     return getZeroVector(OpVT, Subtarget, DAG, dl);
55551 
55552   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
55553     // If we're inserting into a zero vector and then into a larger zero vector,
55554     // just insert into the larger zero vector directly.
55555     if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55556         ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
55557       uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
55558       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55559                          getZeroVector(OpVT, Subtarget, DAG, dl),
55560                          SubVec.getOperand(1),
55561                          DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
55562     }
55563 
55564     // If we're inserting into a zero vector and our input was extracted from an
55565     // insert into a zero vector of the same type, and the extraction was at
55566     // least as large as the original insertion, just insert the original
55567     // subvector into a zero vector.
55568     if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
55569         isNullConstant(SubVec.getOperand(1)) &&
55570         SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
55571       SDValue Ins = SubVec.getOperand(0);
55572       if (isNullConstant(Ins.getOperand(2)) &&
55573           ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
55574           Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
55575               SubVecVT.getFixedSizeInBits())
55576           return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55577                              getZeroVector(OpVT, Subtarget, DAG, dl),
55578                              Ins.getOperand(1), N->getOperand(2));
55579     }
55580   }
55581 
55582   // Stop here if this is an i1 vector.
55583   if (IsI1Vector)
55584     return SDValue();
55585 
55586   // If this is an insert of an extract, combine to a shuffle. Don't do this
55587   // if the insert or extract can be represented with a subregister operation.
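        // Illustrative sketch (hypothetical v8i32 types): inserting the high v4i32
        // half extracted from A at index 4 into Vec at index 0 becomes
        //   shuffle Vec, A, {12,13,14,15, 4,5,6,7}
        // where the second operand's elements are numbered from 8.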
55588   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55589       SubVec.getOperand(0).getSimpleValueType() == OpVT &&
55590       (IdxVal != 0 ||
55591        !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
55592     int ExtIdxVal = SubVec.getConstantOperandVal(1);
55593     if (ExtIdxVal != 0) {
55594       int VecNumElts = OpVT.getVectorNumElements();
55595       int SubVecNumElts = SubVecVT.getVectorNumElements();
55596       SmallVector<int, 64> Mask(VecNumElts);
55597       // First create an identity shuffle mask.
55598       for (int i = 0; i != VecNumElts; ++i)
55599         Mask[i] = i;
55600       // Now insert the extracted portion.
55601       for (int i = 0; i != SubVecNumElts; ++i)
55602         Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
55603 
55604       return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
55605     }
55606   }
55607 
55608   // Match concat_vector style patterns.
55609   SmallVector<SDValue, 2> SubVectorOps;
55610   if (collectConcatOps(N, SubVectorOps, DAG)) {
55611     if (SDValue Fold =
55612             combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
55613       return Fold;
55614 
55615     // If we're inserting all zeros into the upper half, change this to
55616     // a concat with zero. We will match this to a move
55617     // with implicit upper bit zeroing during isel.
55618     // We do this here because we don't want combineConcatVectorOps to
55619     // create INSERT_SUBVECTOR from CONCAT_VECTORS.
55620     if (SubVectorOps.size() == 2 &&
55621         ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
55622       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55623                          getZeroVector(OpVT, Subtarget, DAG, dl),
55624                          SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
55625   }
55626 
55627   // If this is a broadcast insert into an upper undef, use a larger broadcast.
55628   if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
55629     return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
55630 
55631   // If this is a broadcast load inserted into an upper undef, use a larger
55632   // broadcast load.
55633   if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
55634       SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
55635     auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
55636     SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
55637     SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
55638     SDValue BcastLd =
55639         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
55640                                 MemIntr->getMemoryVT(),
55641                                 MemIntr->getMemOperand());
55642     DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
55643     return BcastLd;
55644   }
55645 
55646   // If we're splatting the lower half subvector of a full vector load into the
55647   // upper half, attempt to create a subvector broadcast.
55648   if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
55649       Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
55650     auto *VecLd = dyn_cast<LoadSDNode>(Vec);
55651     auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
55652     if (VecLd && SubLd &&
55653         DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
55654                                            SubVec.getValueSizeInBits() / 8, 0))
55655       return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
55656                                SubLd, 0, DAG);
55657   }
55658 
55659   return SDValue();
55660 }
55661 
55662 /// If we are extracting a subvector of a vector select and the select condition
55663 /// is composed of concatenated vectors, try to narrow the select width. This
55664 /// is a common pattern for AVX1 integer code because 256-bit selects may be
55665 /// legal, but there is almost no integer math/logic available for 256-bit.
55666 /// This function should only be called with legal types (otherwise, the calls
55667 /// to get simple value types will assert).
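      /// Illustrative sketch (hypothetical types): when Cond is itself a
      /// concatenation, (extract_subvector (vselect v8i32 Cond, v8i32 T, v8i32 F), 4)
      /// can be narrowed to a v4i32 vselect of the upper 128-bit halves of Cond, T
      /// and F, keeping AVX1 integer ops at 128 bits.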
55668 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
55669   SDValue Sel = Ext->getOperand(0);
55670   SmallVector<SDValue, 4> CatOps;
55671   if (Sel.getOpcode() != ISD::VSELECT ||
55672       !collectConcatOps(Sel.getOperand(0).getNode(), CatOps, DAG))
55673     return SDValue();
55674 
55675   // Note: We assume simple value types because this should only be called with
55676   //       legal operations/types.
55677   // TODO: This can be extended to handle extraction to 256-bits.
55678   MVT VT = Ext->getSimpleValueType(0);
55679   if (!VT.is128BitVector())
55680     return SDValue();
55681 
55682   MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
55683   if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
55684     return SDValue();
55685 
55686   MVT WideVT = Ext->getOperand(0).getSimpleValueType();
55687   MVT SelVT = Sel.getSimpleValueType();
55688   assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
55689          "Unexpected vector type with legal operations");
55690 
55691   unsigned SelElts = SelVT.getVectorNumElements();
55692   unsigned CastedElts = WideVT.getVectorNumElements();
55693   unsigned ExtIdx = Ext->getConstantOperandVal(1);
55694   if (SelElts % CastedElts == 0) {
55695     // The select has the same or more (narrower) elements than the extract
55696     // operand. The extraction index gets scaled by that factor.
55697     ExtIdx *= (SelElts / CastedElts);
55698   } else if (CastedElts % SelElts == 0) {
55699     // The select has less (wider) elements than the extract operand. Make sure
55700     // that the extraction index can be divided evenly.
55701     unsigned IndexDivisor = CastedElts / SelElts;
55702     if (ExtIdx % IndexDivisor != 0)
55703       return SDValue();
55704     ExtIdx /= IndexDivisor;
55705   } else {
55706     llvm_unreachable("Element count of simple vector types are not divisible?");
55707   }
55708 
55709   unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
55710   unsigned NarrowElts = SelElts / NarrowingFactor;
55711   MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
55712   SDLoc DL(Ext);
55713   SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
55714   SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
55715   SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
55716   SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
55717   return DAG.getBitcast(VT, NarrowSel);
55718 }
55719 
55720 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55721                                         TargetLowering::DAGCombinerInfo &DCI,
55722                                         const X86Subtarget &Subtarget) {
55723   // For AVX1 only, if we are extracting from a 256-bit and+not (which will
55724   // eventually get combined/lowered into ANDNP) with a concatenated operand,
55725   // split the 'and' into 128-bit ops to avoid the concatenate and extract.
55726   // We let generic combining take over from there to simplify the
55727   // insert/extract and 'not'.
55728   // This pattern emerges during AVX1 legalization. We handle it before lowering
55729   // to avoid complications like splitting constant vector loads.
55730 
55731   // Capture the original wide type in the likely case that we need to bitcast
55732   // back to this type.
55733   if (!N->getValueType(0).isSimple())
55734     return SDValue();
55735 
55736   MVT VT = N->getSimpleValueType(0);
55737   SDValue InVec = N->getOperand(0);
55738   unsigned IdxVal = N->getConstantOperandVal(1);
55739   SDValue InVecBC = peekThroughBitcasts(InVec);
55740   EVT InVecVT = InVec.getValueType();
55741   unsigned SizeInBits = VT.getSizeInBits();
55742   unsigned InSizeInBits = InVecVT.getSizeInBits();
55743   unsigned NumSubElts = VT.getVectorNumElements();
55744   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55745 
55746   if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
55747       TLI.isTypeLegal(InVecVT) &&
55748       InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
55749     auto isConcatenatedNot = [](SDValue V) {
55750       V = peekThroughBitcasts(V);
55751       if (!isBitwiseNot(V))
55752         return false;
55753       SDValue NotOp = V->getOperand(0);
55754       return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
55755     };
55756     if (isConcatenatedNot(InVecBC.getOperand(0)) ||
55757         isConcatenatedNot(InVecBC.getOperand(1))) {
55758       // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
55759       SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
55760       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
55761                          DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
55762     }
55763   }
55764 
55765   if (DCI.isBeforeLegalizeOps())
55766     return SDValue();
55767 
55768   if (SDValue V = narrowExtractedVectorSelect(N, DAG))
55769     return V;
55770 
55771   if (ISD::isBuildVectorAllZeros(InVec.getNode()))
55772     return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55773 
55774   if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
55775     if (VT.getScalarType() == MVT::i1)
55776       return DAG.getConstant(1, SDLoc(N), VT);
55777     return getOnesVector(VT, DAG, SDLoc(N));
55778   }
55779 
55780   if (InVec.getOpcode() == ISD::BUILD_VECTOR)
55781     return DAG.getBuildVector(VT, SDLoc(N),
55782                               InVec->ops().slice(IdxVal, NumSubElts));
55783 
55784   // If we are extracting from an insert into a larger vector, replace with a
55785   // smaller insert, provided the extraction covers at least the inserted
55786   // subvector. Don't do this for i1 vectors.
55787   // TODO: Relax the matching indices requirement?
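        // Illustrative sketch (hypothetical types): extracting v4i32 at index 4 from
        // (insert_subvector v16i32 X, v2i32 Y, 4) becomes
        //   (insert_subvector (extract_subvector X, 4), Y, 0),
        // since the 64-bit insertion fits inside the 128-bit extraction.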
55788   if (VT.getVectorElementType() != MVT::i1 &&
55789       InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
55790       IdxVal == InVec.getConstantOperandVal(2) &&
55791       InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
55792     SDLoc DL(N);
55793     SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
55794                                  InVec.getOperand(0), N->getOperand(1));
55795     unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
55796     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
55797                        InVec.getOperand(1),
55798                        DAG.getVectorIdxConstant(NewIdxVal, DL));
55799   }
55800 
55801   // If we're extracting an upper subvector from a broadcast we should just
55802   // extract the lowest subvector instead which should allow
55803   // SimplifyDemandedVectorElts to do more simplifications.
55804   if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
55805                       InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55806                       DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
55807     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55808 
55809   // If we're extracting a broadcasted subvector, just use the lowest subvector.
55810   if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
55811       cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
55812     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55813 
55814   // Attempt to extract from the source of a shuffle vector.
55815   if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
55816     SmallVector<int, 32> ShuffleMask;
55817     SmallVector<int, 32> ScaledMask;
55818     SmallVector<SDValue, 2> ShuffleInputs;
55819     unsigned NumSubVecs = InSizeInBits / SizeInBits;
55820     // Decode the shuffle mask and scale it so that it shuffles whole subvectors.
55821     if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
55822         scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
55823       unsigned SubVecIdx = IdxVal / NumSubElts;
55824       if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
55825         return DAG.getUNDEF(VT);
55826       if (ScaledMask[SubVecIdx] == SM_SentinelZero)
55827         return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55828       SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
55829       if (Src.getValueSizeInBits() == InSizeInBits) {
55830         unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
55831         unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
55832         return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
55833                                 SDLoc(N), SizeInBits);
55834       }
55835     }
55836   }
55837 
55838   // If we're extracting the lowest subvector and we're the only user,
55839   // we may be able to perform this with a smaller vector width.
55840   unsigned InOpcode = InVec.getOpcode();
55841   if (InVec.hasOneUse()) {
55842     if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
55843       // v2f64 CVTDQ2PD(v4i32).
55844       if (InOpcode == ISD::SINT_TO_FP &&
55845           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55846         return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
55847       }
55848       // v2f64 CVTUDQ2PD(v4i32).
55849       if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
55850           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55851         return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
55852       }
55853       // v2f64 CVTPS2PD(v4f32).
55854       if (InOpcode == ISD::FP_EXTEND &&
55855           InVec.getOperand(0).getValueType() == MVT::v4f32) {
55856         return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
55857       }
55858     }
55859     if (IdxVal == 0 &&
55860         (InOpcode == ISD::ANY_EXTEND ||
55861          InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
55862          InOpcode == ISD::ZERO_EXTEND ||
55863          InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
55864          InOpcode == ISD::SIGN_EXTEND ||
55865          InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55866         (SizeInBits == 128 || SizeInBits == 256) &&
55867         InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
55868       SDLoc DL(N);
55869       SDValue Ext = InVec.getOperand(0);
55870       if (Ext.getValueSizeInBits() > SizeInBits)
55871         Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
55872       unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
55873       return DAG.getNode(ExtOp, DL, VT, Ext);
55874     }
55875     if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
55876         InVec.getOperand(0).getValueType().is256BitVector() &&
55877         InVec.getOperand(1).getValueType().is256BitVector() &&
55878         InVec.getOperand(2).getValueType().is256BitVector()) {
55879       SDLoc DL(N);
55880       SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
55881       SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
55882       SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
55883       return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
55884     }
55885     if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
55886         (VT.is128BitVector() || VT.is256BitVector())) {
55887       SDLoc DL(N);
55888       SDValue InVecSrc = InVec.getOperand(0);
55889       unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
55890       SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
55891       return DAG.getNode(InOpcode, DL, VT, Ext);
55892     }
55893     if (InOpcode == X86ISD::MOVDDUP &&
55894         (VT.is128BitVector() || VT.is256BitVector())) {
55895       SDLoc DL(N);
55896       SDValue Ext0 =
55897           extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55898       return DAG.getNode(InOpcode, DL, VT, Ext0);
55899     }
55900   }
55901 
55902   // Always split vXi64 logical shifts where we're extracting the upper 32 bits,
55903   // as this is very likely to fold into a shuffle/truncation.
55904   if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
55905       InVecVT.getScalarSizeInBits() == 64 &&
55906       InVec.getConstantOperandAPInt(1) == 32) {
55907     SDLoc DL(N);
55908     SDValue Ext =
55909         extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55910     return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
55911   }
55912 
55913   return SDValue();
55914 }
55915 
55916 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
55917   EVT VT = N->getValueType(0);
55918   SDValue Src = N->getOperand(0);
55919   SDLoc DL(N);
55920 
55921   // If this is a scalar_to_vector to v1i1 from an AND with 1, bypass the AND.
55922   // This occurs frequently in our masked scalar intrinsic code and our
55923   // floating point select lowering with AVX512.
55924   // TODO: SimplifyDemandedBits instead?
55925   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
55926     if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
55927       if (C->getAPIntValue().isOne())
55928         return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
55929                            Src.getOperand(0));
55930 
55931   // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
55932   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
55933       Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
55934       Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
55935     if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
55936       if (C->isZero())
55937         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
55938                            Src.getOperand(1));
55939 
55940   // Reduce v2i64 to v4i32 if we don't need the upper bits or are known zero.
55941   // TODO: Move to DAGCombine/SimplifyDemandedBits?
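        // Illustrative sketch: (v2i64 scalar_to_vector (i64 zext (i32 x))) only
        // needs x in the low 32 bits, so it can be rebuilt as a v4i32
        // scalar_to_vector of x (with VZEXT_MOVL keeping the rest zero) and bitcast
        // back to v2i64.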
55942   if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
55943     auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
55944       if (Op.getValueType() != MVT::i64)
55945         return SDValue();
55946       unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
55947       if (Op.getOpcode() == Opc &&
55948           Op.getOperand(0).getScalarValueSizeInBits() <= 32)
55949         return Op.getOperand(0);
55950       unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
55951       if (auto *Ld = dyn_cast<LoadSDNode>(Op))
55952         if (Ld->getExtensionType() == Ext &&
55953             Ld->getMemoryVT().getScalarSizeInBits() <= 32)
55954           return Op;
55955       if (IsZeroExt) {
55956         KnownBits Known = DAG.computeKnownBits(Op);
55957         if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
55958           return Op;
55959       }
55960       return SDValue();
55961     };
55962 
55963     if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
55964       return DAG.getBitcast(
55965           VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55966                           DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));
55967 
55968     if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
55969       return DAG.getBitcast(
55970           VT,
55971           DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
55972                       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55973                                   DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
55974   }
55975 
55976   // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
55977   if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
55978       Src.getOperand(0).getValueType() == MVT::x86mmx)
55979     return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
55980 
55981   // See if we're broadcasting the scalar value, in which case just reuse that.
55982   // Ensure the same SDValue from the SDNode use is being used.
55983   // Ensure the broadcast is using this exact SDValue, not just the same node.
55984     for (SDNode *User : Src->uses())
55985       if (User->getOpcode() == X86ISD::VBROADCAST &&
55986           Src == User->getOperand(0)) {
55987         unsigned SizeInBits = VT.getFixedSizeInBits();
55988         unsigned BroadcastSizeInBits =
55989             User->getValueSizeInBits(0).getFixedValue();
55990         if (BroadcastSizeInBits == SizeInBits)
55991           return SDValue(User, 0);
55992         if (BroadcastSizeInBits > SizeInBits)
55993           return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
55994         // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
55995         // coverage.
55996       }
55997 
55998   return SDValue();
55999 }
56000 
56001 // Simplify PMULDQ and PMULUDQ operations.
56002 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
56003                              TargetLowering::DAGCombinerInfo &DCI,
56004                              const X86Subtarget &Subtarget) {
56005   SDValue LHS = N->getOperand(0);
56006   SDValue RHS = N->getOperand(1);
56007 
56008   // Canonicalize constant to RHS.
56009   if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
56010       !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
56011     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
56012 
56013   // Multiply by zero.
56014   // Don't return RHS as it may contain UNDEFs.
56015   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
56016     return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
56017 
56018   // PMULDQ/PMULUDQ only use the lower 32 bits of each vector element.
56019   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56020   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
56021     return SDValue(N, 0);
56022 
56023   // If the input is an extend_invec and the SimplifyDemandedBits call didn't
56024   // convert it to any_extend_invec, due to the LegalOperations check, do the
56025   // conversion directly to a vector shuffle manually. This exposes combine
56026   // opportunities missed by combineEXTEND_VECTOR_INREG not calling
56027   // combineX86ShufflesRecursively on SSE4.1 targets.
56028   // FIXME: This is basically a hack around several other issues related to
56029   // ANY_EXTEND_VECTOR_INREG.
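  // The { 0, -1, 1, -1 } mask below places source elements 0 and 1 into the
  // even i32 lanes (odd lanes undef); since PMULDQ/PMULUDQ only read the low
  // 32 bits of each 64-bit lane, the explicit extension bits are irrelevant.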
56030   if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
56031       (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
56032        LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
56033       LHS.getOperand(0).getValueType() == MVT::v4i32) {
56034     SDLoc dl(N);
56035     LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
56036                                LHS.getOperand(0), { 0, -1, 1, -1 });
56037     LHS = DAG.getBitcast(MVT::v2i64, LHS);
56038     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
56039   }
56040   if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
56041       (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
56042        RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
56043       RHS.getOperand(0).getValueType() == MVT::v4i32) {
56044     SDLoc dl(N);
56045     RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
56046                                RHS.getOperand(0), { 0, -1, 1, -1 });
56047     RHS = DAG.getBitcast(MVT::v2i64, RHS);
56048     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
56049   }
56050 
56051   return SDValue();
56052 }
56053 
56054 // Simplify VPMADDUBSW/VPMADDWD operations.
56055 static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
56056                              TargetLowering::DAGCombinerInfo &DCI) {
56057   EVT VT = N->getValueType(0);
56058   SDValue LHS = N->getOperand(0);
56059   SDValue RHS = N->getOperand(1);
56060 
56061   // Multiply by zero.
56062   // Don't return LHS/RHS as it may contain UNDEFs.
56063   if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
56064       ISD::isBuildVectorAllZeros(RHS.getNode()))
56065     return DAG.getConstant(0, SDLoc(N), VT);
56066 
56067   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56068   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
56069   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
56070     return SDValue(N, 0);
56071 
56072   return SDValue();
56073 }
56074 
56075 static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
56076                                           TargetLowering::DAGCombinerInfo &DCI,
56077                                           const X86Subtarget &Subtarget) {
56078   EVT VT = N->getValueType(0);
56079   SDValue In = N->getOperand(0);
56080   unsigned Opcode = N->getOpcode();
56081   unsigned InOpcode = In.getOpcode();
56082   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56083   SDLoc DL(N);
56084 
56085   // Try to merge vector loads and extend_inreg to an extload.
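  // For example, (v4i32 sign_extend_vector_inreg (v16i8 load p)) can become a
  // v4i32 sextload from p with memory type v4i8, provided that extending load
  // is legal for the target.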
56086   if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
56087       In.hasOneUse()) {
56088     auto *Ld = cast<LoadSDNode>(In);
56089     if (Ld->isSimple()) {
56090       MVT SVT = In.getSimpleValueType().getVectorElementType();
56091       ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
56092                                  ? ISD::SEXTLOAD
56093                                  : ISD::ZEXTLOAD;
56094       EVT MemVT = VT.changeVectorElementType(SVT);
56095       if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
56096         SDValue Load = DAG.getExtLoad(
56097             Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
56098             MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
56099         DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
56100         return Load;
56101       }
56102     }
56103   }
56104 
56105   // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
56106   if (Opcode == InOpcode)
56107     return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
56108 
56109   // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
56110   // -> EXTEND_VECTOR_INREG(X).
56111   // TODO: Handle non-zero subvector indices.
56112   if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
56113       In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
56114       In.getOperand(0).getOperand(0).getValueSizeInBits() ==
56115           In.getValueSizeInBits())
56116     return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
56117 
56118   // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
56119   // TODO: Move to DAGCombine?
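  // For example, a v2i64 zero_extend_vector_inreg of
  // (v4i32 build_vector X,Y,?,?) becomes a bitcast of
  // (v4i32 build_vector X,0,Y,0).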
56120   if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
56121       In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
56122       In.getValueSizeInBits() == VT.getSizeInBits()) {
56123     unsigned NumElts = VT.getVectorNumElements();
56124     unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
56125     EVT EltVT = In.getOperand(0).getValueType();
56126     SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
56127     for (unsigned I = 0; I != NumElts; ++I)
56128       Elts[I * Scale] = In.getOperand(I);
56129     return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
56130   }
56131 
56132   // Attempt to combine as a shuffle on SSE41+ targets.
56133   if ((Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
56134        Opcode == ISD::ZERO_EXTEND_VECTOR_INREG) &&
56135       Subtarget.hasSSE41()) {
56136     SDValue Op(N, 0);
56137     if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
56138       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
56139         return Res;
56140   }
56141 
56142   return SDValue();
56143 }
56144 
56145 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
56146                              TargetLowering::DAGCombinerInfo &DCI) {
56147   EVT VT = N->getValueType(0);
56148 
56149   if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
56150     return DAG.getConstant(0, SDLoc(N), VT);
56151 
56152   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56153   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
56154   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
56155     return SDValue(N, 0);
56156 
56157   return SDValue();
56158 }
56159 
56160 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
56161 // Done as a combine because the lowerings for fp16_to_fp and fp_to_fp16 produce
56162 // extra instructions between the conversions due to going to scalar and back.
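// For example, (f32 (fp16_to_fp (fp_to_fp16 X))) becomes roughly
// extract_elt (CVTPH2PS (CVTPS2PH (scalar_to_vector X), imm 4)), 0, where the
// immediate 4 selects the current (MXCSR) rounding mode for the PS->PH step.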
56163 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
56164                                  const X86Subtarget &Subtarget) {
56165   if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
56166     return SDValue();
56167 
56168   if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
56169     return SDValue();
56170 
56171   if (N->getValueType(0) != MVT::f32 ||
56172       N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
56173     return SDValue();
56174 
56175   SDLoc dl(N);
56176   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
56177                             N->getOperand(0).getOperand(0));
56178   Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
56179                     DAG.getTargetConstant(4, dl, MVT::i32));
56180   Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
56181   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
56182                      DAG.getIntPtrConstant(0, dl));
56183 }
56184 
56185 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
56186                                 const X86Subtarget &Subtarget) {
56187   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56188     return SDValue();
56189 
56190   if (Subtarget.hasFP16())
56191     return SDValue();
56192 
56193   bool IsStrict = N->isStrictFPOpcode();
56194   EVT VT = N->getValueType(0);
56195   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56196   EVT SrcVT = Src.getValueType();
56197 
56198   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
56199     return SDValue();
56200 
56201   if (VT.getVectorElementType() != MVT::f32 &&
56202       VT.getVectorElementType() != MVT::f64)
56203     return SDValue();
56204 
56205   unsigned NumElts = VT.getVectorNumElements();
56206   if (NumElts == 1 || !isPowerOf2_32(NumElts))
56207     return SDValue();
56208 
56209   SDLoc dl(N);
56210 
56211   // Convert the input to vXi16.
56212   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
56213   Src = DAG.getBitcast(IntVT, Src);
56214 
56215   // Widen to at least 8 input elements.
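  // For example, a v2i16 source (bitcast from v2f16) is concatenated with
  // zero vectors up to v8i16, while a v4i16 source is padded with undef,
  // since CVTPH2PS expects a full 128-bit input.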
56216   if (NumElts < 8) {
56217     unsigned NumConcats = 8 / NumElts;
56218     SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
56219                                 : DAG.getConstant(0, dl, IntVT);
56220     SmallVector<SDValue, 4> Ops(NumConcats, Fill);
56221     Ops[0] = Src;
56222     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
56223   }
56224 
56225   // Destination is vXf32 with at least 4 elements.
56226   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
56227                                std::max(4U, NumElts));
56228   SDValue Cvt, Chain;
56229   if (IsStrict) {
56230     Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
56231                       {N->getOperand(0), Src});
56232     Chain = Cvt.getValue(1);
56233   } else {
56234     Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
56235   }
56236 
56237   if (NumElts < 4) {
56238     assert(NumElts == 2 && "Unexpected size");
56239     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
56240                       DAG.getIntPtrConstant(0, dl));
56241   }
56242 
56243   if (IsStrict) {
56244     // Extend to the original VT if necessary.
56245     if (Cvt.getValueType() != VT) {
56246       Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
56247                         {Chain, Cvt});
56248       Chain = Cvt.getValue(1);
56249     }
56250     return DAG.getMergeValues({Cvt, Chain}, dl);
56251   }
56252 
56253   // Extend to the original VT if necessary.
56254   return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
56255 }
56256 
56257 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
56258 // from. Limit this to cases where the loads have the same input chain and the
56259 // output chains are unused. This avoids any memory ordering issues.
56260 static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
56261                                      TargetLowering::DAGCombinerInfo &DCI) {
56262   assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
56263           N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
56264          "Unknown broadcast load type");
56265 
56266   // Only do this if the chain result is unused.
56267   if (N->hasAnyUseOfValue(1))
56268     return SDValue();
56269 
56270   auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
56271 
56272   SDValue Ptr = MemIntrin->getBasePtr();
56273   SDValue Chain = MemIntrin->getChain();
56274   EVT VT = N->getSimpleValueType(0);
56275   EVT MemVT = MemIntrin->getMemoryVT();
56276 
56277   // Look at other users of our base pointer and try to find a wider broadcast.
56278   // The input chain and the size of the memory VT must match.
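  // For example, a v4f32 VBROADCAST_LOAD from Ptr can reuse an existing
  // v8f32 VBROADCAST_LOAD of the same pointer and chain by extracting the
  // low 128 bits of the wider result.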
56279   for (SDNode *User : Ptr->uses())
56280     if (User != N && User->getOpcode() == N->getOpcode() &&
56281         cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
56282         cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
56283         cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
56284             MemVT.getSizeInBits() &&
56285         !User->hasAnyUseOfValue(1) &&
56286         User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
56287       SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
56288                                          VT.getSizeInBits());
56289       Extract = DAG.getBitcast(VT, Extract);
56290       return DCI.CombineTo(N, Extract, SDValue(User, 1));
56291     }
56292 
56293   return SDValue();
56294 }
56295 
56296 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
56297                                const X86Subtarget &Subtarget) {
56298   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56299     return SDValue();
56300 
56301   if (Subtarget.hasFP16())
56302     return SDValue();
56303 
56304   bool IsStrict = N->isStrictFPOpcode();
56305   EVT VT = N->getValueType(0);
56306   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56307   EVT SrcVT = Src.getValueType();
56308 
56309   if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
56310       SrcVT.getVectorElementType() != MVT::f32)
56311     return SDValue();
56312 
56313   unsigned NumElts = VT.getVectorNumElements();
56314   if (NumElts == 1 || !isPowerOf2_32(NumElts))
56315     return SDValue();
56316 
56317   SDLoc dl(N);
56318 
56319   // Widen to at least 4 input elements.
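  // For example, a v2f32 source is concatenated with a v2f32 zero vector to
  // form the v4f32 input that CVTPS2PH expects.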
56320   if (NumElts < 4)
56321     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
56322                       DAG.getConstantFP(0.0, dl, SrcVT));
56323 
56324   // Destination is vXi16 with at least 8 elements.
56325   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
56326                                std::max(8U, NumElts));
56327   SDValue Cvt, Chain;
56328   SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
56329   if (IsStrict) {
56330     Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
56331                       {N->getOperand(0), Src, Rnd});
56332     Chain = Cvt.getValue(1);
56333   } else {
56334     Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
56335   }
56336 
56337   // Extract down to real number of elements.
56338   if (NumElts < 8) {
56339     EVT IntVT = VT.changeVectorElementTypeToInteger();
56340     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
56341                       DAG.getIntPtrConstant(0, dl));
56342   }
56343 
56344   Cvt = DAG.getBitcast(VT, Cvt);
56345 
56346   if (IsStrict)
56347     return DAG.getMergeValues({Cvt, Chain}, dl);
56348 
56349   return Cvt;
56350 }
56351 
56352 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
56353   SDValue Src = N->getOperand(0);
56354 
56355   // Turn MOVDQ2Q+simple_load into an mmx load.
56356   if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
56357     LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
56358 
56359     if (LN->isSimple()) {
56360       SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
56361                                   LN->getBasePtr(),
56362                                   LN->getPointerInfo(),
56363                                   LN->getOriginalAlign(),
56364                                   LN->getMemOperand()->getFlags());
56365       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
56366       return NewLd;
56367     }
56368   }
56369 
56370   return SDValue();
56371 }
56372 
56373 static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
56374                            TargetLowering::DAGCombinerInfo &DCI) {
56375   unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
56376   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56377   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
56378     return SDValue(N, 0);
56379 
56380   return SDValue();
56381 }
56382 
56383 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
56384                                              DAGCombinerInfo &DCI) const {
56385   SelectionDAG &DAG = DCI.DAG;
56386   switch (N->getOpcode()) {
56387   default: break;
56388   case ISD::SCALAR_TO_VECTOR:
56389     return combineScalarToVector(N, DAG);
56390   case ISD::EXTRACT_VECTOR_ELT:
56391   case X86ISD::PEXTRW:
56392   case X86ISD::PEXTRB:
56393     return combineExtractVectorElt(N, DAG, DCI, Subtarget);
56394   case ISD::CONCAT_VECTORS:
56395     return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
56396   case ISD::INSERT_SUBVECTOR:
56397     return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
56398   case ISD::EXTRACT_SUBVECTOR:
56399     return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
56400   case ISD::VSELECT:
56401   case ISD::SELECT:
56402   case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
56403   case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
56404   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
56405   case X86ISD::CMP:         return combineCMP(N, DAG);
56406   case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
56407   case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
56408   case X86ISD::ADD:
56409   case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
56410   case X86ISD::SBB:         return combineSBB(N, DAG);
56411   case X86ISD::ADC:         return combineADC(N, DAG, DCI);
56412   case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
56413   case ISD::SHL:            return combineShiftLeft(N, DAG);
56414   case ISD::SRA:            return combineShiftRightArithmetic(N, DAG, Subtarget);
56415   case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI, Subtarget);
56416   case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
56417   case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
56418   case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
56419   case X86ISD::BEXTR:
56420   case X86ISD::BEXTRI:      return combineBEXTR(N, DAG, DCI, Subtarget);
56421   case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
56422   case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
56423   case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
56424   case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
56425   case X86ISD::VEXTRACT_STORE:
56426     return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
56427   case ISD::SINT_TO_FP:
56428   case ISD::STRICT_SINT_TO_FP:
56429     return combineSIntToFP(N, DAG, DCI, Subtarget);
56430   case ISD::UINT_TO_FP:
56431   case ISD::STRICT_UINT_TO_FP:
56432     return combineUIntToFP(N, DAG, Subtarget);
56433   case ISD::FADD:
56434   case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
56435   case X86ISD::VFCMULC:
56436   case X86ISD::VFMULC:      return combineFMulcFCMulc(N, DAG, Subtarget);
56437   case ISD::FNEG:           return combineFneg(N, DAG, DCI, Subtarget);
56438   case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
56439   case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG, DCI);
56440   case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
56441   case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
56442   case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
56443   case X86ISD::FXOR:
56444   case X86ISD::FOR:         return combineFOr(N, DAG, DCI, Subtarget);
56445   case X86ISD::FMIN:
56446   case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
56447   case ISD::FMINNUM:
56448   case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
56449   case X86ISD::CVTSI2P:
56450   case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
56451   case X86ISD::CVTP2SI:
56452   case X86ISD::CVTP2UI:
56453   case X86ISD::STRICT_CVTTP2SI:
56454   case X86ISD::CVTTP2SI:
56455   case X86ISD::STRICT_CVTTP2UI:
56456   case X86ISD::CVTTP2UI:
56457                             return combineCVTP2I_CVTTP2I(N, DAG, DCI);
56458   case X86ISD::STRICT_CVTPH2PS:
56459   case X86ISD::CVTPH2PS:    return combineCVTPH2PS(N, DAG, DCI);
56460   case X86ISD::BT:          return combineBT(N, DAG, DCI);
56461   case ISD::ANY_EXTEND:
56462   case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
56463   case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
56464   case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
56465   case ISD::ANY_EXTEND_VECTOR_INREG:
56466   case ISD::SIGN_EXTEND_VECTOR_INREG:
56467   case ISD::ZERO_EXTEND_VECTOR_INREG:
56468     return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
56469   case ISD::SETCC:          return combineSetCC(N, DAG, DCI, Subtarget);
56470   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
56471   case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
56472   case X86ISD::PACKSS:
56473   case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
56474   case X86ISD::HADD:
56475   case X86ISD::HSUB:
56476   case X86ISD::FHADD:
56477   case X86ISD::FHSUB:       return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
56478   case X86ISD::VSHL:
56479   case X86ISD::VSRA:
56480   case X86ISD::VSRL:
56481     return combineVectorShiftVar(N, DAG, DCI, Subtarget);
56482   case X86ISD::VSHLI:
56483   case X86ISD::VSRAI:
56484   case X86ISD::VSRLI:
56485     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
56486   case ISD::INSERT_VECTOR_ELT:
56487   case X86ISD::PINSRB:
56488   case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
56489   case X86ISD::SHUFP:       // Handle all target specific shuffles
56490   case X86ISD::INSERTPS:
56491   case X86ISD::EXTRQI:
56492   case X86ISD::INSERTQI:
56493   case X86ISD::VALIGN:
56494   case X86ISD::PALIGNR:
56495   case X86ISD::VSHLDQ:
56496   case X86ISD::VSRLDQ:
56497   case X86ISD::BLENDI:
56498   case X86ISD::UNPCKH:
56499   case X86ISD::UNPCKL:
56500   case X86ISD::MOVHLPS:
56501   case X86ISD::MOVLHPS:
56502   case X86ISD::PSHUFB:
56503   case X86ISD::PSHUFD:
56504   case X86ISD::PSHUFHW:
56505   case X86ISD::PSHUFLW:
56506   case X86ISD::MOVSHDUP:
56507   case X86ISD::MOVSLDUP:
56508   case X86ISD::MOVDDUP:
56509   case X86ISD::MOVSS:
56510   case X86ISD::MOVSD:
56511   case X86ISD::MOVSH:
56512   case X86ISD::VBROADCAST:
56513   case X86ISD::VPPERM:
56514   case X86ISD::VPERMI:
56515   case X86ISD::VPERMV:
56516   case X86ISD::VPERMV3:
56517   case X86ISD::VPERMIL2:
56518   case X86ISD::VPERMILPI:
56519   case X86ISD::VPERMILPV:
56520   case X86ISD::VPERM2X128:
56521   case X86ISD::SHUF128:
56522   case X86ISD::VZEXT_MOVL:
56523   case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
56524   case X86ISD::FMADD_RND:
56525   case X86ISD::FMSUB:
56526   case X86ISD::STRICT_FMSUB:
56527   case X86ISD::FMSUB_RND:
56528   case X86ISD::FNMADD:
56529   case X86ISD::STRICT_FNMADD:
56530   case X86ISD::FNMADD_RND:
56531   case X86ISD::FNMSUB:
56532   case X86ISD::STRICT_FNMSUB:
56533   case X86ISD::FNMSUB_RND:
56534   case ISD::FMA:
56535   case ISD::STRICT_FMA:     return combineFMA(N, DAG, DCI, Subtarget);
56536   case X86ISD::FMADDSUB_RND:
56537   case X86ISD::FMSUBADD_RND:
56538   case X86ISD::FMADDSUB:
56539   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
56540   case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
56541   case X86ISD::MGATHER:
56542   case X86ISD::MSCATTER:
56543     return combineX86GatherScatter(N, DAG, DCI, Subtarget);
56544   case ISD::MGATHER:
56545   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
56546   case X86ISD::PCMPEQ:
56547   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
56548   case X86ISD::PMULDQ:
56549   case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
56550   case X86ISD::VPMADDUBSW:
56551   case X86ISD::VPMADDWD:    return combineVPMADD(N, DAG, DCI);
56552   case X86ISD::KSHIFTL:
56553   case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
56554   case ISD::FP16_TO_FP:     return combineFP16_TO_FP(N, DAG, Subtarget);
56555   case ISD::STRICT_FP_EXTEND:
56556   case ISD::FP_EXTEND:      return combineFP_EXTEND(N, DAG, Subtarget);
56557   case ISD::STRICT_FP_ROUND:
56558   case ISD::FP_ROUND:       return combineFP_ROUND(N, DAG, Subtarget);
56559   case X86ISD::VBROADCAST_LOAD:
56560   case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
56561   case X86ISD::MOVDQ2Q:     return combineMOVDQ2Q(N, DAG);
56562   case X86ISD::PDEP:        return combinePDEP(N, DAG, DCI);
56563   }
56564 
56565   return SDValue();
56566 }
56567 
56568 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
56569   if (!isTypeLegal(VT))
56570     return false;
56571 
56572   // There are no vXi8 shifts.
56573   if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
56574     return false;
56575 
56576   // TODO: Almost no 8-bit ops are desirable because they have no actual
56577   //       size/speed advantages vs. 32-bit ops, but they do have a major
56578   //       potential disadvantage by causing partial register stalls.
56579   //
56580   // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
56581   // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
56582   // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
56583   // check for a constant operand to the multiply.
56584   if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
56585     return false;
56586 
56587   // i16 instruction encodings are longer and some i16 instructions are slow,
56588   // so those are not desirable.
56589   if (VT == MVT::i16) {
56590     switch (Opc) {
56591     default:
56592       break;
56593     case ISD::LOAD:
56594     case ISD::SIGN_EXTEND:
56595     case ISD::ZERO_EXTEND:
56596     case ISD::ANY_EXTEND:
56597     case ISD::SHL:
56598     case ISD::SRA:
56599     case ISD::SRL:
56600     case ISD::SUB:
56601     case ISD::ADD:
56602     case ISD::MUL:
56603     case ISD::AND:
56604     case ISD::OR:
56605     case ISD::XOR:
56606       return false;
56607     }
56608   }
56609 
56610   // Any legal type not explicitly accounted for above here is desirable.
56611   return true;
56612 }
56613 
56614 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
56615                                                   SDValue Value, SDValue Addr,
56616                                                   SelectionDAG &DAG) const {
56617   const Module *M = DAG.getMachineFunction().getMMI().getModule();
56618   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
56619   if (IsCFProtectionSupported) {
56620     // In case control-flow branch protection is enabled, we need to add the
56621     // notrack prefix to the indirect branch.
56622     // In order to do that, we create an NT_BRIND SDNode.
56623     // Upon ISel, the pattern will convert it to a jmp with the NoTrack prefix.
56624     return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
56625   }
56626 
56627   return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
56628 }
56629 
56630 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
56631   EVT VT = Op.getValueType();
56632   bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
56633                              isa<ConstantSDNode>(Op.getOperand(1));
56634 
56635   // i16 is legal, but undesirable since i16 instruction encodings are longer
56636   // and some i16 instructions are slow.
56637   // 8-bit multiply-by-constant can usually be expanded to something cheaper
56638   // using LEA and/or other ALU ops.
56639   if (VT != MVT::i16 && !Is8BitMulByConstant)
56640     return false;
56641 
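  // IsFoldableRMW matches the (store (op (load p), y), p) read-modify-write
  // shape; promoting such an op to i32 would block folding the load into the
  // memory operand of the final instruction.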
56642   auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
56643     if (!Op.hasOneUse())
56644       return false;
56645     SDNode *User = *Op->use_begin();
56646     if (!ISD::isNormalStore(User))
56647       return false;
56648     auto *Ld = cast<LoadSDNode>(Load);
56649     auto *St = cast<StoreSDNode>(User);
56650     return Ld->getBasePtr() == St->getBasePtr();
56651   };
56652 
56653   auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
56654     if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
56655       return false;
56656     if (!Op.hasOneUse())
56657       return false;
56658     SDNode *User = *Op->use_begin();
56659     if (User->getOpcode() != ISD::ATOMIC_STORE)
56660       return false;
56661     auto *Ld = cast<AtomicSDNode>(Load);
56662     auto *St = cast<AtomicSDNode>(User);
56663     return Ld->getBasePtr() == St->getBasePtr();
56664   };
56665 
56666   bool Commute = false;
56667   switch (Op.getOpcode()) {
56668   default: return false;
56669   case ISD::SIGN_EXTEND:
56670   case ISD::ZERO_EXTEND:
56671   case ISD::ANY_EXTEND:
56672     break;
56673   case ISD::SHL:
56674   case ISD::SRA:
56675   case ISD::SRL: {
56676     SDValue N0 = Op.getOperand(0);
56677     // Look out for (store (shl (load), x)).
56678     if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
56679       return false;
56680     break;
56681   }
56682   case ISD::ADD:
56683   case ISD::MUL:
56684   case ISD::AND:
56685   case ISD::OR:
56686   case ISD::XOR:
56687     Commute = true;
56688     [[fallthrough]];
56689   case ISD::SUB: {
56690     SDValue N0 = Op.getOperand(0);
56691     SDValue N1 = Op.getOperand(1);
56692     // Avoid disabling potential load folding opportunities.
56693     if (X86::mayFoldLoad(N1, Subtarget) &&
56694         (!Commute || !isa<ConstantSDNode>(N0) ||
56695          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
56696       return false;
56697     if (X86::mayFoldLoad(N0, Subtarget) &&
56698         ((Commute && !isa<ConstantSDNode>(N1)) ||
56699          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
56700       return false;
56701     if (IsFoldableAtomicRMW(N0, Op) ||
56702         (Commute && IsFoldableAtomicRMW(N1, Op)))
56703       return false;
56704   }
56705   }
56706 
56707   PVT = MVT::i32;
56708   return true;
56709 }
56710 
56711 //===----------------------------------------------------------------------===//
56712 //                           X86 Inline Assembly Support
56713 //===----------------------------------------------------------------------===//
56714 
56715 // Helper to match a string separated by whitespace.
56716 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
56717   S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
56718 
56719   for (StringRef Piece : Pieces) {
56720     if (!S.startswith(Piece)) // Check if the piece matches.
56721       return false;
56722 
56723     S = S.substr(Piece.size());
56724     StringRef::size_type Pos = S.find_first_not_of(" \t");
56725     if (Pos == 0) // We matched a prefix.
56726       return false;
56727 
56728     S = S.substr(Pos);
56729   }
56730 
56731   return S.empty();
56732 }
56733 
56734 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
56735 
56736   if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
56737     if (llvm::is_contained(AsmPieces, "~{cc}") &&
56738         llvm::is_contained(AsmPieces, "~{flags}") &&
56739         llvm::is_contained(AsmPieces, "~{fpsr}")) {
56740 
56741       if (AsmPieces.size() == 3)
56742         return true;
56743       else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
56744         return true;
56745     }
56746   }
56747   return false;
56748 }
56749 
56750 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
56751   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
56752 
56753   const std::string &AsmStr = IA->getAsmString();
56754 
56755   IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
56756   if (!Ty || Ty->getBitWidth() % 16 != 0)
56757     return false;
56758 
56759   // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
56760   SmallVector<StringRef, 4> AsmPieces;
56761   SplitString(AsmStr, AsmPieces, ";\n");
56762 
56763   switch (AsmPieces.size()) {
56764   default: return false;
56765   case 1:
56766     // FIXME: this should verify that we are targeting a 486 or better.  If not,
56767     // we will turn this bswap into something that will be lowered to logical
56768     // ops instead of emitting the bswap asm.  For now, we don't support 486 or
56769     // lower so don't worry about this.
56770     // bswap $0
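    // In C this is typically written as asm("bswap %0" : "+r"(x)), which the
    // checks below replace with a call to llvm.bswap.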
56771     if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
56772         matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
56773         matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
56774         matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
56775         matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
56776         matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
56777       // No need to check constraints; nothing other than the equivalent of
56778       // "=r,0" would be valid here.
56779       return IntrinsicLowering::LowerToByteSwap(CI);
56780     }
56781 
56782     // rorw $$8, ${0:w}  -->  llvm.bswap.i16
56783     if (CI->getType()->isIntegerTy(16) &&
56784         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56785         (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
56786          matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
56787       AsmPieces.clear();
56788       StringRef ConstraintsStr = IA->getConstraintString();
56789       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56790       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56791       if (clobbersFlagRegisters(AsmPieces))
56792         return IntrinsicLowering::LowerToByteSwap(CI);
56793     }
56794     break;
56795   case 3:
56796     if (CI->getType()->isIntegerTy(32) &&
56797         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56798         matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
56799         matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
56800         matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
56801       AsmPieces.clear();
56802       StringRef ConstraintsStr = IA->getConstraintString();
56803       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56804       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56805       if (clobbersFlagRegisters(AsmPieces))
56806         return IntrinsicLowering::LowerToByteSwap(CI);
56807     }
56808 
56809     if (CI->getType()->isIntegerTy(64)) {
56810       InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
56811       if (Constraints.size() >= 2 &&
56812           Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
56813           Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
56814         // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
56815         if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
56816             matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
56817             matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
56818           return IntrinsicLowering::LowerToByteSwap(CI);
56819       }
56820     }
56821     break;
56822   }
56823   return false;
56824 }
56825 
56826 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
56827   X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
56828                            .Case("{@cca}", X86::COND_A)
56829                            .Case("{@ccae}", X86::COND_AE)
56830                            .Case("{@ccb}", X86::COND_B)
56831                            .Case("{@ccbe}", X86::COND_BE)
56832                            .Case("{@ccc}", X86::COND_B)
56833                            .Case("{@cce}", X86::COND_E)
56834                            .Case("{@ccz}", X86::COND_E)
56835                            .Case("{@ccg}", X86::COND_G)
56836                            .Case("{@ccge}", X86::COND_GE)
56837                            .Case("{@ccl}", X86::COND_L)
56838                            .Case("{@ccle}", X86::COND_LE)
56839                            .Case("{@ccna}", X86::COND_BE)
56840                            .Case("{@ccnae}", X86::COND_B)
56841                            .Case("{@ccnb}", X86::COND_AE)
56842                            .Case("{@ccnbe}", X86::COND_A)
56843                            .Case("{@ccnc}", X86::COND_AE)
56844                            .Case("{@ccne}", X86::COND_NE)
56845                            .Case("{@ccnz}", X86::COND_NE)
56846                            .Case("{@ccng}", X86::COND_LE)
56847                            .Case("{@ccnge}", X86::COND_L)
56848                            .Case("{@ccnl}", X86::COND_GE)
56849                            .Case("{@ccnle}", X86::COND_G)
56850                            .Case("{@ccno}", X86::COND_NO)
56851                            .Case("{@ccnp}", X86::COND_NP)
56852                            .Case("{@ccns}", X86::COND_NS)
56853                            .Case("{@cco}", X86::COND_O)
56854                            .Case("{@ccp}", X86::COND_P)
56855                            .Case("{@ccs}", X86::COND_S)
56856                            .Default(X86::COND_INVALID);
56857   return Cond;
56858 }
56859 
56860 /// Given a constraint letter, return the type of constraint for this target.
56861 X86TargetLowering::ConstraintType
56862 X86TargetLowering::getConstraintType(StringRef Constraint) const {
56863   if (Constraint.size() == 1) {
56864     switch (Constraint[0]) {
56865     case 'R':
56866     case 'q':
56867     case 'Q':
56868     case 'f':
56869     case 't':
56870     case 'u':
56871     case 'y':
56872     case 'x':
56873     case 'v':
56874     case 'l':
56875     case 'k': // AVX512 masking registers.
56876       return C_RegisterClass;
56877     case 'a':
56878     case 'b':
56879     case 'c':
56880     case 'd':
56881     case 'S':
56882     case 'D':
56883     case 'A':
56884       return C_Register;
56885     case 'I':
56886     case 'J':
56887     case 'K':
56888     case 'N':
56889     case 'G':
56890     case 'L':
56891     case 'M':
56892       return C_Immediate;
56893     case 'C':
56894     case 'e':
56895     case 'Z':
56896       return C_Other;
56897     default:
56898       break;
56899     }
56900   }
56901   else if (Constraint.size() == 2) {
56902     switch (Constraint[0]) {
56903     default:
56904       break;
56905     case 'Y':
56906       switch (Constraint[1]) {
56907       default:
56908         break;
56909       case 'z':
56910         return C_Register;
56911       case 'i':
56912       case 'm':
56913       case 'k':
56914       case 't':
56915       case '2':
56916         return C_RegisterClass;
56917       }
56918     }
56919   } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
56920     return C_Other;
56921   return TargetLowering::getConstraintType(Constraint);
56922 }
56923 
56924 /// Examine constraint type and operand type and determine a weight value.
56925 /// This object must already have been set up with the operand type
56926 /// and the current alternative constraint selected.
56927 TargetLowering::ConstraintWeight
56928   X86TargetLowering::getSingleConstraintMatchWeight(
56929     AsmOperandInfo &info, const char *constraint) const {
56930   ConstraintWeight weight = CW_Invalid;
56931   Value *CallOperandVal = info.CallOperandVal;
56932     // If we don't have a value, we can't do a match,
56933     // but allow it at the lowest weight.
56934   if (!CallOperandVal)
56935     return CW_Default;
56936   Type *type = CallOperandVal->getType();
56937   // Look at the constraint type.
56938   switch (*constraint) {
56939   default:
56940     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
56941     [[fallthrough]];
56942   case 'R':
56943   case 'q':
56944   case 'Q':
56945   case 'a':
56946   case 'b':
56947   case 'c':
56948   case 'd':
56949   case 'S':
56950   case 'D':
56951   case 'A':
56952     if (CallOperandVal->getType()->isIntegerTy())
56953       weight = CW_SpecificReg;
56954     break;
56955   case 'f':
56956   case 't':
56957   case 'u':
56958     if (type->isFloatingPointTy())
56959       weight = CW_SpecificReg;
56960     break;
56961   case 'y':
56962     if (type->isX86_MMXTy() && Subtarget.hasMMX())
56963       weight = CW_SpecificReg;
56964     break;
56965   case 'Y':
56966     if (StringRef(constraint).size() != 2)
56967       break;
56968     switch (constraint[1]) {
56969       default:
56970         return CW_Invalid;
56971       // XMM0
56972       case 'z':
56973         if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56974             ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
56975             ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
56976           return CW_SpecificReg;
56977         return CW_Invalid;
56978       // Conditional OpMask regs (AVX512)
56979       case 'k':
56980         if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56981           return CW_Register;
56982         return CW_Invalid;
56983       // Any MMX reg
56984       case 'm':
56985         if (type->isX86_MMXTy() && Subtarget.hasMMX())
56986           return weight;
56987         return CW_Invalid;
56988       // Any SSE reg when ISA >= SSE2, same as 'x'
56989       case 'i':
56990       case 't':
56991       case '2':
56992         if (!Subtarget.hasSSE2())
56993           return CW_Invalid;
56994         break;
56995     }
56996     break;
56997   case 'v':
56998     if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
56999       weight = CW_Register;
57000     [[fallthrough]];
57001   case 'x':
57002     if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
57003         ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
57004       weight = CW_Register;
57005     break;
57006   case 'k':
57007     // Enable conditional vector operations using %k<#> registers.
57008     if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
57009       weight = CW_Register;
57010     break;
57011   case 'I':
57012     if (auto *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
57013       if (C->getZExtValue() <= 31)
57014         weight = CW_Constant;
57015     }
57016     break;
57017   case 'J':
57018     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57019       if (C->getZExtValue() <= 63)
57020         weight = CW_Constant;
57021     }
57022     break;
57023   case 'K':
57024     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57025       if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
57026         weight = CW_Constant;
57027     }
57028     break;
57029   case 'L':
57030     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57031       if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
57032         weight = CW_Constant;
57033     }
57034     break;
57035   case 'M':
57036     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57037       if (C->getZExtValue() <= 3)
57038         weight = CW_Constant;
57039     }
57040     break;
57041   case 'N':
57042     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57043       if (C->getZExtValue() <= 0xff)
57044         weight = CW_Constant;
57045     }
57046     break;
57047   case 'G':
57048   case 'C':
57049     if (isa<ConstantFP>(CallOperandVal)) {
57050       weight = CW_Constant;
57051     }
57052     break;
57053   case 'e':
57054     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57055       if ((C->getSExtValue() >= -0x80000000LL) &&
57056           (C->getSExtValue() <= 0x7fffffffLL))
57057         weight = CW_Constant;
57058     }
57059     break;
57060   case 'Z':
57061     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57062       if (C->getZExtValue() <= 0xffffffff)
57063         weight = CW_Constant;
57064     }
57065     break;
57066   }
57067   return weight;
57068 }
57069 
57070 /// Try to replace an X constraint, which matches anything, with another that
57071 /// has more specific requirements based on the type of the corresponding
57072 /// operand.
57073 const char *X86TargetLowering::
57074 LowerXConstraint(EVT ConstraintVT) const {
57075   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
57076   // 'f' like normal targets.
57077   if (ConstraintVT.isFloatingPoint()) {
57078     if (Subtarget.hasSSE1())
57079       return "x";
57080   }
57081 
57082   return TargetLowering::LowerXConstraint(ConstraintVT);
57083 }
57084 
57085 // Lower @cc targets via setcc.
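// For example, an output constraint "=@ccg" copies EFLAGS after the asm,
// materializes a SETG of it, and zero-extends the result to the declared
// output type.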
57086 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
57087     SDValue &Chain, SDValue &Flag, const SDLoc &DL,
57088     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
57089   X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
57090   if (Cond == X86::COND_INVALID)
57091     return SDValue();
57092   // Check that return type is valid.
57093   if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
57094       OpInfo.ConstraintVT.getSizeInBits() < 8)
57095     report_fatal_error("Flag output operand is of invalid type");
57096 
57097   // Get EFLAGS register. Only update chain when copyfrom is glued.
57098   if (Flag.getNode()) {
57099     Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
57100     Chain = Flag.getValue(1);
57101   } else
57102     Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
57103   // Extract CC code.
57104   SDValue CC = getSETCC(Cond, Flag, DL, DAG);
57105   // Extend to 32-bits
57106   // Zero-extend to the result type.
57107 
57108   return Result;
57109 }
57110 
57111 /// Lower the specified operand into the Ops vector.
57112 /// If it is invalid, don't add anything to Ops.
57113 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
57114                                                      std::string &Constraint,
57115                                                      std::vector<SDValue>&Ops,
57116                                                      SelectionDAG &DAG) const {
57117   SDValue Result;
57118 
57119   // Only support length 1 constraints for now.
57120   if (Constraint.length() > 1) return;
57121 
57122   char ConstraintLetter = Constraint[0];
57123   switch (ConstraintLetter) {
57124   default: break;
57125   case 'I':
57126     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57127       if (C->getZExtValue() <= 31) {
57128         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57129                                        Op.getValueType());
57130         break;
57131       }
57132     }
57133     return;
57134   case 'J':
57135     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57136       if (C->getZExtValue() <= 63) {
57137         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57138                                        Op.getValueType());
57139         break;
57140       }
57141     }
57142     return;
57143   case 'K':
57144     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57145       if (isInt<8>(C->getSExtValue())) {
57146         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57147                                        Op.getValueType());
57148         break;
57149       }
57150     }
57151     return;
57152   case 'L':
57153     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57154       if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
57155           (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
57156         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
57157                                        Op.getValueType());
57158         break;
57159       }
57160     }
57161     return;
57162   case 'M':
57163     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57164       if (C->getZExtValue() <= 3) {
57165         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57166                                        Op.getValueType());
57167         break;
57168       }
57169     }
57170     return;
57171   case 'N':
57172     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57173       if (C->getZExtValue() <= 255) {
57174         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57175                                        Op.getValueType());
57176         break;
57177       }
57178     }
57179     return;
57180   case 'O':
57181     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57182       if (C->getZExtValue() <= 127) {
57183         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57184                                        Op.getValueType());
57185         break;
57186       }
57187     }
57188     return;
57189   case 'e': {
57190     // 32-bit signed value
57191     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57192       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57193                                            C->getSExtValue())) {
57194         // Widen to 64 bits here to get it sign extended.
57195         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
57196         break;
57197       }
57198     // FIXME gcc accepts some relocatable values here too, but only in certain
57199     // memory models; it's complicated.
57200     }
57201     return;
57202   }
57203   case 'Z': {
57204     // 32-bit unsigned value
57205     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57206       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57207                                            C->getZExtValue())) {
57208         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57209                                        Op.getValueType());
57210         break;
57211       }
57212     }
57213     // FIXME gcc accepts some relocatable values here too, but only in certain
57214     // memory models; it's complicated.
57215     return;
57216   }
57217   case 'i': {
57218     // Literal immediates are always ok.
57219     if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
57220       bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
57221       BooleanContent BCont = getBooleanContents(MVT::i64);
57222       ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
57223                                     : ISD::SIGN_EXTEND;
57224       int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
57225                                                   : CST->getSExtValue();
57226       Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
57227       break;
57228     }
57229 
57230     // In any sort of PIC mode, addresses need to be computed at runtime by
57231     // adding in a register or some sort of table lookup.  These can't
57232     // be used as immediates. BlockAddresses and BasicBlocks are fine though.
57233     if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
57234         !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
57235       return;
57236 
57237     // If we are in non-pic codegen mode, we allow the address of a global (with
57238     // an optional displacement) to be used with 'i'.
57239     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57240       // If we require an extra load to get this address, as in PIC mode, we
57241       // can't accept it.
57242       if (isGlobalStubReference(
57243               Subtarget.classifyGlobalReference(GA->getGlobal())))
57244         return;
57245     break;
57246   }
57247   }
57248 
57249   if (Result.getNode()) {
57250     Ops.push_back(Result);
57251     return;
57252   }
57253   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
57254 }
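// Example (illustrative, not part of the upstream source; 'x' is assumed to be
// a 64-bit integer variable): the letters handled above are the usual GCC
// immediate constraints, e.g.
//   __asm__ volatile("pslldq %0, %%xmm0" : : "O"(8) : "xmm0"); // 'O': 0..127
//   __asm__("addq %1, %0" : "+r"(x) : "e"(-1));                // 'e': simm32
// A matching ConstantSDNode is folded into a target constant here; anything
// else (non-constant or out of range) falls back to the generic
// TargetLowering::LowerAsmOperandForConstraint handling.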
57255 
57256 /// Check if \p RC is a general purpose register class.
57257 /// I.e., GR* or one of their variant.
57258 static bool isGRClass(const TargetRegisterClass &RC) {
57259   return RC.hasSuperClassEq(&X86::GR8RegClass) ||
57260          RC.hasSuperClassEq(&X86::GR16RegClass) ||
57261          RC.hasSuperClassEq(&X86::GR32RegClass) ||
57262          RC.hasSuperClassEq(&X86::GR64RegClass) ||
57263          RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
57264 }
57265 
57266 /// Check if \p RC is a vector register class.
57267 /// I.e., FR* / VR* or one of their variant.
57268 static bool isFRClass(const TargetRegisterClass &RC) {
57269   return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
57270          RC.hasSuperClassEq(&X86::FR32XRegClass) ||
57271          RC.hasSuperClassEq(&X86::FR64XRegClass) ||
57272          RC.hasSuperClassEq(&X86::VR128XRegClass) ||
57273          RC.hasSuperClassEq(&X86::VR256XRegClass) ||
57274          RC.hasSuperClassEq(&X86::VR512RegClass);
57275 }
57276 
57277 /// Check if \p RC is a mask register class.
57278 /// I.e., VK* or one of their variant.
57279 static bool isVKClass(const TargetRegisterClass &RC) {
57280   return RC.hasSuperClassEq(&X86::VK1RegClass) ||
57281          RC.hasSuperClassEq(&X86::VK2RegClass) ||
57282          RC.hasSuperClassEq(&X86::VK4RegClass) ||
57283          RC.hasSuperClassEq(&X86::VK8RegClass) ||
57284          RC.hasSuperClassEq(&X86::VK16RegClass) ||
57285          RC.hasSuperClassEq(&X86::VK32RegClass) ||
57286          RC.hasSuperClassEq(&X86::VK64RegClass);
57287 }
57288 
57289 std::pair<unsigned, const TargetRegisterClass *>
57290 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
57291                                                 StringRef Constraint,
57292                                                 MVT VT) const {
57293   // First, see if this is a constraint that directly corresponds to an LLVM
57294   // register class.
57295   if (Constraint.size() == 1) {
57296     // GCC Constraint Letters
57297     switch (Constraint[0]) {
57298     default: break;
57299     // 'A' means [ER]AX + [ER]DX.
57300     case 'A':
57301       if (Subtarget.is64Bit())
57302         return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
57303       assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
57304              "Expecting 64, 32 or 16 bit subtarget");
57305       return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57306 
57307       // TODO: Slight differences here in allocation order and leaving
57308       // RIP in the class. Do they matter any more here than they do
57309       // in the normal allocation?
57310     case 'k':
57311       if (Subtarget.hasAVX512()) {
57312         if (VT == MVT::i1)
57313           return std::make_pair(0U, &X86::VK1RegClass);
57314         if (VT == MVT::i8)
57315           return std::make_pair(0U, &X86::VK8RegClass);
57316         if (VT == MVT::i16)
57317           return std::make_pair(0U, &X86::VK16RegClass);
57318       }
57319       if (Subtarget.hasBWI()) {
57320         if (VT == MVT::i32)
57321           return std::make_pair(0U, &X86::VK32RegClass);
57322         if (VT == MVT::i64)
57323           return std::make_pair(0U, &X86::VK64RegClass);
57324       }
57325       break;
57326     case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
57327       if (Subtarget.is64Bit()) {
57328         if (VT == MVT::i8 || VT == MVT::i1)
57329           return std::make_pair(0U, &X86::GR8RegClass);
57330         if (VT == MVT::i16)
57331           return std::make_pair(0U, &X86::GR16RegClass);
57332         if (VT == MVT::i32 || VT == MVT::f32)
57333           return std::make_pair(0U, &X86::GR32RegClass);
57334         if (VT != MVT::f80 && !VT.isVector())
57335           return std::make_pair(0U, &X86::GR64RegClass);
57336         break;
57337       }
57338       [[fallthrough]];
57339       // 32-bit fallthrough
57340     case 'Q':   // Q_REGS
57341       if (VT == MVT::i8 || VT == MVT::i1)
57342         return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
57343       if (VT == MVT::i16)
57344         return std::make_pair(0U, &X86::GR16_ABCDRegClass);
57345       if (VT == MVT::i32 || VT == MVT::f32 ||
57346           (!VT.isVector() && !Subtarget.is64Bit()))
57347         return std::make_pair(0U, &X86::GR32_ABCDRegClass);
57348       if (VT != MVT::f80 && !VT.isVector())
57349         return std::make_pair(0U, &X86::GR64_ABCDRegClass);
57350       break;
57351     case 'r':   // GENERAL_REGS
57352     case 'l':   // INDEX_REGS
57353       if (VT == MVT::i8 || VT == MVT::i1)
57354         return std::make_pair(0U, &X86::GR8RegClass);
57355       if (VT == MVT::i16)
57356         return std::make_pair(0U, &X86::GR16RegClass);
57357       if (VT == MVT::i32 || VT == MVT::f32 ||
57358           (!VT.isVector() && !Subtarget.is64Bit()))
57359         return std::make_pair(0U, &X86::GR32RegClass);
57360       if (VT != MVT::f80 && !VT.isVector())
57361         return std::make_pair(0U, &X86::GR64RegClass);
57362       break;
57363     case 'R':   // LEGACY_REGS
57364       if (VT == MVT::i8 || VT == MVT::i1)
57365         return std::make_pair(0U, &X86::GR8_NOREXRegClass);
57366       if (VT == MVT::i16)
57367         return std::make_pair(0U, &X86::GR16_NOREXRegClass);
57368       if (VT == MVT::i32 || VT == MVT::f32 ||
57369           (!VT.isVector() && !Subtarget.is64Bit()))
57370         return std::make_pair(0U, &X86::GR32_NOREXRegClass);
57371       if (VT != MVT::f80 && !VT.isVector())
57372         return std::make_pair(0U, &X86::GR64_NOREXRegClass);
57373       break;
57374     case 'f':  // FP Stack registers.
57375       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
57376       // value to the correct fpstack register class.
57377       if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
57378         return std::make_pair(0U, &X86::RFP32RegClass);
57379       if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
57380         return std::make_pair(0U, &X86::RFP64RegClass);
57381       if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
57382         return std::make_pair(0U, &X86::RFP80RegClass);
57383       break;
57384     case 'y':   // MMX_REGS if MMX allowed.
57385       if (!Subtarget.hasMMX()) break;
57386       return std::make_pair(0U, &X86::VR64RegClass);
57387     case 'v':
57388     case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
57389       if (!Subtarget.hasSSE1()) break;
57390       bool VConstraint = (Constraint[0] == 'v');
57391 
57392       switch (VT.SimpleTy) {
57393       default: break;
57394       // Scalar SSE types.
57395       case MVT::f16:
57396         if (VConstraint && Subtarget.hasFP16())
57397           return std::make_pair(0U, &X86::FR16XRegClass);
57398         break;
57399       case MVT::f32:
57400       case MVT::i32:
57401         if (VConstraint && Subtarget.hasVLX())
57402           return std::make_pair(0U, &X86::FR32XRegClass);
57403         return std::make_pair(0U, &X86::FR32RegClass);
57404       case MVT::f64:
57405       case MVT::i64:
57406         if (VConstraint && Subtarget.hasVLX())
57407           return std::make_pair(0U, &X86::FR64XRegClass);
57408         return std::make_pair(0U, &X86::FR64RegClass);
57409       case MVT::i128:
57410         if (Subtarget.is64Bit()) {
57411           if (VConstraint && Subtarget.hasVLX())
57412             return std::make_pair(0U, &X86::VR128XRegClass);
57413           return std::make_pair(0U, &X86::VR128RegClass);
57414         }
57415         break;
57416       // Vector types and fp128.
57417       case MVT::v8f16:
57418         if (!Subtarget.hasFP16())
57419           break;
57420         [[fallthrough]];
57421       case MVT::f128:
57422       case MVT::v16i8:
57423       case MVT::v8i16:
57424       case MVT::v4i32:
57425       case MVT::v2i64:
57426       case MVT::v4f32:
57427       case MVT::v2f64:
57428         if (VConstraint && Subtarget.hasVLX())
57429           return std::make_pair(0U, &X86::VR128XRegClass);
57430         return std::make_pair(0U, &X86::VR128RegClass);
57431       // AVX types.
57432       case MVT::v16f16:
57433         if (!Subtarget.hasFP16())
57434           break;
57435         [[fallthrough]];
57436       case MVT::v32i8:
57437       case MVT::v16i16:
57438       case MVT::v8i32:
57439       case MVT::v4i64:
57440       case MVT::v8f32:
57441       case MVT::v4f64:
57442         if (VConstraint && Subtarget.hasVLX())
57443           return std::make_pair(0U, &X86::VR256XRegClass);
57444         if (Subtarget.hasAVX())
57445           return std::make_pair(0U, &X86::VR256RegClass);
57446         break;
57447       case MVT::v32f16:
57448         if (!Subtarget.hasFP16())
57449           break;
57450         [[fallthrough]];
57451       case MVT::v64i8:
57452       case MVT::v32i16:
57453       case MVT::v8f64:
57454       case MVT::v16f32:
57455       case MVT::v16i32:
57456       case MVT::v8i64:
57457         if (!Subtarget.hasAVX512()) break;
57458         if (VConstraint)
57459           return std::make_pair(0U, &X86::VR512RegClass);
57460         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57461       }
57462       break;
57463     }
57464   } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
57465     switch (Constraint[1]) {
57466     default:
57467       break;
57468     case 'i':
57469     case 't':
57470     case '2':
57471       return getRegForInlineAsmConstraint(TRI, "x", VT);
57472     case 'm':
57473       if (!Subtarget.hasMMX()) break;
57474       return std::make_pair(0U, &X86::VR64RegClass);
57475     case 'z':
57476       if (!Subtarget.hasSSE1()) break;
57477       switch (VT.SimpleTy) {
57478       default: break;
57479       // Scalar SSE types.
57480       case MVT::f16:
57481         if (!Subtarget.hasFP16())
57482           break;
57483         return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
57484       case MVT::f32:
57485       case MVT::i32:
57486         return std::make_pair(X86::XMM0, &X86::FR32RegClass);
57487       case MVT::f64:
57488       case MVT::i64:
57489         return std::make_pair(X86::XMM0, &X86::FR64RegClass);
57490       case MVT::v8f16:
57491         if (!Subtarget.hasFP16())
57492           break;
57493         [[fallthrough]];
57494       case MVT::f128:
57495       case MVT::v16i8:
57496       case MVT::v8i16:
57497       case MVT::v4i32:
57498       case MVT::v2i64:
57499       case MVT::v4f32:
57500       case MVT::v2f64:
57501         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57502       // AVX types.
57503       case MVT::v16f16:
57504         if (!Subtarget.hasFP16())
57505           break;
57506         [[fallthrough]];
57507       case MVT::v32i8:
57508       case MVT::v16i16:
57509       case MVT::v8i32:
57510       case MVT::v4i64:
57511       case MVT::v8f32:
57512       case MVT::v4f64:
57513         if (Subtarget.hasAVX())
57514           return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57515         break;
57516       case MVT::v32f16:
57517         if (!Subtarget.hasFP16())
57518           break;
57519         [[fallthrough]];
57520       case MVT::v64i8:
57521       case MVT::v32i16:
57522       case MVT::v8f64:
57523       case MVT::v16f32:
57524       case MVT::v16i32:
57525       case MVT::v8i64:
57526         if (Subtarget.hasAVX512())
57527           return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57528         break;
57529       }
57530       break;
57531     case 'k':
57532       // This register class doesn't allocate k0 for masked vector operations.
57533       if (Subtarget.hasAVX512()) {
57534         if (VT == MVT::i1)
57535           return std::make_pair(0U, &X86::VK1WMRegClass);
57536         if (VT == MVT::i8)
57537           return std::make_pair(0U, &X86::VK8WMRegClass);
57538         if (VT == MVT::i16)
57539           return std::make_pair(0U, &X86::VK16WMRegClass);
57540       }
57541       if (Subtarget.hasBWI()) {
57542         if (VT == MVT::i32)
57543           return std::make_pair(0U, &X86::VK32WMRegClass);
57544         if (VT == MVT::i64)
57545           return std::make_pair(0U, &X86::VK64WMRegClass);
57546       }
57547       break;
57548     }
57549   }
57550 
57551   if (parseConstraintCode(Constraint) != X86::COND_INVALID)
57552     return std::make_pair(0U, &X86::GR32RegClass);
57553 
57554   // Use the default implementation in TargetLowering to convert the register
57555   // constraint into a member of a register class.
57556   std::pair<Register, const TargetRegisterClass*> Res;
57557   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
57558 
57559   // Not found as a standard register?
57560   if (!Res.second) {
57561     // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
57562     // to/from f80.
57563     if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
57564       // Map "st(0)" .. "st(7)" to FP0 .. FP7.
57565       if (Constraint.size() == 7 && Constraint[0] == '{' &&
57566           tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
57567           Constraint[3] == '(' &&
57568           (Constraint[4] >= '0' && Constraint[4] <= '7') &&
57569           Constraint[5] == ')' && Constraint[6] == '}') {
57570         // st(7) is not allocatable and thus not a member of RFP80. Return a
57571         // singleton class in cases where we have a reference to it.
57572         if (Constraint[4] == '7')
57573           return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
57574         return std::make_pair(X86::FP0 + Constraint[4] - '0',
57575                               &X86::RFP80RegClass);
57576       }
57577 
57578       // GCC allows "st(0)" to be called just plain "st".
57579       if (StringRef("{st}").equals_insensitive(Constraint))
57580         return std::make_pair(X86::FP0, &X86::RFP80RegClass);
57581     }
57582 
57583     // flags -> EFLAGS
57584     if (StringRef("{flags}").equals_insensitive(Constraint))
57585       return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
57586 
57587     // dirflag -> DF
57588     // Only allow for clobber.
57589     if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
57590         VT == MVT::Other)
57591       return std::make_pair(X86::DF, &X86::DFCCRRegClass);
57592 
57593     // fpsr -> FPSW
57594     if (StringRef("{fpsr}").equals_insensitive(Constraint))
57595       return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
57596 
57597     return Res;
57598   }
57599 
57600   // Make sure it isn't a register that requires 64-bit mode.
57601   if (!Subtarget.is64Bit() &&
57602       (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
57603       TRI->getEncodingValue(Res.first) >= 8) {
57604     // Register requires REX prefix, but we're in 32-bit mode.
57605     return std::make_pair(0, nullptr);
57606   }
57607 
57608   // Make sure it isn't a register that requires AVX512.
57609   if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
57610       TRI->getEncodingValue(Res.first) & 0x10) {
57611     // Register requires EVEX prefix.
57612     return std::make_pair(0, nullptr);
57613   }
57614 
57615   // Otherwise, check to see if this is a register class of the wrong value
57616   // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it to
57617   // turn into {ax},{dx}.
57618   // MVT::Other is used to specify clobber names.
57619   if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
57620     return Res;   // Correct type already, nothing to do.
57621 
57622   // Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
57623   // return "eax". This should even work for things like getting 64-bit integer
57624   // registers when given an f64 type.
57625   const TargetRegisterClass *Class = Res.second;
57626   // The generic code will match the first register class that contains the
57627   // given register. Thus, based on the ordering of the tablegened file,
57628   // the "plain" GR classes might not come first.
57629   // Therefore, use a helper method.
57630   if (isGRClass(*Class)) {
57631     unsigned Size = VT.getSizeInBits();
57632     if (Size == 1) Size = 8;
57633     Register DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
57634     if (DestReg > 0) {
57635       bool is64Bit = Subtarget.is64Bit();
57636       const TargetRegisterClass *RC =
57637           Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
57638         : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
57639         : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
57640         : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
57641         : nullptr;
57642       if (Size == 64 && !is64Bit) {
57643         // Model GCC's behavior here and select a fixed pair of 32-bit
57644         // registers.
57645         switch (DestReg) {
57646         case X86::RAX:
57647           return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57648         case X86::RDX:
57649           return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
57650         case X86::RCX:
57651           return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
57652         case X86::RBX:
57653           return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
57654         case X86::RSI:
57655           return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
57656         case X86::RDI:
57657           return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
57658         case X86::RBP:
57659           return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
57660         default:
57661           return std::make_pair(0, nullptr);
57662         }
57663       }
57664       if (RC && RC->contains(DestReg))
57665         return std::make_pair(DestReg, RC);
57666       return Res;
57667     }
57668     // No register found/type mismatch.
57669     return std::make_pair(0, nullptr);
57670   } else if (isFRClass(*Class)) {
57671     // Handle references to XMM physical registers that got mapped into the
57672     // wrong class.  This can happen with constraints like {xmm0} where the
57673     // target independent register mapper will just pick the first match it can
57674     // find, ignoring the required type.
57675 
57676     // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
57677     if (VT == MVT::f16)
57678       Res.second = &X86::FR16XRegClass;
57679     else if (VT == MVT::f32 || VT == MVT::i32)
57680       Res.second = &X86::FR32XRegClass;
57681     else if (VT == MVT::f64 || VT == MVT::i64)
57682       Res.second = &X86::FR64XRegClass;
57683     else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
57684       Res.second = &X86::VR128XRegClass;
57685     else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
57686       Res.second = &X86::VR256XRegClass;
57687     else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
57688       Res.second = &X86::VR512RegClass;
57689     else {
57690       // Type mismatch and not a clobber: return an error.
57691       Res.first = 0;
57692       Res.second = nullptr;
57693     }
57694   } else if (isVKClass(*Class)) {
57695     if (VT == MVT::i1)
57696       Res.second = &X86::VK1RegClass;
57697     else if (VT == MVT::i8)
57698       Res.second = &X86::VK8RegClass;
57699     else if (VT == MVT::i16)
57700       Res.second = &X86::VK16RegClass;
57701     else if (VT == MVT::i32)
57702       Res.second = &X86::VK32RegClass;
57703     else if (VT == MVT::i64)
57704       Res.second = &X86::VK64RegClass;
57705     else {
57706       // Type mismatch and not a clobber: return an error.
57707       Res.first = 0;
57708       Res.second = nullptr;
57709     }
57710   }
57711 
57712   return Res;
57713 }
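// Example (illustrative, not part of the upstream source): given a subtarget
// with the required features, the mapping above yields, e.g.:
//   ("r",       MVT::i32)   -> {0,         &X86::GR32RegClass}
//   ("Yz",      MVT::v4f32) -> {X86::XMM0, &X86::VR128RegClass}
//   ("{st(2)}", MVT::f80)   -> {X86::FP2,  &X86::RFP80RegClass}
// A zero first element means "any register in the class"; a nonzero element
// pins the operand to that specific physical register.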
57714 
57715 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
57716   // Integer division on x86 is expensive. However, when aggressively optimizing
57717   // for code size, we prefer to use a div instruction, as it is usually smaller
57718   // than the alternative sequence.
57719   // The exception to this is vector division. Since x86 doesn't have vector
57720   // integer division, leaving the division as-is is a loss even in terms of
57721   // size, because it will have to be scalarized, while the alternative code
57722   // sequence can be performed in vector form.
57723   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
57724   return OptSize && !VT.isVector();
57725 }
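// Example (illustrative): for a function carrying the IR attribute
//   attributes #0 = { minsize }
// a scalar 'sdiv i32 %a, 10' is left as a real division instruction instead
// of being rewritten into a multiply/shift sequence, while vector divisions
// are still rewritten because scalarizing them would be both larger and
// slower.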
57726 
57727 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
57728   if (!Subtarget.is64Bit())
57729     return;
57730 
57731   // Update IsSplitCSR in X86MachineFunctionInfo.
57732   X86MachineFunctionInfo *AFI =
57733       Entry->getParent()->getInfo<X86MachineFunctionInfo>();
57734   AFI->setIsSplitCSR(true);
57735 }
57736 
57737 void X86TargetLowering::insertCopiesSplitCSR(
57738     MachineBasicBlock *Entry,
57739     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
57740   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
57741   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
57742   if (!IStart)
57743     return;
57744 
57745   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
57746   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
57747   MachineBasicBlock::iterator MBBI = Entry->begin();
57748   for (const MCPhysReg *I = IStart; *I; ++I) {
57749     const TargetRegisterClass *RC = nullptr;
57750     if (X86::GR64RegClass.contains(*I))
57751       RC = &X86::GR64RegClass;
57752     else
57753       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
57754 
57755     Register NewVR = MRI->createVirtualRegister(RC);
57756     // Create copy from CSR to a virtual register.
57757     // FIXME: this currently does not emit CFI pseudo-instructions, it works
57758     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
57759     // nounwind. If we want to generalize this later, we may need to emit
57760     // CFI pseudo-instructions.
57761     assert(
57762         Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
57763         "Function should be nounwind in insertCopiesSplitCSR!");
57764     Entry->addLiveIn(*I);
57765     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
57766         .addReg(*I);
57767 
57768     // Insert the copy-back instructions right before the terminator.
57769     for (auto *Exit : Exits)
57770       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
57771               TII->get(TargetOpcode::COPY), *I)
57772           .addReg(NewVR);
57773   }
57774 }
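// Example (illustrative): split-CSR lowering is used for functions with the
// cxx_fast_tlscc calling convention, e.g. a TLS wrapper declared in IR as
//   define cxx_fast_tlscc ptr @_ZTW5tlvar() ...   ; hypothetical symbol name
// On x86-64 the callee-saved GR64 registers are copied into virtual registers
// in the entry block and copied back before each return, letting the register
// allocator avoid saving them on paths that never clobber them.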
57775 
57776 bool X86TargetLowering::supportSwiftError() const {
57777   return Subtarget.is64Bit();
57778 }
57779 
57780 /// Returns true if stack probing through a function call is requested.
57781 bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
57782   return !getStackProbeSymbolName(MF).empty();
57783 }
57784 
57785 /// Returns true if stack probing through inline assembly is requested.
57786 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
57787 
57788   // No inline stack probes for Windows; it has its own mechanism.
57789   if (Subtarget.isOSWindows() ||
57790       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57791     return false;
57792 
57793   // If the function specifically requests inline stack probes, emit them.
57794   if (MF.getFunction().hasFnAttribute("probe-stack"))
57795     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
57796            "inline-asm";
57797 
57798   return false;
57799 }
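// Example (illustrative): a non-Windows function with the IR attribute
//   attributes #0 = { "probe-stack"="inline-asm" }
// gets inline stack probes, whereas a value such as
//   attributes #0 = { "probe-stack"="__my_probe" }   ; hypothetical symbol
// makes getStackProbeSymbolName() below return that symbol so that stack
// probing calls it instead.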
57800 
57801 /// Returns the name of the symbol used to emit stack probes or the empty
57802 /// string if not applicable.
57803 StringRef
57804 X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
57805   // Inline stack probes disable the stack probe call.
57806   if (hasInlineStackProbe(MF))
57807     return "";
57808 
57809   // If the function specifically requests stack probes, emit them.
57810   if (MF.getFunction().hasFnAttribute("probe-stack"))
57811     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
57812 
57813   // Generally, if we aren't on Windows, the platform ABI does not include
57814   // support for stack probes, so don't emit them.
57815   if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
57816       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57817     return "";
57818 
57819   // We need a stack probe to conform to the Windows ABI. Choose the right
57820   // symbol.
57821   if (Subtarget.is64Bit())
57822     return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
57823   return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
57824 }
57825 
57826 unsigned
57827 X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
57828   // The default stack probe size is 4096 if the function has no
57829   // "stack-probe-size" attribute.
57830   return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
57831                                                         4096);
57832 }
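// Example (illustrative): with
//   attributes #0 = { "stack-probe-size"="8192" }
// frame lowering probes the stack in 8 KiB steps instead of the 4 KiB default
// returned above.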
57833 
57834 Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
57835   if (ML->isInnermost() &&
57836       ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
57837     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
57838   return TargetLowering::getPrefLoopAlignment();
57839 }
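// Example (illustrative): running llc with
//   -x86-experimental-pref-innermost-loop-alignment=5
// aligns innermost loops to 1 << 5 = 32 bytes; other loops keep the generic
// preferred loop alignment returned by the base class.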
57840