1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares codegen opcodes and related utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CODEGEN_ISDOPCODES_H
14 #define LLVM_CODEGEN_ISDOPCODES_H
15 
16 #include "llvm/CodeGen/ValueTypes.h"
17 
18 namespace llvm {
19 
20 /// ISD namespace - This namespace contains an enum which represents all of the
21 /// SelectionDAG node types and value types.
22 ///
23 namespace ISD {
24 
25 //===--------------------------------------------------------------------===//
26 /// ISD::NodeType enum - This enum defines the target-independent operators
27 /// for a SelectionDAG.
28 ///
29 /// Targets may also define target-dependent operator codes for SDNodes. For
30 /// example, on x86, these are the enum values in the X86ISD namespace.
31 /// Targets should aim to use target-independent operators to model their
32 /// instruction sets as much as possible, and only use target-dependent
33 /// operators when they have special requirements.
34 ///
/// Finally, during and after selection proper, SDNodes may use special
36 /// operator codes that correspond directly with MachineInstr opcodes. These
37 /// are used to represent selected instructions. See the isMachineOpcode()
38 /// and getMachineOpcode() member functions of SDNode.
39 ///
40 enum NodeType {
41 
42   /// DELETED_NODE - This is an illegal value that is used to catch
43   /// errors.  This opcode is not a legal opcode for any node.
44   DELETED_NODE,
45 
46   /// EntryToken - This is the marker used to indicate the start of a region.
47   EntryToken,
48 
49   /// TokenFactor - This node takes multiple tokens as input and produces a
50   /// single token result. This is used to represent the fact that the operand
51   /// operators are independent of each other.
52   TokenFactor,
53 
54   /// AssertSext, AssertZext - These nodes record if a register contains a
55   /// value that has already been zero or sign extended from a narrower type.
56   /// These nodes take two operands.  The first is the node that has already
57   /// been extended, and the second is a value type node indicating the width
58   /// of the extension.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion will not hold for that value.
61   AssertSext,
62   AssertZext,
63 
64   /// AssertAlign - These nodes record if a register contains a value that
65   /// has a known alignment and the trailing bits are known to be zero.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion will not hold for that value.
68   AssertAlign,
69 
70   /// Various leaf nodes.
71   BasicBlock,
72   VALUETYPE,
73   CONDCODE,
74   Register,
75   RegisterMask,
76   Constant,
77   ConstantFP,
78   GlobalAddress,
79   GlobalTLSAddress,
80   FrameIndex,
81   JumpTable,
82   ConstantPool,
83   ExternalSymbol,
84   BlockAddress,
85 
86   /// The address of the GOT
87   GLOBAL_OFFSET_TABLE,
88 
89   /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
90   /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
91   /// of the frame or return address to return.  An index of zero corresponds
92   /// to the current function's frame or return address, an index of one to
93   /// the parent's frame or return address, and so on.
94   FRAMEADDR,
95   RETURNADDR,
96 
97   /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
  /// This node takes no operands and returns a target-specific pointer to the
  /// place in the stack frame where the return address of the current
  /// function is stored.
101   ADDROFRETURNADDR,
102 
103   /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
104   /// and returns the stack pointer value at the entry of the current
105   /// function calling this intrinsic.
106   SPONENTRY,
107 
108   /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
109   /// Materializes the offset from the local object pointer of another
110   /// function to a particular local object passed to llvm.localescape. The
111   /// operand is the MCSymbol label used to represent this offset, since
112   /// typically the offset is not known until after code generation of the
113   /// parent.
114   LOCAL_RECOVER,
115 
  /// READ_REGISTER, WRITE_REGISTER - These nodes represent the
  /// llvm.read_register and llvm.write_register intrinsics on the DAG, which
  /// implement the named register global variables extension.
118   READ_REGISTER,
119   WRITE_REGISTER,
120 
  /// FRAME_TO_ARGS_OFFSET - This node represents the offset from the frame
  /// pointer to the first (possible) on-stack argument. This is needed for
  /// correct stack adjustment during unwind.
124   FRAME_TO_ARGS_OFFSET,
125 
126   /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
127   /// Frame Address (CFA), generally the value of the stack pointer at the
128   /// call site in the previous frame.
129   EH_DWARF_CFA,
130 
  /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents the
  /// 'eh_return' GCC DWARF builtin, which is used to return from an exception.
  /// The general meaning is: adjust the stack by OFFSET and pass execution to
  /// HANDLER. Many platform-specific details apply as well.
135   EH_RETURN,
136 
137   /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
138   /// This corresponds to the eh.sjlj.setjmp intrinsic.
139   /// It takes an input chain and a pointer to the jump buffer as inputs
140   /// and returns an outchain.
141   EH_SJLJ_SETJMP,
142 
143   /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
144   /// This corresponds to the eh.sjlj.longjmp intrinsic.
145   /// It takes an input chain and a pointer to the jump buffer as inputs
146   /// and returns an outchain.
147   EH_SJLJ_LONGJMP,
148 
149   /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
150   /// The target initializes the dispatch table here.
151   EH_SJLJ_SETUP_DISPATCH,
152 
153   /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
154   /// simplification, or lowering of the constant. They are used for constants
155   /// which are known to fit in the immediate fields of their users, or for
156   /// carrying magic numbers which are not values which need to be
157   /// materialized in registers.
158   TargetConstant,
159   TargetConstantFP,
160 
161   /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
162   /// anything else with this node, and this is valid in the target-specific
163   /// dag, turning into a GlobalAddress operand.
164   TargetGlobalAddress,
165   TargetGlobalTLSAddress,
166   TargetFrameIndex,
167   TargetJumpTable,
168   TargetConstantPool,
169   TargetExternalSymbol,
170   TargetBlockAddress,
171 
172   MCSymbol,
173 
174   /// TargetIndex - Like a constant pool entry, but with completely
175   /// target-dependent semantics. Holds target flags, a 32-bit index, and a
176   /// 64-bit index. Targets can use this however they like.
177   TargetIndex,
178 
179   /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
180   /// This node represents a target intrinsic function with no side effects.
181   /// The first operand is the ID number of the intrinsic from the
182   /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
183   /// node returns the result of the intrinsic.
184   INTRINSIC_WO_CHAIN,
185 
186   /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
187   /// This node represents a target intrinsic function with side effects that
188   /// returns a result.  The first operand is a chain pointer.  The second is
189   /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
190   /// operands to the intrinsic follow.  The node has two results, the result
191   /// of the intrinsic and an output chain.
192   INTRINSIC_W_CHAIN,
193 
194   /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
195   /// This node represents a target intrinsic function with side effects that
196   /// does not return a result.  The first operand is a chain pointer.  The
197   /// second is the ID number of the intrinsic from the llvm::Intrinsic
198   /// namespace.  The operands to the intrinsic follow.
199   INTRINSIC_VOID,
200 
201   /// CopyToReg - This node has three operands: a chain, a register number to
202   /// set to this value, and a value.
203   CopyToReg,
204 
205   /// CopyFromReg - This node indicates that the input value is a virtual or
206   /// physical register that is defined outside of the scope of this
207   /// SelectionDAG.  The register is available from the RegisterSDNode object.
  /// Note that CopyFromReg is also considered to freeze the value.
209   CopyFromReg,
210 
211   /// UNDEF - An undefined node.
212   UNDEF,
213 
  /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
  /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
  /// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
217   FREEZE,
218 
219   /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
220   /// a Constant, which is required to be operand #1) half of the integer or
221   /// float value specified as operand #0.  This is only for use before
222   /// legalization, for values that will be broken into multiple registers.
223   EXTRACT_ELEMENT,
224 
225   /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
226   /// Given two values of the same integer value type, this produces a value
227   /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
228   /// legalization. The lower part of the composite value should be in
229   /// element 0 and the upper part should be in element 1.
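  ///
  /// A minimal illustrative sketch of how these nodes might be built, assuming
  /// 'DAG' is a SelectionDAG, 'DL' an SDLoc, and 'Val' an i64 SDValue:
  /// \code
  ///   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val,
  ///                            DAG.getIntPtrConstant(0, DL));
  ///   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val,
  ///                            DAG.getIntPtrConstant(1, DL));
  ///   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
  /// \endcode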
230   BUILD_PAIR,
231 
232   /// MERGE_VALUES - This node takes multiple discrete operands and returns
  /// them all as its individual results.  This node has exactly the same
234   /// number of inputs and outputs. This node is useful for some pieces of the
235   /// code generator that want to think about a single node with multiple
236   /// results, not multiple nodes.
237   MERGE_VALUES,
238 
239   /// Simple integer binary arithmetic operators.
240   ADD,
241   SUB,
242   MUL,
243   SDIV,
244   UDIV,
245   SREM,
246   UREM,
247 
248   /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
249   /// a signed/unsigned value of type i[2*N], and return the full value as
250   /// two results, each of type iN.
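  ///
  /// A minimal illustrative sketch, assuming 'DAG' is a SelectionDAG, 'DL' an
  /// SDLoc, and 'LHS'/'RHS' are i32 SDValues:
  /// \code
  ///   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
  ///   SDValue Mul = DAG.getNode(ISD::UMUL_LOHI, DL, VTs, LHS, RHS);
  ///   SDValue Lo = Mul.getValue(0); // low 32 bits of the 64-bit product
  ///   SDValue Hi = Mul.getValue(1); // high 32 bits of the 64-bit product
  /// \endcode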
251   SMUL_LOHI,
252   UMUL_LOHI,
253 
254   /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
255   /// remainder result.
256   SDIVREM,
257   UDIVREM,
258 
259   /// CARRY_FALSE - This node is used when folding other nodes,
260   /// like ADDC/SUBC, which indicate the carry result is always false.
261   CARRY_FALSE,
262 
263   /// Carry-setting nodes for multiple precision addition and subtraction.
264   /// These nodes take two operands of the same value type, and produce two
265   /// results.  The first result is the normal add or sub result, the second
266   /// result is the carry flag result.
267   /// FIXME: These nodes are deprecated in favor of UADDO_CARRY and USUBO_CARRY.
268   /// They are kept around for now to provide a smooth transition path
269   /// toward the use of UADDO_CARRY/USUBO_CARRY and will eventually be removed.
270   ADDC,
271   SUBC,
272 
273   /// Carry-using nodes for multiple precision addition and subtraction. These
274   /// nodes take three operands: The first two are the normal lhs and rhs to
275   /// the add or sub, and the third is the input carry flag.  These nodes
276   /// produce two results; the normal result of the add or sub, and the output
  /// carry flag.  These nodes both read and write a carry flag to allow them
  /// to be chained together for add and sub of arbitrarily large values.
280   ADDE,
281   SUBE,
282 
283   /// Carry-using nodes for multiple precision addition and subtraction.
284   /// These nodes take three operands: The first two are the normal lhs and
285   /// rhs to the add or sub, and the third is a boolean value that is 1 if and
286   /// only if there is an incoming carry/borrow. These nodes produce two
287   /// results: the normal result of the add or sub, and a boolean value that is
288   /// 1 if and only if there is an outgoing carry/borrow.
289   ///
290   /// Care must be taken if these opcodes are lowered to hardware instructions
291   /// that use the inverse logic -- 0 if and only if there is an
292   /// incoming/outgoing carry/borrow.  In such cases, you must preserve the
293   /// semantics of these opcodes by inverting the incoming carry/borrow, feeding
294   /// it to the add/sub hardware instruction, and then inverting the outgoing
295   /// carry/borrow.
296   ///
297   /// The use of these opcodes is preferable to adde/sube if the target supports
298   /// it, as the carry is a regular value rather than a glue, which allows
299   /// further optimisation.
300   ///
301   /// These opcodes are different from [US]{ADD,SUB}O in that
302   /// U{ADD,SUB}O_CARRY consume and produce a carry/borrow, whereas
303   /// [US]{ADD,SUB}O produce an overflow.
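  ///
  /// A minimal illustrative sketch of chaining a 64-bit addition out of i32
  /// halves, assuming 'DAG' is a SelectionDAG, 'DL' an SDLoc, and the
  /// LHSLo/LHSHi/RHSLo/RHSHi values are i32 SDValues:
  /// \code
  ///   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
  ///   SDValue Lo = DAG.getNode(ISD::UADDO, DL, VTs, LHSLo, RHSLo);
  ///   SDValue Hi = DAG.getNode(ISD::UADDO_CARRY, DL, VTs, LHSHi, RHSHi,
  ///                            Lo.getValue(1));
  /// \endcode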
304   UADDO_CARRY,
305   USUBO_CARRY,
306 
307   /// Carry-using overflow-aware nodes for multiple precision addition and
308   /// subtraction. These nodes take three operands: The first two are normal lhs
309   /// and rhs to the add or sub, and the third is a boolean indicating if there
310   /// is an incoming carry. They produce two results: the normal result of the
  /// add or sub, and a boolean that indicates if an overflow occurred (*not* a
  /// flag, because it may be stored to memory, etc.). If the type of the
313   /// boolean is not i1 then the high bits conform to getBooleanContents.
314   SADDO_CARRY,
315   SSUBO_CARRY,
316 
317   /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
318   /// These nodes take two operands: the normal LHS and RHS to the add. They
319   /// produce two results: the normal result of the add, and a boolean that
  /// indicates if an overflow occurred (*not* a flag, because it may be stored
  /// to memory, etc.).  If the type of the boolean is not i1 then the high
322   /// bits conform to getBooleanContents.
323   /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
324   SADDO,
325   UADDO,
326 
327   /// Same for subtraction.
328   SSUBO,
329   USUBO,
330 
331   /// Same for multiplication.
332   SMULO,
333   UMULO,
334 
335   /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
336   /// integers with the same bit width (W). If the true value of LHS + RHS
337   /// exceeds the largest value that can be represented by W bits, the
338   /// resulting value is this maximum value. Otherwise, if this value is less
339   /// than the smallest value that can be represented by W bits, the
340   /// resulting value is this minimum value.
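  ///
  /// For example, with W = 8: UADDSAT(i8 200, i8 100) saturates to 255, and
  /// SADDSAT(i8 100, i8 50) saturates to 127 (the signed maximum).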
341   SADDSAT,
342   UADDSAT,
343 
344   /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
345   /// integers with the same bit width (W). If the true value of LHS - RHS
346   /// exceeds the largest value that can be represented by W bits, the
347   /// resulting value is this maximum value. Otherwise, if this value is less
348   /// than the smallest value that can be represented by W bits, the
349   /// resulting value is this minimum value.
350   SSUBSAT,
351   USUBSAT,
352 
353   /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
354   /// operand is the value to be shifted, and the second argument is the amount
355   /// to shift by. Both must be integers of the same bit width (W). If the true
356   /// value of LHS << RHS exceeds the largest value that can be represented by
  /// W bits, the resulting value is this maximum value. Otherwise, if this
358   /// value is less than the smallest value that can be represented by W bits,
359   /// the resulting value is this minimum value.
360   SSHLSAT,
361   USHLSAT,
362 
363   /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
364   /// on 2 integers with the same width and scale. SCALE represents the scale
365   /// of both operands as fixed point numbers. This SCALE parameter must be a
366   /// constant integer. A scale of zero is effectively performing
367   /// multiplication on 2 integers.
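  ///
  /// For example, with W = 32 and SCALE = 16 the operands are treated as
  /// Q16.16 values, and SMULFIX(X, Y, 16) conceptually computes
  /// (sext(X) * sext(Y)) >> 16 in 64-bit arithmetic, truncated back to 32
  /// bits.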
368   SMULFIX,
369   UMULFIX,
370 
371   /// Same as the corresponding unsaturated fixed point instructions, but the
372   /// result is clamped between the min and max values representable by the
373   /// bits of the first 2 operands.
374   SMULFIXSAT,
375   UMULFIXSAT,
376 
377   /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
378   /// 2 integers with the same width and scale. SCALE represents the scale
379   /// of both operands as fixed point numbers. This SCALE parameter must be a
380   /// constant integer.
381   SDIVFIX,
382   UDIVFIX,
383 
384   /// Same as the corresponding unsaturated fixed point instructions, but the
385   /// result is clamped between the min and max values representable by the
386   /// bits of the first 2 operands.
387   SDIVFIXSAT,
388   UDIVFIXSAT,
389 
390   /// Simple binary floating point operators.
391   FADD,
392   FSUB,
393   FMUL,
394   FDIV,
395   FREM,
396 
397   /// Constrained versions of the binary floating point operators.
398   /// These will be lowered to the simple operators before final selection.
399   /// They are used to limit optimizations while the DAG is being
400   /// optimized.
401   STRICT_FADD,
402   STRICT_FSUB,
403   STRICT_FMUL,
404   STRICT_FDIV,
405   STRICT_FREM,
406   STRICT_FMA,
407 
408   /// Constrained versions of libm-equivalent floating point intrinsics.
409   /// These will be lowered to the equivalent non-constrained pseudo-op
410   /// (or expanded to the equivalent library call) before final selection.
411   /// They are used to limit optimizations while the DAG is being optimized.
412   STRICT_FSQRT,
413   STRICT_FPOW,
414   STRICT_FPOWI,
415   STRICT_FLDEXP,
416   STRICT_FSIN,
417   STRICT_FCOS,
418   STRICT_FEXP,
419   STRICT_FEXP2,
420   STRICT_FLOG,
421   STRICT_FLOG10,
422   STRICT_FLOG2,
423   STRICT_FRINT,
424   STRICT_FNEARBYINT,
425   STRICT_FMAXNUM,
426   STRICT_FMINNUM,
427   STRICT_FCEIL,
428   STRICT_FFLOOR,
429   STRICT_FROUND,
430   STRICT_FROUNDEVEN,
431   STRICT_FTRUNC,
432   STRICT_LROUND,
433   STRICT_LLROUND,
434   STRICT_LRINT,
435   STRICT_LLRINT,
436   STRICT_FMAXIMUM,
437   STRICT_FMINIMUM,
438 
439   /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
440   /// unsigned integer. These have the same semantics as fptosi and fptoui
441   /// in IR.
442   /// They are used to limit optimizations while the DAG is being optimized.
443   STRICT_FP_TO_SINT,
444   STRICT_FP_TO_UINT,
445 
446   /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
447   /// a floating point value. These have the same semantics as sitofp and
448   /// uitofp in IR.
449   /// They are used to limit optimizations while the DAG is being optimized.
450   STRICT_SINT_TO_FP,
451   STRICT_UINT_TO_FP,
452 
453   /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
454   /// point type down to the precision of the destination VT.  TRUNC is a
455   /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
456   /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
457   /// change the value of Y.
458   ///
459   /// The TRUNC = 1 case is used in cases where we know that the value will
460   /// not be modified by the node, because Y is not using any of the extra
461   /// precision of source type.  This allows certain transformations like
462   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
463   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
464   /// removed.
465   /// It is used to limit optimizations while the DAG is being optimized.
466   STRICT_FP_ROUND,
467 
468   /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
469   /// type.
470   /// It is used to limit optimizations while the DAG is being optimized.
471   STRICT_FP_EXTEND,
472 
473   /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
474   /// for floating-point operands only.  STRICT_FSETCC performs a quiet
475   /// comparison operation, while STRICT_FSETCCS performs a signaling
476   /// comparison operation.
477   STRICT_FSETCC,
478   STRICT_FSETCCS,
479 
  /// FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
481   FPTRUNC_ROUND,
482 
483   /// FMA - Perform a * b + c with no intermediate rounding step.
484   FMA,
485 
486   /// FMAD - Perform a * b + c, while getting the same result as the
487   /// separately rounded operations.
488   FMAD,
489 
490   /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
491   /// DAG node does not require that X and Y have the same type, just that
492   /// they are both floating point.  X and the result must have the same type.
493   /// FCOPYSIGN(f32, f64) is allowed.
494   FCOPYSIGN,
495 
496   /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
497   /// value as an integer 0/1 value.
498   FGETSIGN,
499 
500   /// Returns platform specific canonical encoding of a floating point number.
501   FCANONICALIZE,
502 
  /// Performs a check of a floating-point class property, as defined by
  /// IEEE-754. The first operand is the floating-point value to check. The
  /// second operand specifies the checked property and is a TargetConstant
  /// which encodes the test in the same way as the 'llvm.is.fpclass'
  /// intrinsic. Returns a boolean value.
508   IS_FPCLASS,
509 
510   /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
511   /// with the specified, possibly variable, elements. The types of the
512   /// operands must match the vector element type, except that integer types
513   /// are allowed to be larger than the element type, in which case the
514   /// operands are implicitly truncated. The types of the operands must all
515   /// be the same.
516   BUILD_VECTOR,
517 
518   /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
519   /// at IDX replaced with VAL. If the type of VAL is larger than the vector
520   /// element type then VAL is truncated before replacement.
521   ///
522   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
523   /// vector width. IDX is not first scaled by the runtime scaling factor of
524   /// VECTOR.
525   INSERT_VECTOR_ELT,
526 
527   /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
528   /// identified by the (potentially variable) element number IDX. If the return
529   /// type is an integer type larger than the element type of the vector, the
530   /// result is extended to the width of the return type. In that case, the high
531   /// bits are undefined.
532   ///
533   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
534   /// vector width. IDX is not first scaled by the runtime scaling factor of
535   /// VECTOR.
536   EXTRACT_VECTOR_ELT,
537 
538   /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
539   /// vector type with the same length and element type, this produces a
540   /// concatenated vector result value, with length equal to the sum of the
541   /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
542   /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
543   /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
544   CONCAT_VECTORS,
545 
546   /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
547   /// inserted into VECTOR1. IDX represents the starting element number at which
  /// VECTOR2 will be inserted. Let the type of VECTOR2 be T; IDX must be a
  /// constant multiple of T's known minimum vector length, and if T is a
  /// scalable vector, IDX is first scaled by the runtime scaling factor of T.
551   /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
552   /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
553   /// indices. If this condition cannot be determined statically but is false at
554   /// runtime, then the result vector is undefined. The IDX parameter must be a
555   /// vector index constant type, which for most targets will be an integer
556   /// pointer type.
557   ///
558   /// This operation supports inserting a fixed-width vector into a scalable
559   /// vector, but not the other way around.
560   INSERT_SUBVECTOR,
561 
562   /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
563   /// Let the result type be T, then IDX represents the starting element number
564   /// from which a subvector of type T is extracted. IDX must be a constant
565   /// multiple of T's known minimum vector length. If T is a scalable vector,
566   /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
567   /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
568   /// condition cannot be determined statically but is false at runtime, then
569   /// the result vector is undefined. The IDX parameter must be a vector index
570   /// constant type, which for most targets will be an integer pointer type.
571   ///
572   /// This operation supports extracting a fixed-width vector from a scalable
573   /// vector, but not the other way around.
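  ///
  /// A minimal illustrative sketch of extracting the upper half of a v8i32 as
  /// a v4i32, assuming 'DAG' is a SelectionDAG, 'DL' an SDLoc, and 'Vec' a
  /// v8i32 SDValue:
  /// \code
  ///   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, Vec,
  ///                            DAG.getVectorIdxConstant(4, DL));
  /// \endcode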
574   EXTRACT_SUBVECTOR,
575 
576   /// VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
577   /// output vectors having the same type. The first output contains the even
578   /// indices from CONCAT_VECTORS(VEC1, VEC2), with the second output
  /// containing the odd indices. The relative order of elements within an
  /// output matches that of the concatenated input.
581   VECTOR_DEINTERLEAVE,
582 
583   /// VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
584   /// output vectors having the same type. The first output contains the
585   /// result of interleaving the low half of CONCAT_VECTORS(VEC1, VEC2), with
586   /// the second output containing the result of interleaving the high half.
587   VECTOR_INTERLEAVE,
588 
589   /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
590   /// whose elements are shuffled using the following algorithm:
591   ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
592   VECTOR_REVERSE,
593 
594   /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
595   /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
596   /// values that indicate which value (or undef) each result element will
597   /// get.  These constant ints are accessible through the
598   /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
599   /// 'vperm' instruction, except that the indices must be constants and are
600   /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
601   VECTOR_SHUFFLE,
602 
603   /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
604   /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
605   /// Let the result type be T, if IMM is positive it represents the starting
606   /// element number (an index) from which a subvector of type T is extracted
607   /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
608   /// specifying the number of trailing elements to extract from VEC1, where the
609   /// elements of T are selected using the following algorithm:
610   ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
611   /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
612   /// is a constant integer.
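  ///
  /// For example, with VEC1 = <A, B, C, D> and VEC2 = <E, F, G, H>, IMM = 1
  /// yields <B, C, D, E>, while IMM = -1 yields <D, E, F, G>.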
613   VECTOR_SPLICE,
614 
615   /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
616   /// scalar value into element 0 of the resultant vector type.  The top
617   /// elements 1 to N-1 of the N-element vector are undefined.  The type
618   /// of the operand must match the vector element type, except when they
619   /// are integer types.  In this case the operand is allowed to be wider
620   /// than the vector element type, and is implicitly truncated to it.
621   SCALAR_TO_VECTOR,
622 
623   /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
624   /// duplicated in all lanes. The type of the operand must match the vector
625   /// element type, except when they are integer types.  In this case the
626   /// operand is allowed to be wider than the vector element type, and is
627   /// implicitly truncated to it.
628   SPLAT_VECTOR,
629 
630   /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
631   /// scalar values joined together and then duplicated in all lanes. This
632   /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
633   /// allows representing a 64-bit splat on a target with 32-bit integers. The
634   /// total width of the scalars must cover the element width. SCALAR1 contains
635   /// the least significant bits of the value regardless of endianness and all
636   /// scalars should have the same type.
637   SPLAT_VECTOR_PARTS,
638 
639   /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
640   /// of a linear sequence of unsigned values starting from 0 with a step of
641   /// IMM, where IMM must be a TargetConstant with type equal to the vector
642   /// element type. The arithmetic is performed modulo the bitwidth of the
643   /// element.
644   ///
645   /// The operation does not support returning fixed-width vectors or
646   /// non-constant operands.
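  ///
  /// For example, STEP_VECTOR(2) with a result type of nxv4i32 produces the
  /// sequence <0, 2, 4, 6, 8, ...> across all lanes of the scalable vector.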
647   STEP_VECTOR,
648 
649   /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
650   /// producing an unsigned/signed value of type i[2*N], then return the top
651   /// part.
652   MULHU,
653   MULHS,
654 
655   /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
656   /// type i[N+1], halving the result by shifting it one bit right.
657   /// shr(add(ext(X), ext(Y)), 1)
658   AVGFLOORS,
659   AVGFLOORU,
660   /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
661   /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
662   /// right. shr(add(ext(X), ext(Y), 1), 1)
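  ///
  /// For example, on i8 operands: AVGFLOORU(250, 251) = (250 + 251) >> 1 = 250,
  /// while AVGCEILU(250, 251) = (250 + 251 + 1) >> 1 = 251; neither computation
  /// wraps because the addition is performed in the wider type.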
663   AVGCEILS,
664   AVGCEILU,
665 
  /// ABDS/ABDU - Absolute difference - Return the absolute difference between
  /// two numbers interpreted as signed/unsigned.
  /// i.e. trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
  ///  or  trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
670   ABDS,
671   ABDU,
672 
673   /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
674   /// integers.
675   SMIN,
676   SMAX,
677   UMIN,
678   UMAX,
679 
680   /// Bitwise operators - logical and, logical or, logical xor.
681   AND,
682   OR,
683   XOR,
684 
685   /// ABS - Determine the unsigned absolute value of a signed integer value of
686   /// the same bitwidth.
687   /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
688   /// is performed.
689   ABS,
690 
691   /// Shift and rotation operations.  After legalization, the type of the
692   /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
693   /// the shift amount can be any type, but care must be taken to ensure it is
694   /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
695   /// legalization, types like i1024 can occur and i8 doesn't have enough bits
696   /// to represent the shift amount.
  /// When the 1st operand is a vector, the shift amount must have the same
  /// type. (TLI.getShiftAmountTy() will return the same type when the input
  /// type is a vector.)
700   /// For rotates and funnel shifts, the shift amount is treated as an unsigned
701   /// amount modulo the element size of the first operand.
702   ///
703   /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
704   /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
705   /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
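  ///
  /// For example, with i8 operands, fshl(X, Y, 3) == (X << 3) | (Y >> 5). A
  /// rotate is a funnel shift with both inputs equal; a minimal illustrative
  /// sketch, assuming 'DAG' is a SelectionDAG, 'DL' an SDLoc, and 'X'/'ShAmt'
  /// are SDValues of the same integer type 'VT':
  /// \code
  ///   // rotl(X, ShAmt) == fshl(X, X, ShAmt)
  ///   SDValue Rot = DAG.getNode(ISD::FSHL, DL, VT, X, X, ShAmt);
  /// \endcode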
706   SHL,
707   SRA,
708   SRL,
709   ROTL,
710   ROTR,
711   FSHL,
712   FSHR,
713 
714   /// Byte Swap and Counting operators.
715   BSWAP,
716   CTTZ,
717   CTLZ,
718   CTPOP,
719   BITREVERSE,
720   PARITY,
721 
722   /// Bit counting operators with an undefined result for zero inputs.
723   CTTZ_ZERO_UNDEF,
724   CTLZ_ZERO_UNDEF,
725 
726   /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
727   /// i1 then the high bits must conform to getBooleanContents.
728   SELECT,
729 
730   /// Select with a vector condition (op #0) and two vector operands (ops #1
731   /// and #2), returning a vector result.  All vectors have the same length.
732   /// Much like the scalar select and setcc, each bit in the condition selects
733   /// whether the corresponding result element is taken from op #1 or op #2.
734   /// At first, the VSELECT condition is of vXi1 type. Later, targets may
735   /// change the condition type in order to match the VSELECT node using a
736   /// pattern. The condition follows the BooleanContent format of the target.
737   VSELECT,
738 
739   /// Select with condition operator - This selects between a true value and
740   /// a false value (ops #2 and #3) based on the boolean result of comparing
741   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
742   /// condition code in op #4, a CondCodeSDNode.
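  ///
  /// A minimal illustrative sketch of computing a signed maximum through the
  /// getSelectCC helper, assuming 'DAG' is a SelectionDAG, 'DL' an SDLoc, and
  /// 'A'/'B' are i32 SDValues:
  /// \code
  ///   // Max = (A > B) ? A : B, i.e. SELECT_CC(A, B, A, B, SETGT).
  ///   SDValue Max = DAG.getSelectCC(DL, A, B, A, B, ISD::SETGT);
  /// \endcode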
743   SELECT_CC,
744 
745   /// SetCC operator - This evaluates to a true value iff the condition is
746   /// true.  If the result value type is not i1 then the high bits conform
747   /// to getBooleanContents.  The operands to this are the left and right
748   /// operands to compare (ops #0, and #1) and the condition code to compare
749   /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
750   /// then the result type must also be a vector type.
751   SETCC,
752 
753   /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
754   /// op #2 is a boolean indicating if there is an incoming carry. This
755   /// operator checks the result of "LHS - RHS - Carry", and can be used to
756   /// compare two wide integers:
757   /// (setcccarry lhshi rhshi (usubo_carry lhslo rhslo) cc).
758   /// Only valid for integers.
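  ///
  /// A minimal illustrative sketch of a signed less-than comparison of two
  /// 128-bit values split into i64 halves, assuming 'DAG' is a SelectionDAG,
  /// 'DL' an SDLoc, and the LHSLo/LHSHi/RHSLo/RHSHi values are i64 SDValues:
  /// \code
  ///   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
  ///   SDValue SubLo = DAG.getNode(ISD::USUBO, DL, VTs, LHSLo, RHSLo);
  ///   SDValue Lt = DAG.getNode(ISD::SETCCCARRY, DL, MVT::i1, LHSHi, RHSHi,
  ///                            SubLo.getValue(1), DAG.getCondCode(ISD::SETLT));
  /// \endcode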
759   SETCCCARRY,
760 
761   /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
762   /// integer shift operations.  The operation ordering is:
763   ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
764   SHL_PARTS,
765   SRA_PARTS,
766   SRL_PARTS,
767 
768   /// Conversion operators.  These are all single input single output
769   /// operations.  For all of these, the result type must be strictly
770   /// wider or narrower (depending on the operation) than the source
771   /// type.
772 
773   /// SIGN_EXTEND - Used for integer types, replicating the sign bit
774   /// into new bits.
775   SIGN_EXTEND,
776 
777   /// ZERO_EXTEND - Used for integer types, zeroing the new bits. Can carry
778   /// the NonNeg SDNodeFlag to indicate that the input is known to be
779   /// non-negative. If the flag is present and the input is negative, the result
780   /// is poison.
781   ZERO_EXTEND,
782 
783   /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
784   ANY_EXTEND,
785 
786   /// TRUNCATE - Completely drop the high bits.
787   TRUNCATE,
788 
789   /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
790   /// depends on the first letter) to floating point.
791   SINT_TO_FP,
792   UINT_TO_FP,
793 
794   /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
795   /// sign extend a small value in a large integer register (e.g. sign
796   /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
  /// with the 7th bit).  The size of the smaller type is indicated by operand
  /// #1, a ValueType node.
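  ///
  /// A minimal illustrative sketch of sign extending the low 8 bits of an i32
  /// value in place, assuming 'DAG' is a SelectionDAG, 'DL' an SDLoc, and
  /// 'Val' an i32 SDValue:
  /// \code
  ///   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Val,
  ///                             DAG.getValueType(MVT::i8));
  /// \endcode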
799   SIGN_EXTEND_INREG,
800 
801   /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
802   /// in-register any-extension of the low lanes of an integer vector. The
803   /// result type must have fewer elements than the operand type, and those
804   /// elements must be larger integer types such that the total size of the
805   /// operand type is less than or equal to the size of the result type. Each
806   /// of the low operand elements is any-extended into the corresponding,
807   /// wider result elements with the high bits becoming undef.
808   /// NOTE: The type legalizer prefers to make the operand and result size
809   /// the same to allow expansion to shuffle vector during op legalization.
810   ANY_EXTEND_VECTOR_INREG,
811 
812   /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
813   /// in-register sign-extension of the low lanes of an integer vector. The
814   /// result type must have fewer elements than the operand type, and those
815   /// elements must be larger integer types such that the total size of the
816   /// operand type is less than or equal to the size of the result type. Each
817   /// of the low operand elements is sign-extended into the corresponding,
818   /// wider result elements.
819   /// NOTE: The type legalizer prefers to make the operand and result size
820   /// the same to allow expansion to shuffle vector during op legalization.
821   SIGN_EXTEND_VECTOR_INREG,
822 
823   /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
824   /// in-register zero-extension of the low lanes of an integer vector. The
825   /// result type must have fewer elements than the operand type, and those
826   /// elements must be larger integer types such that the total size of the
827   /// operand type is less than or equal to the size of the result type. Each
828   /// of the low operand elements is zero-extended into the corresponding,
829   /// wider result elements.
830   /// NOTE: The type legalizer prefers to make the operand and result size
831   /// the same to allow expansion to shuffle vector during op legalization.
832   ZERO_EXTEND_VECTOR_INREG,
833 
834   /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
835   /// integer. These have the same semantics as fptosi and fptoui in IR. If
836   /// the FP value cannot fit in the integer type, the results are undefined.
837   FP_TO_SINT,
838   FP_TO_UINT,
839 
840   /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
841   /// signed or unsigned scalar integer type given in operand 1 with the
842   /// following semantics:
843   ///
844   ///  * If the value is NaN, zero is returned.
845   ///  * If the value is larger/smaller than the largest/smallest integer,
846   ///    the largest/smallest integer is returned (saturation).
847   ///  * Otherwise the result of rounding the value towards zero is returned.
848   ///
849   /// The scalar width of the type given in operand 1 must be equal to, or
850   /// smaller than, the scalar result type width. It may end up being smaller
851   /// than the result width as a result of integer type legalization.
852   ///
853   /// After converting to the scalar integer type in operand 1, the value is
854   /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
855   /// zero extends.
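  ///
  /// A minimal illustrative sketch of converting an f64 value to i32 with
  /// signed saturation (the DAG form of llvm.fptosi.sat), assuming 'DAG' is a
  /// SelectionDAG, 'DL' an SDLoc, and 'FPVal' an f64 SDValue:
  /// \code
  ///   SDValue Sat = DAG.getNode(ISD::FP_TO_SINT_SAT, DL, MVT::i32, FPVal,
  ///                             DAG.getValueType(MVT::i32));
  /// \endcode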
856   FP_TO_SINT_SAT,
857   FP_TO_UINT_SAT,
858 
859   /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
860   /// down to the precision of the destination VT.  TRUNC is a flag, which is
861   /// always an integer that is zero or one.  If TRUNC is 0, this is a
862   /// normal rounding, if it is 1, this FP_ROUND is known to not change the
863   /// value of Y.
864   ///
865   /// The TRUNC = 1 case is used in cases where we know that the value will
866   /// not be modified by the node, because Y is not using any of the extra
867   /// precision of source type.  This allows certain transformations like
868   /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
869   /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
870   FP_ROUND,
871 
872   /// Returns current rounding mode:
873   /// -1 Undefined
874   ///  0 Round to 0
875   ///  1 Round to nearest, ties to even
876   ///  2 Round to +inf
877   ///  3 Round to -inf
878   ///  4 Round to nearest, ties to zero
879   ///  Other values are target dependent.
880   /// Result is rounding mode and chain. Input is a chain.
881   GET_ROUNDING,
882 
883   /// Set rounding mode.
884   /// The first operand is a chain pointer. The second specifies the required
885   /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
886   SET_ROUNDING,
887 
888   /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
889   FP_EXTEND,
890 
891   /// BITCAST - This operator converts between integer, vector and FP
892   /// values, as if the value was stored to memory with one type and loaded
893   /// from the same address with the other type (or equivalently for vector
894   /// format conversions, etc).  The source and result are required to have
895   /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
896   /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
897   /// getNode().
898   ///
899   /// This operator is subtly different from the bitcast instruction from
900   /// LLVM-IR since this node may change the bits in the register. For
901   /// example, this occurs on big-endian NEON and big-endian MSA where the
902   /// layout of the bits in the register depends on the vector type and this
903   /// operator acts as a shuffle operation for some vector type combinations.
904   BITCAST,
905 
906   /// ADDRSPACECAST - This operator converts between pointers of different
907   /// address spaces.
908   ADDRSPACECAST,
909 
910   /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
911   /// and truncation for half-precision (16 bit) floating numbers. These nodes
912   /// form a semi-softened interface for dealing with f16 (as an i16), which
913   /// is often a storage-only type but has native conversions.
914   FP16_TO_FP,
915   FP_TO_FP16,
916   STRICT_FP16_TO_FP,
917   STRICT_FP_TO_FP16,
918 
919   /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions
920   /// and truncation for bfloat16. These nodes form a semi-softened interface
921   /// for dealing with bf16 (as an i16), which is often a storage-only type but
922   /// has native conversions.
923   BF16_TO_FP,
924   FP_TO_BF16,
925   STRICT_BF16_TO_FP,
926   STRICT_FP_TO_BF16,
927 
928   /// Perform various unary floating-point operations inspired by libm. For
929   /// FPOWI, the result is undefined if the integer operand doesn't fit into
930   /// sizeof(int).
931   FNEG,
932   FABS,
933   FSQRT,
934   FCBRT,
935   FSIN,
936   FCOS,
937   FPOW,
938   FPOWI,
939   /// FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
940   FLDEXP,
941 
942   /// FFREXP - frexp, extract fractional and exponent component of a
943   /// floating-point value. Returns the two components as separate return
944   /// values.
945   FFREXP,
946 
947   FLOG,
948   FLOG2,
949   FLOG10,
950   FEXP,
951   FEXP2,
952   FEXP10,
953   FCEIL,
954   FTRUNC,
955   FRINT,
956   FNEARBYINT,
957   FROUND,
958   FROUNDEVEN,
959   FFLOOR,
960   LROUND,
961   LLROUND,
962   LRINT,
963   LLRINT,
964 
965   /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
966   /// values.
  ///
968   /// In the case where a single input is a NaN (either signaling or quiet),
969   /// the non-NaN input is returned.
970   ///
971   /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
972   FMINNUM,
973   FMAXNUM,
974 
975   /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or
976   /// maximumNumber on two values, following IEEE-754 definitions. This differs
977   /// from FMINNUM/FMAXNUM in the handling of signaling NaNs, and signed zero.
978   ///
979   /// If one input is a signaling NaN, returns a quiet NaN. This matches
980   /// IEEE-754 2008's minnum/maxnum behavior for signaling NaNs (which differs
981   /// from 2019).
982   ///
983   /// These treat -0 as ordered less than +0, matching the behavior of IEEE-754
984   /// 2019's minimumNumber/maximumNumber.
985   FMINNUM_IEEE,
986   FMAXNUM_IEEE,
987 
988   /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
989   /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
990   /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
991   FMINIMUM,
992   FMAXIMUM,
993 
994   /// FSINCOS - Compute both fsin and fcos as a single operation.
995   FSINCOS,
996 
997   /// Gets the current floating-point environment. The first operand is a token
998   /// chain. The results are FP environment, represented by an integer value,
999   /// and a token chain.
1000   GET_FPENV,
1001 
1002   /// Sets the current floating-point environment. The first operand is a token
1003   /// chain, the second is FP environment, represented by an integer value. The
1004   /// result is a token chain.
1005   SET_FPENV,
1006 
1007   /// Set floating-point environment to default state. The first operand and the
1008   /// result are token chains.
1009   RESET_FPENV,
1010 
1011   /// Gets the current floating-point environment. The first operand is a token
1012   /// chain, the second is a pointer to memory, where FP environment is stored
1013   /// to. The result is a token chain.
1014   GET_FPENV_MEM,
1015 
1016   /// Sets the current floating point environment. The first operand is a token
1017   /// chain, the second is a pointer to memory, where FP environment is loaded
1018   /// from. The result is a token chain.
1019   SET_FPENV_MEM,
1020 
1021   /// Reads the current dynamic floating-point control modes. The operand is
1022   /// a token chain.
1023   GET_FPMODE,
1024 
1025   /// Sets the current dynamic floating-point control modes. The first operand
1026   /// is a token chain, the second is control modes set represented as integer
1027   /// value.
1028   SET_FPMODE,
1029 
1030   /// Sets default dynamic floating-point control modes. The operand is a
1031   /// token chain.
1032   RESET_FPMODE,
1033 
1034   /// LOAD and STORE have token chains as their first operand, then the same
1035   /// operands as an LLVM load/store instruction, then an offset node that
1036   /// is added / subtracted from the base pointer to form the address (for
1037   /// indexed memory ops).
1038   LOAD,
1039   STORE,
1040 
1041   /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
1042   /// to a specified boundary.  This node always has two return values: a new
1043   /// stack pointer value and a chain. The first operand is the token chain,
1044   /// the second is the number of bytes to allocate, and the third is the
1045   /// alignment boundary.  The size is guaranteed to be a multiple of the
1046   /// stack alignment, and the alignment is guaranteed to be bigger than the
1047   /// stack alignment (if required) or 0 to get standard stack alignment.
1048   DYNAMIC_STACKALLOC,
1049 
1050   /// Control flow instructions.  These all have token chains.
1051 
1052   /// BR - Unconditional branch.  The first operand is the chain
1053   /// operand, the second is the MBB to branch to.
1054   BR,
1055 
1056   /// BRIND - Indirect branch.  The first operand is the chain, the second
1057   /// is the value to branch to, which must be of the same type as the
1058   /// target's pointer type.
1059   BRIND,
1060 
1061   /// BR_JT - Jumptable branch. The first operand is the chain, the second
1062   /// is the jumptable index, the last one is the jumptable entry index.
1063   BR_JT,
1064 
1065   /// JUMP_TABLE_DEBUG_INFO - Jumptable debug info. The first operand is the
1066   /// chain, the second is the jumptable index.
1067   JUMP_TABLE_DEBUG_INFO,
1068 
1069   /// BRCOND - Conditional branch.  The first operand is the chain, the
1070   /// second is the condition, the third is the block to branch to if the
1071   /// condition is true.  If the type of the condition is not i1, then the
1072   /// high bits must conform to getBooleanContents. If the condition is undef,
1073   /// it nondeterministically jumps to the block.
1074   /// TODO: Its semantics w.r.t undef requires further discussion; we need to
1075   /// make it sure that it is consistent with optimizations in MIR & the
1076   /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
1077   BRCOND,
1078 
1079   /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
1080   /// that the condition is represented as condition code, and two nodes to
1081   /// compare, rather than as a combined SetCC node.  The operands in order
1082   /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
1083   /// condition is undef, it nondeterministically jumps to the block.
1084   BR_CC,
1085 
1086   /// INLINEASM - Represents an inline asm block.  This node always has two
1087   /// return values: a chain and a flag result.  The inputs are as follows:
1088   ///   Operand #0  : Input chain.
  ///   Operand #1  : an ExternalSymbolSDNode with a pointer to the asm string.
1090   ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
1091   ///   Operand #3  : HasSideEffect, IsAlignStack bits.
1092   ///   After this, it is followed by a list of operands with this format:
  ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
  ///                     number of operands that follow, etc.  See InlineAsm.h.
1095   ///     ... however many operands ...
1096   ///   Operand #last: Optional, an incoming flag.
1097   ///
1098   /// The variable width operands are required to represent target addressing
1099   /// modes as a single "operand", even though they may have multiple
1100   /// SDOperands.
1101   INLINEASM,
1102 
1103   /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
1104   INLINEASM_BR,
1105 
1106   /// EH_LABEL - Represents a label in mid basic block used to track
1107   /// locations needed for debug and exception handling tables.  These nodes
1108   /// take a chain as input and return a chain.
1109   EH_LABEL,
1110 
1111   /// ANNOTATION_LABEL - Represents a mid basic block label used by
1112   /// annotations. This should remain within the basic block and be ordered
1113   /// with respect to other call instructions, but loads and stores may float
1114   /// past it.
1115   ANNOTATION_LABEL,
1116 
1117   /// CATCHRET - Represents a return from a catch block funclet. Used for
1118   /// MSVC compatible exception handling. Takes a chain operand and a
1119   /// destination basic block operand.
1120   CATCHRET,
1121 
1122   /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
1123   /// MSVC compatible exception handling. Takes only a chain operand.
1124   CLEANUPRET,
1125 
1126   /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
1127   /// value, the same type as the pointer type for the system, and an output
1128   /// chain.
1129   STACKSAVE,
1130 
  /// STACKRESTORE has two operands, an input chain and a pointer to restore
  /// to. It returns an output chain.
1133   STACKRESTORE,
1134 
1135   /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
1136   /// of a call sequence, and carry arbitrary information that target might
1137   /// want to know.  The first operand is a chain, the rest are specified by
1138   /// the target and not touched by the DAG optimizers.
1139   /// Targets that may use stack to pass call arguments define additional
1140   /// operands:
1141   /// - size of the call frame part that must be set up within the
1142   ///   CALLSEQ_START..CALLSEQ_END pair,
1143   /// - part of the call frame prepared prior to CALLSEQ_START.
1144   /// Both these parameters must be constants, their sum is the total call
1145   /// frame size.
1146   /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
1147   CALLSEQ_START, // Beginning of a call sequence
1148   CALLSEQ_END,   // End of a call sequence
1149 
1150   /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
1151   /// and the alignment. It returns a pair of values: the vaarg value and a
1152   /// new chain.
1153   VAARG,
1154 
1155   /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
1156   /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
1157   /// source.
1158   VACOPY,
1159 
1160   /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
1161   /// pointer, and a SRCVALUE.
1162   VAEND,
1163   VASTART,
1164 
1165   // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
1166   // with the preallocated call Value.
1167   PREALLOCATED_SETUP,
1168   // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
1169   // with the preallocated call Value, and a constant int.
1170   PREALLOCATED_ARG,
1171 
1172   /// SRCVALUE - This is a node type that holds a Value* that is used to
1173   /// make reference to a value in the LLVM IR.
1174   SRCVALUE,
1175 
  /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
1177   /// reference metadata in the IR.
1178   MDNODE_SDNODE,
1179 
1180   /// PCMARKER - This corresponds to the pcmarker intrinsic.
1181   PCMARKER,
1182 
1183   /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
1184   /// It produces a chain and one i64 value. The only operand is a chain.
1185   /// If i64 is not legal, the result will be expanded into smaller values.
1186   /// Still, it returns an i64, so targets should set legality for i64.
1187   /// The result is the content of the architecture-specific cycle
1188   /// counter-like register (or other high accuracy low latency clock source).
1189   READCYCLECOUNTER,
1190 
  /// READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
1192   /// It has the same semantics as the READCYCLECOUNTER implementation except
1193   /// that the result is the content of the architecture-specific fixed
1194   /// frequency counter suitable for measuring elapsed time.
1195   READSTEADYCOUNTER,
1196 
1197   /// HANDLENODE node - Used as a handle for various purposes.
1198   HANDLENODE,
1199 
1200   /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
1201   /// takes as input a token chain, the pointer to the trampoline, the pointer
1202   /// to the nested function, the pointer to pass for the 'nest' parameter, a
1203   /// SRCVALUE for the trampoline and another for the nested function
1204   /// (allowing targets to access the original Function*).
1205   /// It produces a token chain as output.
1206   INIT_TRAMPOLINE,
1207 
1208   /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
1209   /// It takes a pointer to the trampoline and produces a (possibly) new
1210   /// pointer to the same trampoline with platform-specific adjustments
1211   /// applied.  The pointer it returns points to an executable block of code.
1212   ADJUST_TRAMPOLINE,
1213 
1214   /// TRAP - Trapping instruction
1215   TRAP,
1216 
1217   /// DEBUGTRAP - Trap intended to get the attention of a debugger.
1218   DEBUGTRAP,
1219 
1220   /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
1221   /// failure.
1222   UBSANTRAP,
1223 
1224   /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
1225   /// is the chain.  The other operands are the address to prefetch,
1226   /// read / write specifier, locality specifier and instruction / data cache
1227   /// specifier.
1228   PREFETCH,
1229 
  /// ARITH_FENCE - This corresponds to an arithmetic fence intrinsic. Both its
1231   /// operand and output are the same floating type.
1232   ARITH_FENCE,
1233 
1234   /// MEMBARRIER - Compiler barrier only; generate a no-op.
1235   MEMBARRIER,
1236 
1237   /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
1238   /// This corresponds to the fence instruction. It takes an input chain, and
1239   /// two integer constants: an AtomicOrdering and a SynchronizationScope.
1240   ATOMIC_FENCE,
1241 
1242   /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
1243   /// This corresponds to the "load atomic" instruction.
1244   ATOMIC_LOAD,
1245 
1246   /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
1247   /// This corresponds to the "store atomic" instruction.
1248   ATOMIC_STORE,
1249 
1250   /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
1251   /// For double-word atomic operations:
1252   /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
1253   ///                                          swapLo, swapHi)
1254   /// This corresponds to the cmpxchg instruction.
1255   ATOMIC_CMP_SWAP,
1256 
1257   /// Val, Success, OUTCHAIN
1258   ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
1259   /// N.b. this is still a strong cmpxchg operation, so
1260   /// Success == "Val == cmp".
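  ///
  /// Illustrative sketch (not normative): a target without a native success
  /// result may legalize this by re-comparing the loaded value against the
  /// expected value, roughly:
  ///   Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ///   Success       = SETCC Val, cmp, seteq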
1261   ATOMIC_CMP_SWAP_WITH_SUCCESS,
1262 
1263   /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
1264   /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
1265   /// For double-word atomic operations:
1266   /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
1267   /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
1268   /// These correspond to the atomicrmw instruction.
1269   ATOMIC_SWAP,
1270   ATOMIC_LOAD_ADD,
1271   ATOMIC_LOAD_SUB,
1272   ATOMIC_LOAD_AND,
1273   ATOMIC_LOAD_CLR,
1274   ATOMIC_LOAD_OR,
1275   ATOMIC_LOAD_XOR,
1276   ATOMIC_LOAD_NAND,
1277   ATOMIC_LOAD_MIN,
1278   ATOMIC_LOAD_MAX,
1279   ATOMIC_LOAD_UMIN,
1280   ATOMIC_LOAD_UMAX,
1281   ATOMIC_LOAD_FADD,
1282   ATOMIC_LOAD_FSUB,
1283   ATOMIC_LOAD_FMAX,
1284   ATOMIC_LOAD_FMIN,
1285   ATOMIC_LOAD_UINC_WRAP,
1286   ATOMIC_LOAD_UDEC_WRAP,
1287 
1288   // Masked load and store - consecutive vector load and store operations
1289   // with an additional mask operand that prevents memory accesses to the
1290   // masked-off lanes.
1291   //
1292   // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
1293   // OutChain = MSTORE(Value, BasePtr, Mask)
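  //
  // Per-lane semantics (illustrative sketch, not normative), for lane i:
  //   MLOAD:  Val[i] = Mask[i] ? Mem[BasePtr + i] : PassThru[i]
  //   MSTORE: if (Mask[i]) Mem[BasePtr + i] = Value[i]
  // Masked-off lanes perform no memory access.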
1294   MLOAD,
1295   MSTORE,
1296 
1297   // Masked gather and scatter - load and store operations for a vector of
1298   // random addresses with an additional mask operand that prevents memory
1299   // accesses to the masked-off lanes.
1300   //
1301   // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
1302   // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
1303   //
1304   // The Index operand can have more vector elements than the other operands
1305   // due to type legalization. The extra elements are ignored.
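  //
  // Per-lane addressing (illustrative sketch, not normative), for lane i:
  //   Addr[i] = BasePtr + Index[i] * Scale   (Index signedness per MemIndexType)
  //   GATHER:  Val[i] = Mask[i] ? Mem[Addr[i]] : PassThru[i]
  //   SCATTER: if (Mask[i]) Mem[Addr[i]] = Value[i]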
1306   MGATHER,
1307   MSCATTER,
1308 
1309   /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
1310   /// is the chain and the second operand is the alloca pointer.
1311   LIFETIME_START,
1312   LIFETIME_END,
1313 
1314   /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
1315   /// beginning and end of a GC transition sequence, and carry arbitrary
1316   /// information that the target might need for lowering.  The first operand is
1317   /// a chain, the rest are specified by the target and not touched by the DAG
1318   /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
1319   /// nested.
1320   GC_TRANSITION_START,
1321   GC_TRANSITION_END,
1322 
1323   /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
1324   /// the most recent dynamic alloca. For most targets that would be 0, but
1325   /// for some others (e.g. PowerPC, PowerPC64) it would be a compile-time-known
1326   /// nonzero constant. The only operand here is the chain.
1327   GET_DYNAMIC_AREA_OFFSET,
1328 
1329   /// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
1330   /// the quality of sample counts.
1331   PSEUDO_PROBE,
1332 
1333   /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
1334   /// number of elements within a scalable vector. IMM is a constant integer
1335   /// multiplier that is applied to the runtime value.
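  /// E.g. (illustrative) the element count of a <vscale x 4 x i32> vector can
  /// be expressed as VSCALE(4), i.e. 4 * vscale.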
1336   VSCALE,
1337 
1338   /// Generic reduction nodes. These nodes represent horizontal vector
1339   /// reduction operations, producing a scalar result.
1340   /// The SEQ variants perform reductions in sequential order. The first
1341   /// operand is an initial scalar accumulator value, and the second operand
1342   /// is the vector to reduce.
1343   /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
1344   ///  ... is equivalent to
1345   /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
1346   VECREDUCE_SEQ_FADD,
1347   VECREDUCE_SEQ_FMUL,
1348 
1349   /// These reductions have relaxed evaluation order semantics, and have a
1350   /// single vector operand. The order of evaluation is unspecified. For
1351   /// power-of-2 vectors, one valid legalizer expansion is to use a tree
1352   /// reduction, i.e.:
1353   /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
1354   ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
1355   ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
1356   ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
1357   /// For non-power-of-2 vectors, this can be computed by extracting each element
1358   /// and performing the operation as if it were scalarized.
1359   VECREDUCE_FADD,
1360   VECREDUCE_FMUL,
1361   /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
1362   VECREDUCE_FMAX,
1363   VECREDUCE_FMIN,
1364   /// FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the
1365   /// llvm.minimum and llvm.maximum semantics.
1366   VECREDUCE_FMAXIMUM,
1367   VECREDUCE_FMINIMUM,
1368   /// Integer reductions may have a result type larger than the vector element
1369   /// type. However, the reduction is performed using the vector element type
1370   /// and the value in the top bits is unspecified.
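  /// E.g. (illustrative) i64 RES = VECREDUCE_ADD <4 x i8> SRC_VEC adds the
  /// elements as i8 values; only the low 8 bits of RES are defined.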
1371   VECREDUCE_ADD,
1372   VECREDUCE_MUL,
1373   VECREDUCE_AND,
1374   VECREDUCE_OR,
1375   VECREDUCE_XOR,
1376   VECREDUCE_SMAX,
1377   VECREDUCE_SMIN,
1378   VECREDUCE_UMAX,
1379   VECREDUCE_UMIN,
1380 
1381   // The `llvm.experimental.stackmap` intrinsic.
1382   // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
1383   // Outputs: output chain, glue
1384   STACKMAP,
1385 
1386   // The `llvm.experimental.patchpoint.*` intrinsic.
1387   // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee,
1388   //   <numArgs>, cc, ...
1389   // Outputs: [rv], output chain, glue
1390   PATCHPOINT,
1391 
1392 // Vector Predication
1393 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
1394 #include "llvm/IR/VPIntrinsics.def"
1395 
1396   // The `llvm.experimental.convergence.*` intrinsics.
1397   CONVERGENCECTRL_ANCHOR,
1398   CONVERGENCECTRL_ENTRY,
1399   CONVERGENCECTRL_LOOP,
1400   // This does not correspond to any convergence control intrinsic. It is used
1401   // to glue a convergence control token to a convergent operation in the DAG,
1402   // which is later translated to an implicit use in the MIR.
1403   CONVERGENCECTRL_GLUE,
1404 
1405   /// BUILTIN_OP_END - This must be the last enum value in this list.
1406   /// The target-specific pre-isel opcode values start here.
1407   BUILTIN_OP_END
1408 };
1409 
1410 /// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
1411 /// which cannot raise FP exceptions should be less than this value.
1412 /// Those that do must not be less than this value.
1413 static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;
1414 
1415 /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
1416 /// which do not reference a specific memory location should be less than
1417 /// this value. Those that do must not be less than this value, and can
1418 /// be used with SelectionDAG::getMemIntrinsicNode.
1419 static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
1420 
1421 /// Whether this is a bitwise logic opcode.
1422 inline bool isBitwiseLogicOp(unsigned Opcode) {
1423   return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR;
1424 }
1425 
1426 /// Get the underlying scalar opcode for a VECREDUCE opcode.
1427 /// For example, ISD::AND for ISD::VECREDUCE_AND.
1428 NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
1429 
1430 /// Whether this is a vector-predicated Opcode.
1431 bool isVPOpcode(unsigned Opcode);
1432 
1433 /// Whether this is a vector-predicated binary operation opcode.
1434 bool isVPBinaryOp(unsigned Opcode);
1435 
1436 /// Whether this is a vector-predicated reduction opcode.
1437 bool isVPReduction(unsigned Opcode);
1438 
1439 /// The operand position of the vector mask.
1440 std::optional<unsigned> getVPMaskIdx(unsigned Opcode);
1441 
1442 /// The operand position of the explicit vector length parameter.
1443 std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
1444 
1445 /// Translate this VP Opcode to its corresponding non-VP Opcode.
1446 std::optional<unsigned> getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept);
1447 
1448 /// Translate this non-VP Opcode to its corresponding VP Opcode.
1449 unsigned getVPForBaseOpcode(unsigned Opcode);
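/// For example (illustrative, assuming the standard VP mapping in
/// VPIntrinsics.def): getBaseOpcodeForVP(ISD::VP_ADD, /*hasFPExcept=*/false)
/// yields ISD::ADD, and getVPForBaseOpcode(ISD::ADD) yields ISD::VP_ADD.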
1450 
1451 //===--------------------------------------------------------------------===//
1452 /// MemIndexedMode enum - This enum defines the load / store indexed
1453 /// addressing modes.
1454 ///
1455 /// UNINDEXED    "Normal" load / store. The effective address is already
1456 ///              computed and is available in the base pointer. The offset
1457 ///              operand is always undefined. In addition to producing a
1458 ///              chain, an unindexed load produces one value (result of the
1459 ///              load); an unindexed store does not produce a value.
1460 ///
1461 /// PRE_INC      Similar to the unindexed mode where the effective address is
1462 /// PRE_DEC      the value of the base pointer plus / minus the offset.
1463 ///              It considers the computation as being folded into the load /
1464 ///              store operation (i.e. the load / store does the address
1465 ///              computation as well as performing the memory transaction).
1466 ///              The base operand is always undefined. In addition to
1467 ///              producing a chain, pre-indexed load produces two values
1468 ///              (result of the load and the result of the address
1469 ///              computation); a pre-indexed store produces one value (result
1470 ///              of the address computation).
1471 ///
1472 /// POST_INC     The effective address is the value of the base pointer. The
1473 /// POST_DEC     value of the offset operand is then added to / subtracted
1474 ///              from the base after memory transaction. In addition to
1475 ///              producing a chain, post-indexed load produces two values
1476 ///              (the result of the load and the result of the base +/- offset
1477 ///              computation); a post-indexed store produces one value (the
1478 ///              result of the base +/- offset computation).
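///
/// Illustrative example (sketch, not normative) for a pre-incremented load:
///   Val, NewBase, OutChain = LOAD<PRE_INC>(InChain, Base, Offset)
/// Val is loaded from (Base + Offset) and NewBase == Base + Offset. A
/// post-indexed load instead reads from Base but still produces
/// Base +/- Offset as its second result.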
1479 enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
1480 
1481 static const int LAST_INDEXED_MODE = POST_DEC + 1;
1482 
1483 //===--------------------------------------------------------------------===//
1484 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
1485 /// index parameter when calculating addresses.
1486 ///
1487 /// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
1488 /// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
1489 ///
1490 /// NOTE: The value of Scale is typically only known to the node owning the
1491 /// IndexType, where a Scale of 1 is the equivalent of an unscaled index.
1492 enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
1493 
1494 static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
1495 
1496 inline bool isIndexTypeSigned(MemIndexType IndexType) {
1497   return IndexType == SIGNED_SCALED;
1498 }
1499 
1500 //===--------------------------------------------------------------------===//
1501 /// LoadExtType enum - This enum defines the three variants of LOADEXT
1502 /// (load with extension).
1503 ///
1504 /// SEXTLOAD loads the integer operand and sign extends it to a larger
1505 ///          integer result type.
1506 /// ZEXTLOAD loads the integer operand and zero extends it to a larger
1507 ///          integer result type.
1508 /// EXTLOAD  is used for two things: floating-point extending loads and
1509 ///          integer extending loads, where the top bits are undefined.
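///
/// E.g. (illustrative) loading the i8 value 0xFF into an i32 result:
/// ZEXTLOAD yields 0x000000FF, SEXTLOAD yields 0xFFFFFFFF, and EXTLOAD
/// yields 0xFF in the low byte with the upper 24 bits undefined.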
1510 enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };
1511 
1512 static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
1513 
1514 NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
1515 
1516 //===--------------------------------------------------------------------===//
1517 /// ISD::CondCode enum - These are ordered carefully to make the bitfields
1518 /// below work out, when considering SETFALSE (something that never exists
1519 /// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
1520 /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
1521 /// to.  If the "N" column is 1, the result of the comparison is undefined if
1522 /// the input is a NAN.
1523 ///
1524 /// All of these (except for the 'always folded ops') should be handled for
1525 /// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
1526 /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
1527 ///
1528 /// Note that these are laid out in a specific order to allow bit-twiddling
1529 /// to transform conditions.
1530 enum CondCode {
1531   // Opcode       N U L G E       Intuitive operation
1532   SETFALSE, //      0 0 0 0       Always false (always folded)
1533   SETOEQ,   //      0 0 0 1       True if ordered and equal
1534   SETOGT,   //      0 0 1 0       True if ordered and greater than
1535   SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
1536   SETOLT,   //      0 1 0 0       True if ordered and less than
1537   SETOLE,   //      0 1 0 1       True if ordered and less than or equal
1538   SETONE,   //      0 1 1 0       True if ordered and operands are unequal
1539   SETO,     //      0 1 1 1       True if ordered (no nans)
1540   SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
1541   SETUEQ,   //      1 0 0 1       True if unordered or equal
1542   SETUGT,   //      1 0 1 0       True if unordered or greater than
1543   SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
1544   SETULT,   //      1 1 0 0       True if unordered or less than
1545   SETULE,   //      1 1 0 1       True if unordered, less than, or equal
1546   SETUNE,   //      1 1 1 0       True if unordered or not equal
1547   SETTRUE,  //      1 1 1 1       Always true (always folded)
1548   // Don't care operations: undefined if the input is a nan.
1549   SETFALSE2, //   1 X 0 0 0       Always false (always folded)
1550   SETEQ,     //   1 X 0 0 1       True if equal
1551   SETGT,     //   1 X 0 1 0       True if greater than
1552   SETGE,     //   1 X 0 1 1       True if greater than or equal
1553   SETLT,     //   1 X 1 0 0       True if less than
1554   SETLE,     //   1 X 1 0 1       True if less than or equal
1555   SETNE,     //   1 X 1 1 0       True if not equal
1556   SETTRUE2,  //   1 X 1 1 1       Always true (always folded)
1557 
1558   SETCC_INVALID // Marker value.
1559 };
1560 
1561 /// Return true if this is a setcc instruction that performs a signed
1562 /// comparison when used with integer operands.
1563 inline bool isSignedIntSetCC(CondCode Code) {
1564   return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
1565 }
1566 
1567 /// Return true if this is a setcc instruction that performs an unsigned
1568 /// comparison when used with integer operands.
1569 inline bool isUnsignedIntSetCC(CondCode Code) {
1570   return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
1571 }
1572 
1573 /// Return true if this is a setcc instruction that performs an equality
1574 /// comparison when used with integer operands.
1575 inline bool isIntEqualitySetCC(CondCode Code) {
1576   return Code == SETEQ || Code == SETNE;
1577 }
1578 
1579 /// Return true if this is a setcc instruction that performs an equality
1580 /// comparison when used with floating point operands.
1581 inline bool isFPEqualitySetCC(CondCode Code) {
1582   return Code == SETOEQ || Code == SETONE || Code == SETUEQ || Code == SETUNE;
1583 }
1584 
1585 /// Return true if the specified condition returns true if the two operands to
1586 /// the condition are equal. Note that if one of the two operands is a NaN,
1587 /// this value is meaningless.
1588 inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }
1589 
1590 /// This function returns 0 if the condition is always false if an operand is
1591 /// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
1592 /// the condition is undefined if the operand is a NaN.
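/// For example, using the bit layout above: getUnorderedFlavor(SETOLT) == 0,
/// getUnorderedFlavor(SETULT) == 1, and getUnorderedFlavor(SETLT) == 2.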
1593 inline unsigned getUnorderedFlavor(CondCode Cond) {
1594   return ((int)Cond >> 3) & 3;
1595 }
1596 
1597 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1598 /// SetCC operation.
1599 CondCode getSetCCInverse(CondCode Operation, EVT Type);
1600 
1601 inline bool isExtOpcode(unsigned Opcode) {
1602   return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND ||
1603          Opcode == ISD::SIGN_EXTEND;
1604 }
1605 
1606 inline bool isExtVecInRegOpcode(unsigned Opcode) {
1607   return Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
1608          Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
1609          Opcode == ISD::SIGN_EXTEND_VECTOR_INREG;
1610 }
1611 
1612 namespace GlobalISel {
1613 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1614 /// SetCC operation. The U bit of the condition code has different meanings
1615 /// between floating point and integer comparisons and LLT's don't provide
1616 /// this distinction. As such we need to be told whether the comparison is
1617 /// floating point or integer-like. Pointers should use integer-like
1618 /// comparisons.
1619 CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
1620 } // end namespace GlobalISel
1621 
1622 /// Return the operation corresponding to (Y op X) when given the operation
1623 /// for (X op Y).
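/// For example, (X setlt Y) is equivalent to (Y setgt X), so
/// getSetCCSwappedOperands(SETLT) returns SETGT (and SETULT maps to SETUGT).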
1624 CondCode getSetCCSwappedOperands(CondCode Operation);
1625 
1626 /// Return the result of a logical OR between different comparisons of
1627 /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
1628 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1629 CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);
1630 
1631 /// Return the result of a logical AND between different comparisons of
1632 /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
1633 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1634 CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
1635 
1636 } // namespace ISD
1637 
1638 } // namespace llvm
1639 
1640 #endif
1641