66 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
72 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
73 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
74 Attribute::DereferenceableOrNull, Attribute::NoAlias,
75 Attribute::NonNull, Attribute::NoUndef,
76 Attribute::Range, Attribute::NoFPClass})
77 CallerAttrs.removeAttribute(Attr);
79 if (CallerAttrs.hasAttributes())
83 if (CallerAttrs.contains(Attribute::ZExt) ||
84 CallerAttrs.contains(Attribute::SExt))
95 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
122 IsSExt =
Call->paramHasAttr(ArgIdx, Attribute::SExt);
123 IsZExt =
Call->paramHasAttr(ArgIdx, Attribute::ZExt);
124 IsNoExt =
Call->paramHasAttr(ArgIdx, Attribute::NoExt);
125 IsInReg =
Call->paramHasAttr(ArgIdx, Attribute::InReg);
126 IsSRet =
Call->paramHasAttr(ArgIdx, Attribute::StructRet);
127 IsNest =
Call->paramHasAttr(ArgIdx, Attribute::Nest);
128 IsByVal =
Call->paramHasAttr(ArgIdx, Attribute::ByVal);
138 "multiple ABI attributes?");
154std::pair<SDValue, SDValue>
159 if (LibcallImpl == RTLIB::Unsupported)
166 Args.reserve(
Ops.size());
169 for (
unsigned i = 0; i <
Ops.size(); ++i) {
171 Type *Ty = i < OpsTypeOverrides.
size() && OpsTypeOverrides[i]
172 ? OpsTypeOverrides[i]
181 Entry.IsZExt = !Entry.IsSExt;
185 Entry.IsSExt = Entry.IsZExt =
false;
187 Args.push_back(Entry);
194 Type *OrigRetTy = RetTy;
197 bool zeroExtend = !signExtend;
202 signExtend = zeroExtend =
false;
208 Callee, std::move(Args))
218 LLVMContext &Context, std::vector<EVT> &MemOps,
unsigned Limit,
219 const MemOp &
Op,
unsigned DstAS,
unsigned SrcAS,
220 const AttributeList &FuncAttributes,
EVT *LargestVT)
const {
221 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
222 Op.getSrcAlign() <
Op.getDstAlign())
227 if (VT == MVT::Other) {
231 VT = MVT::LAST_INTEGER_VALUETYPE;
232 if (
Op.isFixedDstAlign())
239 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
250 unsigned NumMemOps = 0;
254 while (VTSize >
Size) {
265 else if (NewVT == MVT::i64 &&
277 if (NewVT == MVT::i8)
286 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
288 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
298 if (++NumMemOps > Limit)
301 MemOps.push_back(VT);
326 bool IsSignaling)
const {
331 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
332 &&
"Unsupported setcc type!");
335 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
336 bool ShouldInvertCC =
false;
340 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
341 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
342 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
346 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
347 (VT == MVT::f64) ? RTLIB::UNE_F64 :
348 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
352 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
353 (VT == MVT::f64) ? RTLIB::OGE_F64 :
354 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
358 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
359 (VT == MVT::f64) ? RTLIB::OLT_F64 :
360 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
364 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
365 (VT == MVT::f64) ? RTLIB::OLE_F64 :
366 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
370 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
371 (VT == MVT::f64) ? RTLIB::OGT_F64 :
372 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
375 ShouldInvertCC =
true;
378 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
379 (VT == MVT::f64) ? RTLIB::UO_F64 :
380 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
384 ShouldInvertCC =
true;
387 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
388 (VT == MVT::f64) ? RTLIB::UO_F64 :
389 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
390 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
391 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
392 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
396 ShouldInvertCC =
true;
399 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
400 (VT == MVT::f64) ? RTLIB::OGE_F64 :
401 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
404 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
405 (VT == MVT::f64) ? RTLIB::OGT_F64 :
406 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
409 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
410 (VT == MVT::f64) ? RTLIB::OLE_F64 :
411 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
414 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
415 (VT == MVT::f64) ? RTLIB::OLT_F64 :
416 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
434 if (LC1Impl == RTLIB::Unsupported) {
436 "no libcall available to soften floating-point compare");
440 if (ShouldInvertCC) {
442 CCCode = getSetCCInverse(CCCode, RetVT);
445 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
450 if (LC2Impl == RTLIB::Unsupported) {
452 "no libcall available to soften floating-point compare");
456 "unordered call should be simple boolean");
466 auto Call2 =
makeLibCall(DAG, LC2, RetVT,
Ops, CallOptions, dl, Chain);
469 CCCode = getSetCCInverse(CCCode, RetVT);
470 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
523 if (!TM.shouldAssumeDSOLocal(GV))
543 const APInt &DemandedElts,
546 unsigned Opcode =
Op.getOpcode();
565 if (!Op1C || Op1C->isOpaque())
569 const APInt &
C = Op1C->getAPIntValue();
574 EVT VT =
Op.getValueType();
591 EVT VT =
Op.getValueType();
606 "ShrinkDemandedOp only supports binary operators!");
607 assert(
Op.getNode()->getNumValues() == 1 &&
608 "ShrinkDemandedOp only supports nodes with one result!");
610 EVT VT =
Op.getValueType();
619 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
620 "ShrinkDemandedOp only supports operands that have the same size!");
624 if (!
Op.getNode()->hasOneUse())
640 unsigned Opcode =
Op.getOpcode();
650 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
674 const APInt &DemandedElts,
694 bool AssumeSingleUse)
const {
695 EVT VT =
Op.getValueType();
711 EVT VT =
Op.getValueType();
729 switch (
Op.getOpcode()) {
735 EVT SrcVT = Src.getValueType();
736 EVT DstVT =
Op.getValueType();
742 if (NumSrcEltBits == NumDstEltBits)
747 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
748 unsigned Scale = NumDstEltBits / NumSrcEltBits;
751 for (
unsigned i = 0; i != Scale; ++i) {
752 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
753 unsigned BitOffset = EltOffset * NumSrcEltBits;
754 DemandedSrcBits |=
DemandedBits.extractBits(NumSrcEltBits, BitOffset);
762 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
767 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
768 unsigned Scale = NumSrcEltBits / NumDstEltBits;
772 for (
unsigned i = 0; i != NumElts; ++i)
773 if (DemandedElts[i]) {
774 unsigned Offset = (i % Scale) * NumDstEltBits;
776 DemandedSrcElts.
setBit(i / Scale);
780 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
794 return Op.getOperand(0);
796 return Op.getOperand(1);
807 return Op.getOperand(0);
809 return Op.getOperand(1);
819 return Op.getOperand(0);
821 return Op.getOperand(1);
831 DemandedElts, 1,
Depth + 1))
832 return Op.getOperand(0);
835 DemandedElts, 0,
Depth + 1))
836 return Op.getOperand(1);
842 if (std::optional<unsigned> MaxSA =
845 unsigned ShAmt = *MaxSA;
846 unsigned NumSignBits =
849 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
857 if (std::optional<unsigned> MaxSA =
860 unsigned ShAmt = *MaxSA;
864 unsigned NumSignBits =
903 if (NumSignBits >= (
BitWidth - ExBits + 1))
916 EVT SrcVT = Src.getValueType();
917 EVT DstVT =
Op.getValueType();
918 if (IsLE && DemandedElts == 1 &&
934 !DemandedElts[CIdx->getZExtValue()])
945 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
948 if (DemandedSubElts == 0)
958 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
959 for (
unsigned i = 0; i != NumElts; ++i) {
960 int M = ShuffleMask[i];
961 if (M < 0 || !DemandedElts[i])
964 IdentityLHS &= (M == (int)i);
965 IdentityRHS &= ((M - NumElts) == i);
971 return Op.getOperand(0);
973 return Op.getOperand(1);
993 unsigned Depth)
const {
994 EVT VT =
Op.getValueType();
1007 unsigned Depth)
const {
1021 "SRL or SRA node is required here!");
1024 if (!N1C || !N1C->
isOne())
1071 unsigned ShiftOpc =
Op.getOpcode();
1072 bool IsSigned =
false;
1076 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1081 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1087 if (NumZero >= 2 && NumSigned < NumZero) {
1092 if (NumSigned >= 1) {
1100 if (NumZero >= 1 && NumSigned < NumZero) {
1120 EVT VT =
Op.getValueType();
1134 Add.getOperand(1)) &&
1165 unsigned Depth,
bool AssumeSingleUse)
const {
1168 "Mask size mismatches value type size!");
1173 EVT VT =
Op.getValueType();
1175 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1177 "Unexpected vector size");
1180 APInt DemandedElts = OriginalDemandedElts;
1205 bool HasMultiUse =
false;
1206 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1215 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1224 switch (
Op.getOpcode()) {
1228 if (!DemandedElts[0])
1233 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1240 if (DemandedElts == 1)
1269 EVT MemVT = LD->getMemoryVT();
1286 APInt DemandedVecElts(DemandedElts);
1288 unsigned Idx = CIdx->getZExtValue();
1292 if (!DemandedElts[Idx])
1309 if (!!DemandedVecElts)
1322 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
1324 APInt DemandedSrcElts = DemandedElts;
1325 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
1336 if (!!DemandedSubElts)
1338 if (!!DemandedSrcElts)
1348 if (NewSub || NewSrc) {
1349 NewSub = NewSub ? NewSub :
Sub;
1350 NewSrc = NewSrc ? NewSrc : Src;
1363 if (Src.getValueType().isScalableVector())
1366 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1367 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
1389 EVT SubVT =
Op.getOperand(0).getValueType();
1390 unsigned NumSubVecs =
Op.getNumOperands();
1392 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1393 APInt DemandedSubElts =
1394 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1396 Known2, TLO,
Depth + 1))
1399 if (!!DemandedSubElts)
1409 APInt DemandedLHS, DemandedRHS;
1414 if (!!DemandedLHS || !!DemandedRHS) {
1419 if (!!DemandedLHS) {
1425 if (!!DemandedRHS) {
1437 if (DemandedOp0 || DemandedOp1) {
1438 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1439 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1474 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1497 unsigned NumSubElts =
1518 Known2, TLO,
Depth + 1))
1544 if (DemandedOp0 || DemandedOp1) {
1545 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1546 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1565 Known2, TLO,
Depth + 1)) {
1589 if (DemandedOp0 || DemandedOp1) {
1590 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1591 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1602 for (
int I = 0;
I != 2; ++
I) {
1605 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1606 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1608 for (
int J = 0; J != 2; ++J) {
1661 if (
C->getAPIntValue() == Known2.
One) {
1670 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1682 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1683 uint64_t ShiftAmt = ShiftC->getZExtValue();
1686 : Ones.
lshr(ShiftAmt);
1703 if (!
C || !
C->isAllOnes())
1713 if (DemandedOp0 || DemandedOp1) {
1714 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1715 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1726 Known, TLO,
Depth + 1))
1729 Known2, TLO,
Depth + 1))
1741 Known, TLO,
Depth + 1))
1744 Known2, TLO,
Depth + 1))
1752 Known, TLO,
Depth + 1))
1755 Known2, TLO,
Depth + 1))
1779 DemandedElts, KnownOp0, TLO,
Depth + 1))
1810 if (std::optional<unsigned> KnownSA =
1812 unsigned ShAmt = *KnownSA;
1822 if (std::optional<unsigned> InnerSA =
1824 unsigned C1 = *InnerSA;
1826 int Diff = ShAmt - C1;
1845 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1863 InnerOp, DemandedElts,
Depth + 2)) {
1864 unsigned InnerShAmt = *SA2;
1865 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1867 (InnerBits - InnerShAmt + ShAmt) &&
1895 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1906 Op.getNode()->hasOneUse()) {
1917 assert(DemandedSize <= SmallVTBits &&
1918 "Narrowed below demanded bits?");
1948 Flags.setNoUnsignedWrap(IsNUW);
1953 NewShiftAmt, Flags);
1979 if (std::optional<unsigned> MaxSA =
1981 unsigned ShAmt = *MaxSA;
1982 unsigned NumSignBits =
1985 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1995 if (std::optional<unsigned> KnownSA =
1997 unsigned ShAmt = *KnownSA;
2007 if (std::optional<unsigned> InnerSA =
2009 unsigned C1 = *InnerSA;
2011 int Diff = ShAmt - C1;
2027 if (std::optional<unsigned> InnerSA =
2029 unsigned C1 = *InnerSA;
2031 unsigned Combined = std::min(C1 + ShAmt,
BitWidth - 1);
2043 if (
Op->getFlags().hasExact())
2078 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2092 if (std::optional<unsigned> MaxSA =
2094 unsigned ShAmt = *MaxSA;
2098 unsigned NumSignBits =
2107 DemandedElts,
Depth + 1))
2131 if (std::optional<unsigned> KnownSA =
2133 unsigned ShAmt = *KnownSA;
2140 if (std::optional<unsigned> InnerSA =
2142 unsigned LowBits =
BitWidth - ShAmt;
2147 if (*InnerSA == ShAmt) {
2157 unsigned NumSignBits =
2159 if (NumSignBits > ShAmt)
2169 if (
Op->getFlags().hasExact())
2206 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2216 DemandedElts,
Depth + 1))
2229 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2235 Known, TLO,
Depth + 1))
2251 Known2 <<= (IsFSHL ? Amt : (
BitWidth - Amt));
2252 Known >>= (IsFSHL ? (
BitWidth - Amt) : Amt);
2259 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2261 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2262 if (DemandedOp0 || DemandedOp1) {
2263 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2264 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2280 unsigned MaxShiftAmt =
2312 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2328 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2333 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2352 unsigned Opc =
Op.getOpcode();
2359 unsigned NumSignBits =
2363 if (NumSignBits >= NumDemandedUpperBits)
2429 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2461 unsigned MinSignedBits =
2463 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2466 if (!AlreadySignExtended) {
2484 InputDemandedBits.
setBit(ExVTBits - 1);
2494 if (Known.
Zero[ExVTBits - 1])
2498 if (Known.
One[ExVTBits - 1]) {
2508 EVT HalfVT =
Op.getOperand(0).getValueType();
2522 Known = KnownHi.
concat(KnownLo);
2531 EVT SrcVT = Src.getValueType();
2540 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2551 APInt InDemandedElts = DemandedElts.
zext(InElts);
2562 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2572 EVT SrcVT = Src.getValueType();
2577 APInt InDemandedElts = DemandedElts.
zext(InElts);
2582 InDemandedBits.
setBit(InBits - 1);
2588 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2625 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2635 EVT SrcVT = Src.getValueType();
2642 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2647 APInt InDemandedElts = DemandedElts.
zext(InElts);
2656 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2665 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2678 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2683 switch (Src.getOpcode()) {
2694 if (Src.getNode()->hasOneUse()) {
2706 std::optional<unsigned> ShAmtC =
2708 if (!ShAmtC || *ShAmtC >=
BitWidth)
2710 unsigned ShVal = *ShAmtC;
2740 Known.
Zero |= ~InMask;
2741 Known.
One &= (~Known.Zero);
2747 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2748 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2757 if (CIdx->getAPIntValue().ult(NumSrcElts))
2764 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2773 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2775 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, DemandedSrc, Idx);
2789 EVT SrcVT = Src.getValueType();
2801 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2811 unsigned Scale =
BitWidth / NumSrcEltBits;
2814 for (
unsigned i = 0; i != Scale; ++i) {
2815 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2816 unsigned BitOffset = EltOffset * NumSrcEltBits;
2817 DemandedSrcBits |=
DemandedBits.extractBits(NumSrcEltBits, BitOffset);
2824 APInt KnownSrcUndef, KnownSrcZero;
2826 KnownSrcZero, TLO,
Depth + 1))
2831 KnownSrcBits, TLO,
Depth + 1))
2833 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2835 unsigned Scale = NumSrcEltBits /
BitWidth;
2839 for (
unsigned i = 0; i != NumElts; ++i)
2840 if (DemandedElts[i]) {
2843 DemandedSrcElts.
setBit(i / Scale);
2847 APInt KnownSrcUndef, KnownSrcZero;
2849 KnownSrcZero, TLO,
Depth + 1))
2855 KnownSrcBits, TLO,
Depth + 1))
2861 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2883 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2899 if (
Op.getOperand(0).getValueType() !=
Op.getOperand(1).getValueType())
2907 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2912 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2921 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2938 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2940 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2941 if (DemandedOp0 || DemandedOp1) {
2942 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2943 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2957 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2958 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2970 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2997 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
3000 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
3001 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
3005 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
3006 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
3014 Op.getOpcode() !=
ISD::SUB, Flags.hasNoSignedWrap(),
3015 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
3036 Known.
Zero |= SignMask;
3037 Known.
One &= ~SignMask;
3054 Known, TLO,
Depth + 1) ||
3068 Known.
Zero &= ~SignMask0;
3069 Known.
One &= ~SignMask0;
3084 Known.
Zero ^= SignMask;
3085 Known.
One ^= SignMask;
3096 if (
Op.getValueType().isScalableVector())
3115 auto *C = dyn_cast<ConstantSDNode>(V);
3116 return C && C->isOpaque();
3137 const APInt &DemandedElts,
3143 APInt KnownUndef, KnownZero;
3157 const APInt &UndefOp0,
3158 const APInt &UndefOp1) {
3161 "Vector binop only");
3166 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3168 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3169 const APInt &UndefVals) {
3170 if (UndefVals[Index])
3186 for (
unsigned i = 0; i != NumElts; ++i) {
3205 bool AssumeSingleUse)
const {
3206 EVT VT =
Op.getValueType();
3207 unsigned Opcode =
Op.getOpcode();
3208 APInt DemandedElts = OriginalDemandedElts;
3222 "Mask size mismatches value type element count!");
3231 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3235 if (DemandedElts == 0) {
3250 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3255 if (NewOp0 || NewOp1) {
3258 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3266 if (!DemandedElts[0]) {
3275 EVT SrcVT = Src.getValueType();
3282 for (
unsigned I = 0;
I != NumElts; ++
I) {
3283 if (DemandedElts[
I]) {
3284 unsigned Offset =
I * EltSize;
3297 if (NumSrcElts == NumElts)
3299 KnownZero, TLO,
Depth + 1);
3301 APInt SrcDemandedElts, SrcZero, SrcUndef;
3305 if ((NumElts % NumSrcElts) == 0) {
3306 unsigned Scale = NumElts / NumSrcElts;
3318 for (
unsigned i = 0; i != NumElts; ++i)
3319 if (DemandedElts[i]) {
3320 unsigned Ofs = (i % Scale) * EltSizeInBits;
3321 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3333 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3337 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3338 unsigned Elt = Scale * SrcElt + SubElt;
3339 if (DemandedElts[Elt])
3347 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3348 if (SrcDemandedElts[i]) {
3350 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3352 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3360 if ((NumSrcElts % NumElts) == 0) {
3361 unsigned Scale = NumSrcElts / NumElts;
3369 for (
unsigned i = 0; i != NumElts; ++i) {
3370 if (DemandedElts[i]) {
3399 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3401 bool Updated =
false;
3402 for (
unsigned i = 0; i != NumElts; ++i) {
3413 for (
unsigned i = 0; i != NumElts; ++i) {
3415 if (
SrcOp.isUndef()) {
3417 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3425 EVT SubVT =
Op.getOperand(0).getValueType();
3426 unsigned NumSubVecs =
Op.getNumOperands();
3428 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3431 APInt SubUndef, SubZero;
3435 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3436 KnownZero.
insertBits(SubZero, i * NumSubElts);
3441 bool FoundNewSub =
false;
3443 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3447 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3448 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3449 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3465 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3467 APInt DemandedSrcElts = DemandedElts;
3468 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
3471 if (!DemandedSubElts)
3474 APInt SubUndef, SubZero;
3480 if (!DemandedSrcElts && !Src.isUndef())
3494 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3497 if (NewSrc || NewSub) {
3498 NewSrc = NewSrc ? NewSrc : Src;
3499 NewSub = NewSub ? NewSub :
Sub;
3501 NewSub,
Op.getOperand(2));
3510 if (Src.getValueType().isScalableVector())
3513 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3514 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
3516 APInt SrcUndef, SrcZero;
3526 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3542 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3543 unsigned Idx = CIdx->getZExtValue();
3544 if (!DemandedElts[Idx])
3547 APInt DemandedVecElts(DemandedElts);
3550 KnownZero, TLO,
Depth + 1))
3559 APInt VecUndef, VecZero;
3573 APInt UndefSel, ZeroSel;
3579 APInt DemandedLHS(DemandedElts);
3580 APInt DemandedRHS(DemandedElts);
3581 APInt UndefLHS, ZeroLHS;
3582 APInt UndefRHS, ZeroRHS;
3590 KnownUndef = UndefLHS & UndefRHS;
3591 KnownZero = ZeroLHS & ZeroRHS;
3595 APInt DemandedSel = DemandedElts & ~KnownZero;
3596 if (DemandedSel != DemandedElts)
3609 APInt DemandedLHS(NumElts, 0);
3610 APInt DemandedRHS(NumElts, 0);
3611 for (
unsigned i = 0; i != NumElts; ++i) {
3612 int M = ShuffleMask[i];
3613 if (M < 0 || !DemandedElts[i])
3615 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3616 if (M < (
int)NumElts)
3619 DemandedRHS.
setBit(M - NumElts);
3625 bool FoldLHS = !DemandedLHS && !LHS.isUndef();
3626 bool FoldRHS = !DemandedRHS && !RHS.isUndef();
3627 if (FoldLHS || FoldRHS) {
3628 LHS = FoldLHS ? TLO.
DAG.
getUNDEF(LHS.getValueType()) : LHS;
3629 RHS = FoldRHS ? TLO.
DAG.
getUNDEF(RHS.getValueType()) : RHS;
3636 APInt UndefLHS, ZeroLHS;
3637 APInt UndefRHS, ZeroRHS;
3646 bool Updated =
false;
3647 bool IdentityLHS =
true, IdentityRHS =
true;
3649 for (
unsigned i = 0; i != NumElts; ++i) {
3650 int &M = NewMask[i];
3653 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3654 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3658 IdentityLHS &= (M < 0) || (M == (
int)i);
3659 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3664 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3672 for (
unsigned i = 0; i != NumElts; ++i) {
3673 int M = ShuffleMask[i];
3676 }
else if (M < (
int)NumElts) {
3682 if (UndefRHS[M - NumElts])
3684 if (ZeroRHS[M - NumElts])
3693 APInt SrcUndef, SrcZero;
3695 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3696 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3704 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3705 DemandedSrcElts == 1) {
3718 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3719 Op->isOnlyUserOf(Src.getNode()) &&
3720 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3722 EVT SrcVT = Src.getValueType();
3736 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3750 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3751 APInt UndefLHS, ZeroLHS;
3773 APInt UndefRHS, ZeroRHS;
3777 APInt UndefLHS, ZeroLHS;
3782 KnownZero = ZeroLHS & ZeroRHS;
3788 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3800 APInt UndefRHS, ZeroRHS;
3804 APInt UndefLHS, ZeroLHS;
3809 KnownZero = ZeroLHS;
3810 KnownUndef = UndefLHS & UndefRHS;
3815 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3826 APInt SrcUndef, SrcZero;
3840 KnownUndef &= DemandedElts;
3841 KnownZero &= DemandedElts;
3846 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3853 KnownZero |= SrcZero;
3854 KnownUndef &= SrcUndef;
3855 KnownUndef &= ~KnownZero;
3859 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3867 KnownZero, TLO,
Depth + 1))
3872 Op.getOperand(0), DemandedElts, TLO.
DAG,
Depth + 1))
3887 KnownZero, TLO,
Depth + 1))
3894 KnownZero, TLO,
Depth))
3900 TLO,
Depth, AssumeSingleUse))
3906 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3920 const APInt &DemandedElts,
3922 unsigned Depth)
const {
3927 "Should use MaskedValueIsZero if you don't know whether Op"
3928 " is a target node!");
3935 unsigned Depth)
const {
3942 unsigned Depth)
const {
3954 unsigned Depth)
const {
3963 unsigned Depth)
const {
3968 "Should use ComputeNumSignBits if you don't know whether Op"
3969 " is a target node!");
3986 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3987 " is a target node!");
3998 "Should use SimplifyDemandedBits if you don't know whether Op"
3999 " is a target node!");
4012 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
4013 " is a target node!");
4046 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
4047 " is a target node!");
4054 return DAG.isGuaranteedNotToBeUndefOrPoison(V, Kind, Depth + 1);
4065 "Should use canCreateUndefOrPoison if you don't know whether Op"
4066 " is a target node!");
4073 const APInt &DemandedElts,
4075 unsigned Depth)
const {
4080 "Should use computeKnownFPClass if you don't know whether Op"
4081 " is a target node!");
4085 const APInt &DemandedElts,
4088 unsigned Depth)
const {
4093 "Should use isKnownNeverNaN if you don't know whether Op"
4094 " is a target node!");
4099 const APInt &DemandedElts,
4102 unsigned Depth)
const {
4107 "Should use isSplatValue if you don't know whether Op"
4108 " is a target node!");
4123 CVal = CN->getAPIntValue();
4124 EltWidth =
N.getValueType().getScalarSizeInBits();
4131 CVal = CVal.
trunc(EltWidth);
4137 return CVal.
isOne();
4179 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
4182 return N->isAllOnes() && SExt;
4191 DAGCombinerInfo &DCI)
const {
4220 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4223 AndC->getAPIntValue().getActiveBits());
4250 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4258 if (DCI.isBeforeLegalizeOps() ||
4287 DAGCombinerInfo &DCI)
const {
4291 SelectionDAG &DAG = DCI.DAG;
4328SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4330 const SDLoc &
DL)
const {
4341 ConstantSDNode *C01;
4370 auto checkConstants = [&
I1, &I01]() ->
bool {
4375 if (checkConstants()) {
4383 if (!checkConstants())
4389 const unsigned KeptBits =
I1.logBase2();
4390 const unsigned KeptBitsMinusOne = I01.
logBase2();
4393 if (KeptBits != (KeptBitsMinusOne + 1))
4398 SelectionDAG &DAG = DCI.DAG;
4407 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4411SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4413 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4415 "Should be a comparison with 0.");
4417 "Valid only for [in]equality comparisons.");
4419 unsigned NewShiftOpcode;
4422 SelectionDAG &DAG = DCI.DAG;
4425 auto Match = [&NewShiftOpcode, &
X, &
C, &
Y, &DAG,
this](
SDValue V) {
4429 unsigned OldShiftOpcode =
V.getOpcode();
4430 switch (OldShiftOpcode) {
4442 C =
V.getOperand(0);
4443 ConstantSDNode *CC =
4447 Y =
V.getOperand(1);
4449 ConstantSDNode *XC =
4452 X, XC, CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4469 EVT VT =
X.getValueType();
4484 DAGCombinerInfo &DCI)
const {
4487 "Unexpected binop");
4493 SelectionDAG &DAG = DCI.DAG;
4515 if (!DCI.isCalledByLegalizer())
4516 DCI.AddToWorklist(YShl1.
getNode());
4531 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4534 EVT CTVT = CTPOP.getValueType();
4535 SDValue CTOp = CTPOP.getOperand(0);
4555 for (
unsigned i = 0; i <
Passes; i++) {
4604 auto getRotateSource = [](
SDValue X) {
4606 return X.getOperand(0);
4613 if (
SDValue R = getRotateSource(N0))
4646 if (!C1 || !C1->
isZero())
4671 if (
Or.getOperand(0) ==
Other) {
4672 X =
Or.getOperand(0);
4673 Y =
Or.getOperand(1);
4676 if (
Or.getOperand(1) ==
Other) {
4677 X =
Or.getOperand(1);
4678 Y =
Or.getOperand(0);
4688 if (matchOr(F0, F1)) {
4695 if (matchOr(F1, F0)) {
4711 const SDLoc &dl)
const {
4721 bool N0ConstOrSplat =
4723 bool N1ConstOrSplat =
4731 if (N0ConstOrSplat && !N1ConstOrSplat &&
4734 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4740 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4745 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4754 const APInt &C1 = N1C->getAPIntValue();
4770 !Attr.hasFnAttr(Attribute::MinSize)) {
4774 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4820 const APInt &C1 = N1C->getAPIntValue();
4836 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4837 MinBits =
C->getAPIntValue().countr_one();
4848 MinBits = LN0->getMemoryVT().getSizeInBits();
4852 MinBits = LN0->getMemoryVT().getSizeInBits();
4863 MinBits >= ReqdBits) {
4868 if (MinBits == 1 && C1 == 1)
4887 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4921 unsigned bestWidth = 0, bestOffset = 0;
4922 if (Lod->isSimple() && Lod->isUnindexed() &&
4923 (Lod->getMemoryVT().isByteSized() ||
4925 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4927 unsigned maskWidth = origWidth;
4931 origWidth = Lod->getMemoryVT().getSizeInBits();
4935 for (
unsigned width = 8; width < origWidth; width *= 2) {
4940 unsigned maxOffset = origWidth - width;
4941 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4942 if (Mask.isSubsetOf(newMask)) {
4943 unsigned ptrOffset =
4945 unsigned IsFast = 0;
4946 assert((ptrOffset % 8) == 0 &&
"Non-Bytealigned pointer offset");
4951 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4952 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4954 bestOffset = ptrOffset / 8;
4955 bestMask = Mask.lshr(offset);
4968 SDValue Ptr = Lod->getBasePtr();
4969 if (bestOffset != 0)
4972 DAG.
getLoad(newVT, dl, Lod->getChain(), Ptr,
4973 Lod->getPointerInfo().getWithOffset(bestOffset),
4974 Lod->getBaseAlign());
5053 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
5060 return DAG.
getSetCC(dl, VT, ZextOp,
5062 }
else if ((N1C->isZero() || N1C->isOne()) &&
5109 return DAG.
getSetCC(dl, VT, Val, N1,
5112 }
else if (N1C->isOne()) {
5195 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
5202 const APInt &C1 = N1C->getAPIntValue();
5204 APInt MinVal, MaxVal;
5226 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5246 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5294 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5295 VT, N0, N1,
Cond, DCI, dl))
5302 bool CmpZero = N1C->isZero();
5303 bool CmpNegOne = N1C->isAllOnes();
5304 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5307 unsigned EltBits = V.getScalarValueSizeInBits();
5308 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5316 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5319 Hi = RHS.getOperand(0);
5324 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5327 Hi = LHS.getOperand(0);
5335 unsigned HalfBits = EltBits / 2;
5346 if (IsConcat(N0,
Lo,
Hi))
5347 return MergeConcat(
Lo,
Hi);
5385 const APInt &C1 = N1C->getAPIntValue();
5400 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5401 if (AndRHS->getAPIntValue().isPowerOf2() &&
5408 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5428 const APInt &AndRHSC = AndRHS->getAPIntValue();
5480 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5488 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5509 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5528 if (CFP->getValueAPF().isInfinity()) {
5529 bool IsNegInf = CFP->getValueAPF().isNegative();
5540 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5549 "Integer types should be handled by FoldSetCC");
5555 if (UOF ==
unsigned(EqTrue))
5560 if (NewCond !=
Cond &&
5563 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5570 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5607 bool LegalRHSImm =
false;
5615 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5623 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5633 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5638 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5647 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5653 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5656 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5659 if (
SDValue V = foldSetCCWithOr(VT, N0, N1,
Cond, dl, DCI))
5668 if (!
isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) {
5670 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5673 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5686 N0 = DAG.
getNOT(dl, Temp, OpVT);
5695 Temp = DAG.
getNOT(dl, N0, OpVT);
5702 Temp = DAG.
getNOT(dl, N1, OpVT);
5709 Temp = DAG.
getNOT(dl, N0, OpVT);
5716 Temp = DAG.
getNOT(dl, N1, OpVT);
5725 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5760 GA = GASD->getGlobal();
5761 Offset += GASD->getOffset();
5765 if (
N->isAnyAdd()) {
5770 Offset += V->getSExtValue();
5775 Offset += V->getSExtValue();
5796 unsigned S = Constraint.
size();
5799 switch (Constraint[0]) {
5830 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5831 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5859 std::vector<SDValue> &
Ops,
5862 if (Constraint.
size() > 1)
5865 char ConstraintLetter = Constraint[0];
5866 switch (ConstraintLetter) {
5886 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5896 if (ConstraintLetter !=
'n') {
5899 GA->getValueType(0),
5900 Offset + GA->getOffset()));
5905 BA->getBlockAddress(), BA->getValueType(0),
5906 Offset + BA->getOffset(), BA->getTargetFlags()));
5914 const unsigned OpCode =
Op.getOpcode();
5917 Op =
Op.getOperand(1);
5921 Op =
Op.getOperand(0);
5938std::pair<unsigned, const TargetRegisterClass *>
5944 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5949 std::pair<unsigned, const TargetRegisterClass *> R =
5961 std::pair<unsigned, const TargetRegisterClass *> S =
5962 std::make_pair(PR, RC);
6007 unsigned maCount = 0;
6013 unsigned LabelNo = 0;
6016 ConstraintOperands.emplace_back(std::move(CI));
6020 if (OpInfo.multipleAlternatives.size() > maCount)
6021 maCount = OpInfo.multipleAlternatives.size();
6023 OpInfo.ConstraintVT = MVT::Other;
6026 switch (OpInfo.Type) {
6029 if (OpInfo.isIndirect) {
6030 OpInfo.CallOperandVal =
Call.getArgOperand(ArgNo);
6036 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
6041 assert(ResNo == 0 &&
"Asm only has one result!");
6049 OpInfo.CallOperandVal =
Call.getArgOperand(ArgNo);
6060 if (OpInfo.CallOperandVal) {
6061 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
6062 if (OpInfo.isIndirect) {
6063 OpTy =
Call.getParamElementType(ArgNo);
6064 assert(OpTy &&
"Indirect operand must have elementtype attribute");
6069 if (STy->getNumElements() == 1)
6070 OpTy = STy->getElementType(0);
6075 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
6096 if (!ConstraintOperands.empty()) {
6098 unsigned bestMAIndex = 0;
6099 int bestWeight = -1;
6105 for (maIndex = 0; maIndex < maCount; ++maIndex) {
6107 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6108 cIndex != eIndex; ++cIndex) {
6117 if (OpInfo.hasMatchingInput()) {
6119 if (OpInfo.ConstraintVT !=
Input.ConstraintVT) {
6120 if ((OpInfo.ConstraintVT.isInteger() !=
6121 Input.ConstraintVT.isInteger()) ||
6122 (OpInfo.ConstraintVT.getSizeInBits() !=
6123 Input.ConstraintVT.getSizeInBits())) {
6134 weightSum += weight;
6137 if (weightSum > bestWeight) {
6138 bestWeight = weightSum;
6139 bestMAIndex = maIndex;
6146 cInfo.selectAlternative(bestMAIndex);
6151 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6152 cIndex != eIndex; ++cIndex) {
6159 if (OpInfo.hasMatchingInput()) {
6162 if (OpInfo.ConstraintVT !=
Input.ConstraintVT) {
6163 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6165 OpInfo.ConstraintVT);
6166 std::pair<unsigned, const TargetRegisterClass *> InputRC =
6168 Input.ConstraintVT);
6169 const bool OutOpIsIntOrFP = OpInfo.ConstraintVT.isInteger() ||
6170 OpInfo.ConstraintVT.isFloatingPoint();
6171 const bool InOpIsIntOrFP =
Input.ConstraintVT.isInteger() ||
6172 Input.ConstraintVT.isFloatingPoint();
6173 if ((OutOpIsIntOrFP != InOpIsIntOrFP) ||
6174 (MatchRC.second != InputRC.second)) {
6176 " with a matching output constraint of"
6177 " incompatible type!");
6183 return ConstraintOperands;
6218 if (maIndex >= (
int)
info.multipleAlternatives.size())
6219 rCodes = &
info.Codes;
6221 rCodes = &
info.multipleAlternatives[maIndex].Codes;
6225 for (
const std::string &rCode : *rCodes) {
6228 if (weight > BestWeight)
6229 BestWeight = weight;
6242 Value *CallOperandVal =
info.CallOperandVal;
6245 if (!CallOperandVal)
6248 switch (*constraint) {
6312 Ret.
reserve(OpInfo.Codes.size());
6345 "need immediate or other");
6350 std::vector<SDValue> ResultOps;
6352 return !ResultOps.empty();
6360 assert(!OpInfo.Codes.empty() &&
"Must have at least one constraint");
6363 if (OpInfo.Codes.size() == 1) {
6364 OpInfo.ConstraintCode = OpInfo.Codes[0];
6371 unsigned BestIdx = 0;
6372 for (
const unsigned E =
G.size();
6379 if (BestIdx + 1 == E) {
6385 OpInfo.ConstraintCode =
G[BestIdx].first;
6386 OpInfo.ConstraintType =
G[BestIdx].second;
6390 if (OpInfo.ConstraintCode ==
"X" && OpInfo.CallOperandVal) {
6394 Value *v = OpInfo.CallOperandVal;
6400 OpInfo.ConstraintCode =
"i";
6407 OpInfo.ConstraintCode = Repl;
6421 EVT VT =
N->getValueType(0);
6425 bool UseSRA =
false;
6432 EVT CT =
C->getValueType(0);
6433 APInt Divisor =
C->getAPIntValue();
6455 "Expected matchUnaryPredicate to return one element for scalable "
6462 Factor = Factors[0];
6480 EVT VT =
N->getValueType(0);
6484 bool UseSRL =
false;
6491 EVT CT =
C->getValueType(0);
6492 APInt Divisor =
C->getAPIntValue();
6517 "Expected matchUnaryPredicate to return one element for scalable "
6524 Factor = Factors[0];
6567 EVT VT =
N->getValueType(0);
6603 bool IsAfterLegalization,
6604 bool IsAfterLegalTypes,
6609 if (
N->getFlags().hasExact())
6612 EVT VT =
N->getValueType(0);
6651 if (
isTypeLegal(VT) && !HasMULHS && !HasSMUL_LOHI && MulVT ==
EVT()) {
6663 if (!HasMULHS && !HasSMUL_LOHI && MulVT ==
EVT())
6669 if (IsAfterLegalTypes && VT.
isVector()) {
6686 APInt Divisor =
C->getAPIntValue().trunc(EltBits);
6688 int NumeratorFactor = 0;
6699 NumeratorFactor = 1;
6702 NumeratorFactor = -1;
6721 SDValue MagicFactor, Factor, Shift, ShiftMask;
6729 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6730 "Expected matchUnaryPredicate to return one element for scalable "
6738 MagicFactor = MagicFactors[0];
6739 Factor = Factors[0];
6741 ShiftMask = ShiftMasks[0];
6762 SDValue Q = GetMULHS(N0, MagicFactor);
6792 bool IsAfterLegalization,
6793 bool IsAfterLegalTypes,
6798 if (
N->getFlags().hasExact())
6801 EVT VT =
N->getValueType(0);
6840 if (
isTypeLegal(VT) && !HasMULHU && !HasUMUL_LOHI && MulVT ==
EVT()) {
6852 if (!HasMULHU && !HasUMUL_LOHI && MulVT ==
EVT())
6865 if (IsAfterLegalTypes && VT.
isVector()) {
6877 const EVT WideSVT = MVT::i64;
6878 const bool HasWideMULHU =
6881 const bool HasWideUMUL_LOHI =
6884 const bool AllowWiden = (HasWideMULHU || HasWideUMUL_LOHI);
6886 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6887 bool UseWiden =
false;
6895 APInt Divisor =
C->getAPIntValue().trunc(EltBits);
6897 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6901 if (Divisor.
isOne()) {
6902 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6903 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6907 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()),
6919 "We shouldn't generate an undefined shift!");
6921 "We shouldn't generate an undefined shift!");
6923 "Unexpected pre-shift");
6930 UseNPQ |= magics.
IsAdd;
6931 UsePreShift |= magics.
PreShift != 0;
6947 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6955 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6956 "Expected matchUnaryPredicate to return one for scalable vectors");
6963 PreShift = PreShifts[0];
6964 MagicFactor = MagicFactors[0];
6965 PostShift = PostShifts[0];
6978 assert(HasWideUMUL_LOHI);
6981 WideN0, MagicFactor);
7013 Q = GetMULHU(Q, MagicFactor);
7026 NPQ = GetMULHU(NPQ, NPQFactor);
7045 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
7059 if (SplatValue != Values.
end()) {
7064 Replacement = *SplatValue;
7068 if (!AlternativeReplacement)
7071 Replacement = AlternativeReplacement;
7081SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT,
SDValue REMNode,
7084 DAGCombinerInfo &DCI,
7085 const SDLoc &
DL)
const {
7087 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
7089 for (SDNode *
N : Built)
7090 DCI.AddToWorklist(
N);
7098TargetLowering::prepareUREMEqFold(EVT SETCCVT,
SDValue REMNode,
7100 DAGCombinerInfo &DCI,
const SDLoc &
DL,
7101 SmallVectorImpl<SDNode *> &Created)
const {
7109 "Only applicable for (in)equality comparisons.");
7111 SelectionDAG &DAG = DCI.DAG;
7122 bool ComparingWithAllZeros =
true;
7123 bool AllComparisonsWithNonZerosAreTautological =
true;
7124 bool HadTautologicalLanes =
false;
7125 bool AllLanesAreTautological =
true;
7126 bool HadEvenDivisor =
false;
7127 bool AllDivisorsArePowerOfTwo =
true;
7128 bool HadTautologicalInvertedLanes =
false;
7131 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
7137 const APInt &
Cmp = CCmp->getAPIntValue();
7139 ComparingWithAllZeros &=
Cmp.isZero();
7145 bool TautologicalInvertedLane =
D.ule(Cmp);
7146 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
7151 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
7152 HadTautologicalLanes |= TautologicalLane;
7153 AllLanesAreTautological &= TautologicalLane;
7159 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
7162 unsigned K =
D.countr_zero();
7163 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7164 APInt D0 =
D.lshr(K);
7167 HadEvenDivisor |= (
K != 0);
7170 AllDivisorsArePowerOfTwo &= D0.
isOne();
7174 unsigned W =
D.getBitWidth();
7176 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7189 "We are expecting that K is always less than all-ones for ShSVT");
7192 if (TautologicalLane) {
7216 if (AllLanesAreTautological)
7221 if (AllDivisorsArePowerOfTwo)
7226 if (HadTautologicalLanes) {
7241 "Expected matchBinaryPredicate to return one element for "
7252 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
7256 "Expecting that the types on LHS and RHS of comparisons match.");
7266 if (HadEvenDivisor) {
7279 if (!HadTautologicalInvertedLanes)
7285 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7292 SDValue TautologicalInvertedChannels =
7302 DL, SETCCVT, SETCCVT);
7304 Replacement, NewCC);
7312 TautologicalInvertedChannels);
7322SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT,
SDValue REMNode,
7325 DAGCombinerInfo &DCI,
7326 const SDLoc &
DL)
const {
7328 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
7330 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
7331 for (SDNode *
N : Built)
7332 DCI.AddToWorklist(
N);
7340TargetLowering::prepareSREMEqFold(EVT SETCCVT,
SDValue REMNode,
7342 DAGCombinerInfo &DCI,
const SDLoc &
DL,
7343 SmallVectorImpl<SDNode *> &Created)
const {
7367 "Only applicable for (in)equality comparisons.");
7369 SelectionDAG &DAG = DCI.DAG;
7383 if (!CompTarget || !CompTarget->
isZero())
7386 bool HadOneDivisor =
false;
7387 bool AllDivisorsAreOnes =
true;
7388 bool HadEvenDivisor =
false;
7389 bool AllDivisorsArePowerOfTwo =
true;
7392 auto BuildSREMPattern = [&](ConstantSDNode *
C) {
7401 APInt
D =
C->getAPIntValue().abs();
7404 HadOneDivisor |=
D.isOne();
7405 AllDivisorsAreOnes &=
D.isOne();
7408 unsigned K =
D.countr_zero();
7409 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7410 APInt D0 =
D.
lshr(K);
7413 HadEvenDivisor |= (
K != 0);
7417 AllDivisorsArePowerOfTwo &= D0.
isOne();
7421 unsigned W =
D.getBitWidth();
7423 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7433 "We are expecting that A is always less than all-ones for SVT");
7435 "We are expecting that K is always less than all-ones for ShSVT");
7472 if (AllDivisorsAreOnes)
7477 if (AllDivisorsArePowerOfTwo)
7480 SDValue PVal, AVal, KVal, QVal;
7482 if (HadOneDivisor) {
7502 QAmts.
size() == 1 &&
7503 "Expected matchUnaryPredicate to return one element for scalable "
7531 if (HadEvenDivisor) {
7549 EVT VT =
Op.getValueType();
7574 bool LegalOps,
bool OptForSize,
7576 unsigned Depth)
const {
7578 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7580 return Op.getOperand(0);
7590 EVT VT =
Op.getValueType();
7591 unsigned Opcode =
Op.getOpcode();
7601 auto RemoveDeadNode = [&](
SDValue N) {
7602 if (
N &&
N.getNode()->use_empty())
7611 std::list<HandleSDNode> Handles;
7622 if (LegalOps && !IsOpLegal)
7651 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7659 return N.isUndef() ||
7660 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
7664 if (LegalOps && !IsOpLegal)
7681 if (!Flags.hasNoSignedZeros())
7695 Handles.emplace_back(NegX);
7706 if (NegX && (CostX <= CostY)) {
7710 RemoveDeadNode(NegY);
7719 RemoveDeadNode(NegX);
7726 if (!Flags.hasNoSignedZeros())
7751 Handles.emplace_back(NegX);
7762 if (NegX && (CostX <= CostY)) {
7766 RemoveDeadNode(NegY);
7772 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7780 RemoveDeadNode(NegX);
7788 if (!Flags.hasNoSignedZeros())
7791 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7800 Handles.emplace_back(NegZ);
7808 Handles.emplace_back(NegX);
7819 if (NegX && (CostX <= CostY)) {
7820 Cost = std::min(CostX, CostZ);
7823 RemoveDeadNode(NegY);
7829 Cost = std::min(CostY, CostZ);
7832 RemoveDeadNode(NegX);
7842 return DAG.
getNode(Opcode,
DL, VT, NegV);
7858 RemoveDeadNode(NegLHS);
7863 Handles.emplace_back(NegLHS);
7876 RemoveDeadNode(NegLHS);
7877 RemoveDeadNode(NegRHS);
7881 Cost = std::min(CostLHS, CostRHS);
7882 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7911 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7923 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7926 Hi =
Lo.getValue(1);
7952 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7953 Result.push_back(
Lo);
7954 Result.push_back(
Hi);
7957 Result.push_back(Zero);
7958 Result.push_back(Zero);
7969 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7970 Result.push_back(
Lo);
7971 Result.push_back(
Hi);
7976 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7991 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7994 Result.push_back(
Lo);
8001 Result.push_back(
Hi);
8014 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
8021 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
8074 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
8075 DAG, Kind, LL, LH, RL, RH);
8077 assert(Result.size() == 2);
8112bool TargetLowering::expandUDIVREMByConstantViaUREMDecomposition(
8115 unsigned Opcode =
N->getOpcode();
8116 EVT VT =
N->getValueType(0);
8124 unsigned TrailingZeros = 0;
8133 if (Divisor.
uge(HalfMaxPlus1))
8138 unsigned BestChunkWidth = 0, AltChunkWidth = 0;
8139 for (
unsigned I = HBitWidth,
E = HBitWidth / 2;
I >
E; --
I) {
8141 if (
I == HBitWidth - 1)
8153 if (
I != HBitWidth &&
Mod == Divisor - 1)
8157 bool Alternate =
false;
8158 if (!BestChunkWidth) {
8162 BestChunkWidth = AltChunkWidth;
8167 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
8169 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
8174 assert(ShiftAmt > 0 && ShiftAmt < HBitWidth);
8192 if (ShiftAmt < HBitWidth) {
8193 Lo = GetFSHR(
Lo,
Hi, ShiftAmt);
8196 }
else if (ShiftAmt == HBitWidth) {
8209 SDValue PartialRemL, PartialRemH;
8210 if (TrailingZeros && Opcode !=
ISD::UDIV) {
8212 if (TrailingZeros < HBitWidth) {
8216 }
else if (TrailingZeros == HBitWidth) {
8231 if (BestChunkWidth == HBitWidth) {
8234 ShiftRight(LL, LH, TrailingZeros);
8240 SDVTList VTList = DAG.
getVTList(HiLoVT, SetCCType);
8263 for (
unsigned I = 0;
I <
BitWidth - TrailingZeros;
I += BestChunkWidth) {
8265 unsigned Shift =
I + TrailingZeros;
8269 else if (Shift >= HBitWidth)
8274 Chunk = GetFSHR(LL, LH, Shift);
8276 if (
I + BestChunkWidth <
BitWidth - TrailingZeros)
8282 unsigned ChunkNum =
I / BestChunkWidth;
8283 unsigned Opc = (Alternate && (ChunkNum % 2) != 0) ?
ISD::SUB : ISD::
ADD;
8284 Sum = DAG.
getNode(
Opc, dl, HiLoVT, Sum, Chunk);
8316 if (BestChunkWidth != HBitWidth)
8317 ShiftRight(LL, LH, TrailingZeros);
8334 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
8342 if (TrailingZeros) {
8343 if (TrailingZeros < HBitWidth) {
8355 }
else if (TrailingZeros == HBitWidth) {
8377bool TargetLowering::expandUDIVREMByConstantViaUMulHiMagic(
8378 SDNode *
N,
const APInt &Divisor, SmallVectorImpl<SDValue> &Result,
8385 assert(!Divisor.
isOne() &&
"Magic algorithm does not work for division by 1");
8390 SmallVectorImpl<SDValue> &
Result) {
8394 return expandMUL_LOHI(
Opc, VT,
DL,
LHS,
RHS, Result, HiLoVT, DAG,
8404 DAG.
getVTList(HiLoVT, MVT::i1), LL, RL);
8409 DAG.
getVTList(HiLoVT, MVT::i1), LH, RH, Overflow);
8411 return std::make_pair(OutL, OutH);
8417 if (Shift < HBitWidth) {
8421 return std::make_pair(ResL, ResH);
8424 if (Shift == HBitWidth)
8425 return std::make_pair(LH, Zero);
8426 assert(Shift - HBitWidth < HBitWidth &&
8427 "We shouldn't generate an undefined shift");
8436 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
8438 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
8444 std::tie(QL, QH) = MakeSRLLong(QL, QH, Magics.
PreShift);
8454 auto [NPQL, NPQH] = MakeAddSubLong(
ISD::SUB, LL, LH, QL, QH);
8455 std::tie(NPQL, NPQH) = MakeSRLLong(NPQL, NPQH, 1);
8456 std::tie(QL, QH) = MakeAddSubLong(
ISD::ADD, NPQL, NPQH, QL, QH);
8460 std::tie(QL, QH) = MakeSRLLong(QL, QH, Magics.
PostShift);
8462 unsigned Opcode =
N->getOpcode();
8470 if (!MakeMUL_LOHIByConst(
ISD::MUL, QL, QH, Divisor, MulResult))
8476 MakeAddSubLong(
ISD::SUB, LL, LH, MulResult[0], MulResult[1]);
8489 unsigned Opcode =
N->getOpcode();
8496 "Unexpected opcode");
8502 APInt Divisor = CN->getAPIntValue();
8518 if (expandUDIVREMByConstantViaUREMDecomposition(
N, Divisor, Result, HiLoVT,
8522 if (expandUDIVREMByConstantViaUMulHiMagic(
N, Divisor, Result, HiLoVT, DAG, LL,
8538 EVT VT =
Node->getValueType(0);
8548 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
8551 EVT ShVT = Z.getValueType();
8557 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8558 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
8559 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
8561 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
8569 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
8573 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
8576 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8577 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
8582 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
8584 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
8587 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
8588 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
8591 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
8596 if (
Node->isVPOpcode())
8599 EVT VT =
Node->getValueType(0);
8615 EVT ShVT = Z.getValueType();
8684 EVT VT =
Node->getValueType(0);
8702 if (!AllowVectorOps && VT.
isVector() &&
8720 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8722 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8728 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8749 EVT VT,
unsigned HalveDepth = 0,
8750 unsigned TotalDepth = 0) {
8782 EVT VT =
Node->getValueType(0);
8786 unsigned Opcode =
Node->getOpcode();
8801 unsigned HalfBW = BW / 2;
8881 for (
unsigned I = 0;
I < BW; ++
I) {
8949 unsigned ShAmt = Opcode ==
ISD::CLMULR ? BW - 1 : BW;
8960 assert(
Node->getNumOperands() == 3 &&
"Not a double-shift!");
8961 EVT VT =
Node->getValueType(0);
9019 EVT VT =
Node->getValueType(0);
9022 Flags.setNoFPExcept(
true);
9034 EVT DstVT =
Node->getValueType(0);
9038 const uint64_t SemEnum =
Node->getConstantOperandVal(1);
9051 "source format (semantics enum " +
9052 Twine(SemEnum) +
")");
9059 const unsigned SrcMant = SrcPrecision - 1;
9060 const unsigned SrcExp = SrcBits - SrcMant - 1;
9068 const unsigned DstExpBits = DstBits - DstMant - 1;
9070 const int DstBias = 1 - DstMinExp;
9071 const uint64_t DstExpAllOnes = (1ULL << DstExpBits) - 1;
9089 const uint64_t MantMask = (SrcMant > 0) ? ((1ULL << SrcMant) - 1) : 0;
9090 const uint64_t ExpMask = (1ULL << SrcExp) - 1;
9122 IsNaN = DAG.
getNode(
ISD::AND, dl, SetCCVT, IsExpAllOnes, IsMantNonZero);
9128 IsNaN = DAG.
getNode(
ISD::AND, dl, SetCCVT, IsExpAllOnes, IsMantAllOnes);
9142 const int BiasAdjust = DstBias - SrcBias;
9148 if (DstMant > SrcMant) {
9151 NormDstMant = DAG.
getNode(
ISD::SHL, dl, IntVT, MantField, NormDstMantShift);
9153 NormDstMant = MantField;
9167 const unsigned IntVTBits = DstBits;
9171 const int DenormExpConst =
9172 (int)IntVTBits + DstBias - SrcBias - (
int)SrcMant;
9180 DAG.
getConstant(IntVTBits - 1, dl, IntVT), LeadingZeros);
9185 const unsigned ShiftSub = IntVTBits - 1 - DstMant;
9200 DAG.
getSelect(dl, IntVT, IsDenorm, DenormResult, NormResult);
9202 const uint64_t QNaNBit = (DstMant > 0) ? (1ULL << (DstMant - 1)) : 0;
9204 DAG.
getConstant((DstExpAllOnes << DstMant) | QNaNBit, dl, IntVT);
9208 DAG.
getConstant(DstExpAllOnes << DstMant, dl, IntVT));
9210 SDValue ZeroResult = SignShifted;
9212 SDValue Result = FiniteResult;
9213 Result = DAG.
getSelect(dl, IntVT, IsZero, ZeroResult, Result);
9214 Result = DAG.
getSelect(dl, IntVT, IsInf, InfResult, Result);
9215 Result = DAG.
getSelect(dl, IntVT, IsNaN, NaNResult, Result);
9222 unsigned OpNo =
Node->isStrictFPOpcode() ? 1 : 0;
9224 EVT SrcVT = Src.getValueType();
9225 EVT DstVT =
Node->getValueType(0);
9229 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
9232 if (
Node->isStrictFPOpcode())
9295 unsigned OpNo =
Node->isStrictFPOpcode() ? 1 : 0;
9298 EVT SrcVT = Src.getValueType();
9299 EVT DstVT =
Node->getValueType(0);
9320 if (
Node->isStrictFPOpcode()) {
9322 {
Node->getOperand(0), Src });
9323 Chain = Result.getValue(1);
9337 if (
Node->isStrictFPOpcode()) {
9339 Node->getOperand(0),
true);
9345 bool Strict =
Node->isStrictFPOpcode() ||
9364 if (
Node->isStrictFPOpcode()) {
9366 { Chain, Src, FltOfs });
9388 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
9398 if (
Node->isStrictFPOpcode())
9402 EVT SrcVT = Src.getValueType();
9403 EVT DstVT =
Node->getValueType(0);
9407 if (
Node->getFlags().hasNonNeg() &&
9455 unsigned Opcode =
Node->getOpcode();
9460 if (
Node->getFlags().hasNoNaNs()) {
9462 EVT VT =
Node->getValueType(0);
9481 EVT VT =
Node->getValueType(0);
9484 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
9494 if (!
Node->getFlags().hasNoNaNs()) {
9507 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1,
Node->getFlags());
9512 if (
Node->getFlags().hasNoNaNs() ||
9515 unsigned IEEE2018Op =
9518 return DAG.
getNode(IEEE2018Op, dl, VT,
Node->getOperand(0),
9519 Node->getOperand(1),
Node->getFlags());
9536 unsigned Opc =
N->getOpcode();
9537 EVT VT =
N->getValueType(0);
9550 bool MinMaxMustRespectOrderedZero =
false;
9554 MinMaxMustRespectOrderedZero =
true;
9568 if (!
N->getFlags().hasNoNaNs() &&
9577 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
9600 unsigned Opc =
Node->getOpcode();
9601 EVT VT =
Node->getValueType(0);
9610 if (!Flags.hasNoNaNs()) {
9621 return DAG.
getNode(NewOp,
DL, VT, LHS, RHS, Flags);
9626 if (Flags.hasNoNaNs() ||
9628 unsigned IEEE2019Op =
9631 return DAG.
getNode(IEEE2019Op,
DL, VT, LHS, RHS, Flags);
9636 if ((Flags.hasNoNaNs() ||
9642 return DAG.
getNode(IEEE2008Op,
DL, VT, LHS, RHS, Flags);
9696 bool IsOrdered = NanTest ==
fcNone;
9697 bool IsUnordered = NanTest ==
fcNan;
9700 if (!IsOrdered && !IsUnordered)
9701 return std::nullopt;
9703 if (OrderedMask ==
fcZero &&
9709 return std::nullopt;
9716 EVT OperandVT =
Op.getValueType();
9728 if (OperandVT == MVT::ppcf128) {
9731 OperandVT = MVT::f64;
9738 bool IsF80 = (ScalarFloatVT == MVT::f80);
9742 if (Flags.hasNoFPExcept() &&
9745 bool IsInvertedFP =
false;
9749 FPTestMask = InvertedFPCheck;
9750 IsInvertedFP =
true;
9762 OrderedFPTestMask = FPTestMask;
9764 const bool IsOrdered = FPTestMask == OrderedFPTestMask;
9766 if (std::optional<bool> IsCmp0 =
9769 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
9776 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
9779 if (FPTestMask ==
fcNan &&
9785 bool IsOrderedInf = FPTestMask ==
fcInf;
9788 : UnorderedCmpOpcode,
9799 IsOrderedInf ? OrderedCmpOpcode : UnorderedCmpOpcode);
9804 : UnorderedCmpOpcode,
9815 IsOrdered ? OrderedCmpOpcode : UnorderedCmpOpcode);
9834 return DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal,
9835 IsOrdered ? OrderedOp : UnorderedOp);
9858 DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal, IsNormalOp);
9860 return DAG.
getNode(LogicOp,
DL, ResultVT, IsFinite, IsNormal);
9867 bool IsInverted =
false;
9870 Test = InvertedCheck;
9884 const unsigned ExplicitIntBitInF80 = 63;
9885 APInt ExpMask = Inf;
9887 ExpMask.
clearBit(ExplicitIntBitInF80);
9901 const auto appendResult = [&](
SDValue PartialRes) {
9911 const auto getIntBitIsSet = [&]() ->
SDValue {
9912 if (!IntBitIsSetV) {
9913 APInt IntBitMask(BitSize, 0);
9914 IntBitMask.
setBit(ExplicitIntBitInF80);
9919 return IntBitIsSetV;
9940 "finite check requires IEEE-like FP");
9958 appendResult(PartialRes);
9967 appendResult(ExpIsZero);
9977 else if (PartialCheck ==
fcZero)
9981 appendResult(PartialRes);
9994 appendResult(PartialRes);
9997 if (
unsigned PartialCheck =
Test &
fcInf) {
10000 else if (PartialCheck ==
fcInf)
10007 appendResult(PartialRes);
10010 if (
unsigned PartialCheck =
Test &
fcNan) {
10011 APInt InfWithQnanBit = Inf | QNaNBitMask;
10013 if (PartialCheck ==
fcNan) {
10026 }
else if (PartialCheck ==
fcQNan) {
10038 appendResult(PartialRes);
10043 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
10046 APInt ExpLimit = ExpMask - ExpLSB;
10059 appendResult(PartialRes);
10082 EVT VT =
Node->getValueType(0);
10089 if (!(Len <= 128 && Len % 8 == 0))
10130 if (Len == 16 && !VT.
isVector()) {
10148 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
10159 EVT VT =
Node->getValueType(0);
10168 if (!(Len <= 128 && Len % 8 == 0))
10180 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
10183 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
10187 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
10190 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
10191 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
10195 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
10200 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
10201 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
10212 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
10215 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
10217 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
10218 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
10228 EVT VT =
Node->getValueType(0);
10245 return DAG.
getSelect(dl, VT, SrcIsZero,
10267 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
10278 EVT VT =
Node->getValueType(0);
10292 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
10295 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
10300 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
10305 EVT VT =
Node->getValueType(0);
10331 :
APInt(64, 0x0218A392CD3D5DBFULL);
10344 for (
unsigned i = 0; i <
BitWidth; i++) {
10370 EVT VT =
Node->getValueType(0);
10386 return DAG.
getSelect(dl, VT, SrcIsZero,
10430 EVT VT =
Node->getValueType(0);
10438 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
10449 SDValue Source =
N->getOperand(0);
10452 EVT SrcVT = Source.getValueType();
10453 EVT ResVT =
N->getValueType(0);
10462 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
10470 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
10471 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
10479static std::pair<SDValue, SDValue>
10482 EVT MaskVT = Mask.getValueType();
10533 return {Mask, StepVec};
10540 N->getOperand(0),
true,
DL, DAG);
10545 EVT MaskVT =
N->getOperand(0).getValueType();
10546 EVT ResVT =
N->getValueType(0);
10576 EVT StepVecVT = StepVec.getValueType();
10590 EVT VT =
N->getValueType(0);
10591 SDValue SourceValue =
N->getOperand(0);
10592 SDValue SinkValue =
N->getOperand(1);
10593 SDValue EltSizeInBytes =
N->getOperand(2);
10605 if (IsReadAfterWrite)
10631 bool IsNegative)
const {
10633 EVT VT =
N->getValueType(0);
10696 EVT VT =
N->getValueType(0);
10699 bool IsSigned =
N->getOpcode() ==
ISD::ABDS;
10774 EVT VT =
N->getValueType(0);
10778 unsigned Opc =
N->getOpcode();
10787 "Unknown AVG node");
10799 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
10807 LHS = DAG.
getNode(ExtOpc, dl, ExtVT, LHS);
10808 RHS = DAG.
getNode(ExtOpc, dl, ExtVT, RHS);
10836 ISD::SHL, dl, VT, ZeroExtOverflow,
10852 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
10857 EVT VT =
N->getValueType(0);
10864 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
10931 EVT VT =
N->getValueType(0);
10940 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
10949 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
10959 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10963 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
10964 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
10965 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
10969 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10970 DAG.
getConstant(255ULL << 8, dl, VT), Mask, EVL);
10973 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10974 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
10977 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10978 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10983 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
10984 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10987 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
10988 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
10991 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10992 DAG.
getConstant(255ULL << 8, dl, VT), Mask, EVL);
10995 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
10996 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
10997 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
10998 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
10999 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
11000 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
11001 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
11007 EVT VT =
N->getValueType(0);
11050 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
11067 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
11070 EVT VT =
N->getValueType(0);
11089 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
11094 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
11100 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
11105 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
11111 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
11116 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
11122 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
11128std::pair<SDValue, SDValue>
11132 SDValue Chain = LD->getChain();
11133 SDValue BasePTR = LD->getBasePtr();
11134 EVT SrcVT = LD->getMemoryVT();
11135 EVT DstVT = LD->getValueType(0);
11167 LD->getPointerInfo(), SrcIntVT, LD->getBaseAlign(),
11168 LD->getMemOperand()->getFlags(), LD->getAAInfo());
11171 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
11172 unsigned ShiftIntoIdx =
11183 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
11190 return std::make_pair(
Value, Load.getValue(1));
11199 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
11201 ExtType, SL, DstEltVT, Chain, BasePTR,
11202 LD->getPointerInfo().getWithOffset(Idx * Stride), SrcEltVT,
11203 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
11214 return std::make_pair(
Value, NewChain);
11221 SDValue Chain = ST->getChain();
11222 SDValue BasePtr = ST->getBasePtr();
11224 EVT StVT = ST->getMemoryVT();
11250 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
11254 unsigned ShiftIntoIdx =
11263 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
11264 ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
11270 assert(Stride &&
"Zero stride!");
11274 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
11282 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
11283 MemSclVT, ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
11292std::pair<SDValue, SDValue>
11295 "unaligned indexed loads not implemented!");
11296 SDValue Chain = LD->getChain();
11297 SDValue Ptr = LD->getBasePtr();
11298 EVT VT = LD->getValueType(0);
11299 EVT LoadedVT = LD->getMemoryVT();
11315 LD->getMemOperand());
11317 if (LoadedVT != VT)
11321 return std::make_pair(Result, newLoad.
getValue(1));
11329 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
11335 SDValue StackPtr = StackBase;
11339 EVT StackPtrVT = StackPtr.getValueType();
11345 for (
unsigned i = 1; i < NumRegs; i++) {
11348 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(
Offset),
11349 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
11352 Load.getValue(1), dl, Load, StackPtr,
11363 8 * (LoadedBytes -
Offset));
11366 LD->getPointerInfo().getWithOffset(
Offset), MemVT, LD->getBaseAlign(),
11367 LD->getMemOperand()->getFlags(), LD->getAAInfo());
11372 Load.getValue(1), dl, Load, StackPtr,
11379 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
11384 return std::make_pair(Load, TF);
11388 "Unaligned load of unsupported type.");
11397 Align Alignment = LD->getBaseAlign();
11398 unsigned IncrementSize = NumBits / 8;
11409 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
11414 LD->getPointerInfo().getWithOffset(IncrementSize),
11415 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
11418 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
11419 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
11424 LD->getPointerInfo().getWithOffset(IncrementSize),
11425 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
11437 return std::make_pair(Result, TF);
11443 "unaligned indexed stores not implemented!");
11444 SDValue Chain = ST->getChain();
11445 SDValue Ptr = ST->getBasePtr();
11446 SDValue Val = ST->getValue();
11448 Align Alignment = ST->getBaseAlign();
11450 EVT StoreMemVT = ST->getMemoryVT();
11466 Result = DAG.
getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
11467 Alignment, ST->getMemOperand()->getFlags());
11478 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
11486 Chain, dl, Val, StackPtr,
11489 EVT StackPtrVT = StackPtr.getValueType();
11497 for (
unsigned i = 1; i < NumRegs; i++) {
11500 RegVT, dl, Store, StackPtr,
11504 ST->getPointerInfo().getWithOffset(
Offset),
11505 ST->getBaseAlign(),
11506 ST->getMemOperand()->getFlags()));
11525 Load.getValue(1), dl, Load, Ptr,
11526 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
11527 ST->getBaseAlign(), ST->getMemOperand()->getFlags(), ST->getAAInfo()));
11534 "Unaligned store of unknown type.");
11538 unsigned IncrementSize = NumBits / 8;
11558 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
11559 ST->getMemOperand()->getFlags());
11564 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
11565 ST->getMemOperand()->getFlags(), ST->getAAInfo());
11576 bool IsCompressedMemory)
const {
11579 EVT MaskVT = Mask.getValueType();
11581 "Incompatible types of Data and Mask");
11582 if (IsCompressedMemory) {
11595 MaskIntVT = MVT::i32;
11614 "Cannot index a scalable vector within a fixed-width vector");
11625 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
11639 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
11649 DAG, VecPtr, VecVT,
11651 Index, PtrArithFlags);
11667 "Converting bits to bytes lost precision");
11669 "Sub-vector must be a vector with matching element type");
11673 EVT IdxVT = Index.getValueType();
11704 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
11705 Args.emplace_back(DAG.
getGlobalAddress(EmuTlsVar, dl, PtrVT), VoidPtrType);
11712 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
11721 "Emulated TLS must have zero offset in GlobalAddressSDNode");
11722 return CallResult.first;
11733 EVT VT =
Op.getOperand(0).getValueType();
11735 if (VT.
bitsLT(MVT::i32)) {
11753 unsigned Opcode =
Node->getOpcode();
11760 return DAG.
getNode(AltOpcode,
DL, VT, Op0, Op1);
11801 {Op0, Op1, DAG.getCondCode(CC)})) {
11808 {Op0, Op1, DAG.getCondCode(CC)})) {
11836 unsigned Opcode =
Node->getOpcode();
11839 EVT VT = LHS.getValueType();
11842 assert(VT == RHS.getValueType() &&
"Expected operands to be the same type");
11872 unsigned OverflowOp;
11887 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
11888 "addition or subtraction node.");
11896 unsigned BitWidth = LHS.getScalarValueSizeInBits();
11899 SDValue SumDiff = Result.getValue(0);
11900 SDValue Overflow = Result.getValue(1);
11922 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
11926 "Expected signed saturating add/sub opcode");
11942 bool RHSIsNonNegative =
11944 if (LHSIsNonNegative || RHSIsNonNegative) {
11946 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
11950 bool RHSIsNegative =
11952 if (LHSIsNegative || RHSIsNegative) {
11954 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
11962 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
11966 unsigned Opcode =
Node->getOpcode();
11969 EVT VT = LHS.getValueType();
11970 EVT ResVT =
Node->getValueType(0);
12002 unsigned Opcode =
Node->getOpcode();
12006 EVT VT = LHS.getValueType();
12011 "Expected a SHLSAT opcode");
12043 EVT VT = LHS.getValueType();
12044 assert(RHS.getValueType() == VT &&
"Mismatching operand types");
12046 assert((HiLHS && HiRHS) || (!HiLHS && !HiRHS));
12048 "Signed flag should only be set when HiLHS and RiRHS are null");
12056 unsigned HalfBits = Bits / 2;
12101 EVT VT = LHS.getValueType();
12102 assert(RHS.getValueType() == VT &&
"Mismatching operand types");
12106 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
12107 if (WideVT == MVT::i16)
12108 LC = RTLIB::MUL_I16;
12109 else if (WideVT == MVT::i32)
12110 LC = RTLIB::MUL_I32;
12111 else if (WideVT == MVT::i64)
12112 LC = RTLIB::MUL_I64;
12113 else if (WideVT == MVT::i128)
12114 LC = RTLIB::MUL_I128;
12117 if (LibcallImpl == RTLIB::Unsupported) {
12145 SDValue Args[] = {LHS, HiLHS, RHS, HiRHS};
12146 Ret =
makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
12148 SDValue Args[] = {HiLHS, LHS, HiRHS, RHS};
12149 Ret =
makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
12152 "Ret value is a collection of constituent nodes holding result.");
12169 "Expected a fixed point multiplication opcode");
12174 EVT VT = LHS.getValueType();
12175 unsigned Scale =
Node->getConstantOperandVal(2);
12191 SDValue Product = Result.getValue(0);
12192 SDValue Overflow = Result.getValue(1);
12203 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
12204 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
12208 SDValue Product = Result.getValue(0);
12209 SDValue Overflow = Result.getValue(1);
12213 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
12218 "Expected scale to be less than the number of bits if signed or at "
12219 "most the number of bits if unsigned.");
12220 assert(LHS.getValueType() == RHS.getValueType() &&
12221 "Expected both operands to be the same type");
12230 Lo = Result.getValue(0);
12231 Hi = Result.getValue(1);
12234 Hi = DAG.
getNode(HiOp, dl, VT, LHS, RHS);
12252 if (Scale == VTSize)
12298 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
12323 "Expected a fixed point division opcode");
12325 EVT VT = LHS.getValueType();
12347 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
12350 unsigned LHSShift = std::min(LHSLead, Scale);
12351 unsigned RHSShift = Scale - LHSShift;
12415 { LHS, RHS, CarryIn });
12422 LHS.getValueType(), LHS, RHS);
12424 EVT ResultType =
Node->getValueType(1);
12435 DAG.
getSetCC(dl, SetCCType, Result,
12444 SetCC = DAG.
getSetCC(dl, SetCCType, Result, LHS, CC);
12457 LHS.getValueType(), LHS, RHS);
12459 EVT ResultType =
Node->getValueType(1);
12466 SDValue Sat = DAG.
getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
12482 DAG.
getNode(
ISD::XOR, dl, OType, RHSNegative, ResultLowerThanLHS), dl,
12483 ResultType, ResultType);
12490 DAG.
getNode(
ISD::XOR, dl, OType, LHSLessThanRHS, ResultNegative), dl,
12491 ResultType, ResultType);
12498 EVT VT =
Node->getValueType(0);
12506 const APInt &
C = RHSC->getAPIntValue();
12508 if (
C.isPowerOf2()) {
12510 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
12513 Overflow = DAG.
getSetCC(dl, SetCCVT,
12515 dl, VT, Result, ShiftAmt),
12525 static const unsigned Ops[2][3] =
12551 Result = BottomHalf;
12558 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
12563 EVT RType =
Node->getValueType(1);
12568 "Unexpected result type for S/UMULO legalization");
12577 EVT VT =
Op.getValueType();
12582 bool WidenSrc =
false;
12583 switch (
Node->getOpcode()) {
12636 "Expanding reductions for scalable vectors is undefined.");
12645 for (
unsigned i = 1; i < NumElts; i++)
12646 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res,
Ops[i], Flags);
12649 if (EltVT !=
Node->getValueType(0))
12665 "Expanding reductions for scalable vectors is undefined.");
12675 for (
unsigned i = 0; i < NumElts; i++)
12676 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res,
Ops[i], Flags);
12683 EVT VT =
Node->getValueType(0);
12692 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
12697 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
12712 EVT SrcVT = Src.getValueType();
12713 EVT DstVT =
Node->getValueType(0);
12718 assert(SatWidth <= DstWidth &&
12719 "Expected saturation width smaller than result width");
12723 APInt MinInt, MaxInt;
12734 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
12736 SrcVT = Src.getValueType();
12756 auto EmitMinMax = [&](
unsigned MinOpcode,
unsigned MaxOpcode,
12757 bool MayPropagateNaN) {
12767 Clamped = DAG.
getNode(MaxOpcode, dl, SrcVT, Clamped, MinFloatNode);
12769 Clamped = DAG.
getNode(MinOpcode, dl, SrcVT, Clamped, MaxFloatNode);
12772 dl, DstVT, Clamped);
12776 if (!MayPropagateNaN && !IsSigned)
12784 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
12786 if (AreExactFloatBounds) {
12836 EVT OperandVT =
Op.getValueType();
12862 Op.getValueType());
12866 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
12877 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
12879 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
12886 EVT VT =
Node->getValueType(0);
12889 if (
Node->getConstantOperandVal(1) == 1) {
12892 EVT OperandVT =
Op.getValueType();
12904 EVT I32 =
F32.changeTypeToInteger();
12940 "Unexpected opcode!");
12941 assert((
Node->getValueType(0).isScalableVector() ||
12943 "Fixed length vector types with constant offsets expected to use "
12944 "SHUFFLE_VECTOR!");
12946 EVT VT =
Node->getValueType(0);
12967 EVT PtrVT = StackPtr.getValueType();
12993 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
13006 EVT MaskVT = Mask.getValueType();
13023 bool HasPassthru = !Passthru.
isUndef();
13029 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
13032 APInt PassthruSplatVal;
13033 bool IsSplatPassthru =
13036 if (IsSplatPassthru) {
13040 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
13041 }
else if (HasPassthru) {
13057 ScalarVT,
DL, Chain, LastElmtPtr,
13063 for (
unsigned I = 0;
I < NumElms;
I++) {
13067 Chain,
DL, ValI, OutPtr,
13079 if (HasPassthru &&
I == NumElms - 1) {
13089 LastWriteVal = DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI,
13092 Chain,
DL, LastWriteVal, OutPtr,
13097 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
13102 EVT VT =
Node->getValueType(0);
13105 auto [Mask, StepVec] =
13107 EVT StepVecVT = StepVec.getValueType();
13132 SDValue MulLHS =
N->getOperand(1);
13133 SDValue MulRHS =
N->getOperand(2);
13141 unsigned ExtOpcLHS, ExtOpcRHS;
13142 switch (
N->getOpcode()) {
13156 if (ExtMulOpVT != MulOpVT) {
13157 MulLHS = DAG.
getNode(ExtOpcLHS,
DL, ExtMulOpVT, MulLHS);
13158 MulRHS = DAG.
getNode(ExtOpcRHS,
DL, ExtMulOpVT, MulRHS);
13172 std::deque<SDValue> Subvectors = {Acc};
13173 for (
unsigned I = 0;
I < ScaleFactor;
I++)
13176 unsigned FlatNode =
13180 while (Subvectors.size() > 1) {
13181 Subvectors.push_back(
13182 DAG.
getNode(FlatNode,
DL, AccVT, {Subvectors[0], Subvectors[1]}));
13183 Subvectors.pop_front();
13184 Subvectors.pop_front();
13187 assert(Subvectors.size() == 1 &&
13188 "There should only be one subvector after tree flattening");
13190 return Subvectors[0];
13203 if (
Op.getNode() != FPNode)
13207 while (!Worklist.
empty()) {
13241 std::optional<unsigned> CallRetResNo)
const {
13242 if (LC == RTLIB::UNKNOWN_LIBCALL)
13246 if (LibcallImpl == RTLIB::Unsupported)
13250 EVT VT =
Node->getValueType(0);
13251 unsigned NumResults =
Node->getNumValues();
13261 SDValue StoreValue = ST->getValue();
13262 unsigned ResNo = StoreValue.
getResNo();
13264 if (CallRetResNo == ResNo)
13267 if (!ST->isSimple() || ST->getAddressSpace() != 0)
13270 if (StoresInChain && ST->getChain() != StoresInChain)
13274 if (ST->getAlign() <
13282 ResultStores[ResNo] = ST;
13283 StoresInChain = ST->getChain();
13290 EVT ArgVT =
Op.getValueType();
13292 Args.emplace_back(
Op, ArgTy);
13299 if (ResNo == CallRetResNo)
13301 EVT ResVT =
Node->getValueType(ResNo);
13303 ResultPtrs[ResNo] = ResultPtr;
13304 Args.emplace_back(ResultPtr,
PointerTy);
13316 Type *RetType = CallRetResNo.has_value()
13317 ?
Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)
13329 if (ResNo == CallRetResNo) {
13335 ResultPtr, PtrInfo);
13341 PtrInfo = ST->getPointerInfo();
13348 Results.push_back(LoadResult);
13357 SDValue EVL,
bool &NeedInvert,
13359 bool IsSignaling)
const {
13360 MVT OpVT = LHS.getSimpleValueType();
13362 NeedInvert =
false;
13363 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
13364 bool IsNonVP = !EVL;
13379 bool NeedSwap =
false;
13380 InvCC = getSetCCInverse(CCCode, OpVT);
13396 if (OpVT == MVT::i1) {
13411 DAG.
getNOT(dl, LHS, MVT::i1));
13416 DAG.
getNOT(dl, RHS, MVT::i1));
13421 DAG.
getNOT(dl, LHS, MVT::i1));
13426 DAG.
getNOT(dl, RHS, MVT::i1));
13449 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
13454 "If SETO is expanded, SETOEQ must be legal!");
13471 NeedInvert = ((
unsigned)CCCode & 0x8U);
13512 SetCC1 = DAG.
getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
13513 SetCC2 = DAG.
getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
13515 SetCC1 = DAG.
getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
13516 SetCC2 = DAG.
getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
13521 SetCC1 = DAG.
getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
13522 SetCC2 = DAG.
getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
13524 SetCC1 = DAG.
getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
13525 SetCC2 = DAG.
getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
13532 LHS = DAG.
getNode(
Opc, dl, VT, SetCC1, SetCC2);
13537 LHS = DAG.
getNode(
Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
13549 EVT VT =
Node->getValueType(0);
13561 unsigned Opcode =
Node->getOpcode();
13599 std::optional<unsigned> ByteOffset;
13603 int Elt = ConstEltNo->getZExtValue();
13617 unsigned IsFast = 0;
13627 DAG, OriginalLoad->
getBasePtr(), InVecVT, EltNo);
13632 if (ResultVT.
bitsGT(VecEltVT)) {
13641 NewPtr, MPI, VecEltVT, Alignment,
13651 if (ResultVT.
bitsLT(VecEltVT))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
static bool isSigned(unsigned Opcode)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Function const char * Passes
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static std::pair< SDValue, SDValue > getLegalMaskAndStepVector(SDValue Mask, bool ZeroIsPoison, SDLoc DL, SelectionDAG &DAG)
Returns a type-legalized version of Mask as the first item in the pair.
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static bool canNarrowCLMULToLegal(const TargetLowering &TLI, LLVMContext &Ctx, EVT VT, unsigned HalveDepth=0, unsigned TotalDepth=0)
Check if CLMUL on VT can eventually reach a type with legal CLMUL through a chain of halving decompos...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
static LLVM_ABI const llvm::fltSemantics & EnumToSemantics(Semantics S)
static constexpr roundingMode rmTowardZero
static LLVM_ABI ExponentType semanticsMinExponent(const fltSemantics &)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
static LLVM_ABI bool isIEEELikeFP(const fltSemantics &)
opStatus
IEEE-754R 7: Default exception handling.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
LLVM_ABI APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
This class represents a range of values.
const APInt & getAPIntValue() const
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags getFlags() const
Return the raw flags of the source value,.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical register.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC)
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl, SDNodeFlags Flags={})
Constant fold a setcc to true or false.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new memory node's chain.
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value AND a token chain.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool willNotOverflowSub(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the sub of 2 nodes can never overflow.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getTypeSize(const SDLoc &DL, EVT VT, TypeSize TS)
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or truncating it.
LLVM_ABI bool isIdentityElement(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo, unsigned Depth=0) const
Returns true if V is an identity element of Opc with Flags.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, UndefPoisonKind Kind=UndefPoisonKind::UndefOrPoison, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, Kind can be used to track poison ...
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, bool OrZero=false, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better with strings.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
Get the string size.
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
LLVM_ABI void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
unsigned getBitWidthForCttzElements(EVT RetVT, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
virtual bool hasBitTest(SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
EVT getLegalTypeToTransformTo(LLVMContext &Context, EVT VT) const
Perform getTypeToTransformTo repeatedly until a legal type is obtained.
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some other code sequence, or the target has a custom expander for it.
CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall implementation.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether the given truncation down into KeptBits would be truncating or not.
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in types wider than i1.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const
Get the comparison predicate that's to be used to test the result of the comparison libcall against z...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
TargetLoweringBase(const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0; (X & Y) != Y ---> (~X & Y) != 0.
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
virtual EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
bool isLoadLegal(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return true if the specified load with extension is legal on this target.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
bool expandMultipleResultFPLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={}) const
Expands a node with multiple results to an FP or vector libcall.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_POISON nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
SmallVector< ConstraintPair > ConstraintGroup
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, const SDValue LHS, const SDValue RHS, SDValue &Lo, SDValue &Hi) const
Calculate full product of LHS and RHS either via a libcall or through brute force expansion of the mu...
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandFCANONICALIZE(SDNode *Node, SelectionDAG &DAG) const
Expand FCANONICALIZE to FMUL with 1.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_POISON nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_POISON nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandCLMUL(SDNode *N, SelectionDAG &DAG) const
Expand carryless multiply.
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue expandCttzElts(SDNode *Node, SelectionDAG &DAG) const
Expand a CTTZ_ELTS or CTTZ_ELTS_ZERO_POISON by calculating (VL - i) for each active lane (i),...
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual SDValue unwrapAddress(SDValue N) const
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, UndefPoisonKind Kind, unsigned Depth) const
Return true if this function can prove that Op is never poison and, Kind can be used to track poison ...
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_POISON nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
void forceExpandMultiply(SelectionDAG &DAG, const SDLoc &dl, bool Signed, SDValue &Lo, SDValue &Hi, SDValue LHS, SDValue RHS, SDValue HiLHS=SDValue(), SDValue HiRHS=SDValue()) const
Calculate the product twice the width of LHS and RHS.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
SDValue expandLoopDependenceMask(SDNode *N, SelectionDAG &DAG) const
Expand LOOP_DEPENDENCE_MASK nodes.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
~TargetLowering() override
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit algorithm.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, UndefPoisonKind Kind, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_POISON nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandCONVERT_FROM_ARBITRARY_FP(SDNode *Node, SelectionDAG &DAG) const
Expand CONVERT_FROM_ARBITRARY_FP using bit manipulation.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode, SDNodeFlags Flags={}) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual void computeKnownFPClassForTargetNode(const SDValue Op, KnownFPClass &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine floating-point class information for a target node.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis, Register R, KnownFPClass &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, ...
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue getInboundsVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const
Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, consisting of zext/sext,...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
SDValue scalarizeExtractedVectorLoad(EVT ResultVT, const SDLoc &DL, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad, SelectionDAG &DAG) const
Replace an extraction of a load with a narrowed load.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
SDValue expandCTLS(SDNode *N, SelectionDAG &DAG) const
Expand CTLS (count leading sign bits) nodes.
void setTypeIdForCallsiteInfo(const CallBase *CB, MachineFunction &MF, MachineFunction::CallSiteInfo &CSInfo) const
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
unsigned EmitCallSiteInfo
The flag enables call site info production.
unsigned EmitCallGraphSection
Emit section containing call graph metadata.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
TargetSubtargetInfo - Generic base class for all target subtargets.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ PTRADD
PTRADD represents pointer arithmetic semantics, for targets that opt in using shouldPreservePtrArith(...
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ LOOP_DEPENDENCE_RAW_MASK
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ CLMUL
Carry-less multiplication operations.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) right by OFFSET elements a...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ ABS_MIN_POISON
ABS with a poison result for INT_MIN.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI NodeType getOppositeSignednessMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns the corresponding opcode with the opposi...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Or< Preds... > m_AnyOf(const Preds &...preds)
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
NUses_match< 1, Value_match > m_OneUse()
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
@ Undef
Value of the register doesn't matter.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
LLVM_ABI void reportFatalInternalError(Error Err)
Report a fatal error that indicates a bug in LLVM.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Mod
The access may modify the value stored in memory.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
To bit_cast(const From &from) noexcept
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
@ Increment
Incrementally increasing token ID.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isScalableVT() const
Return true if the type is a scalable type.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT widenIntegerElementType(LLVMContext &Context) const
Return a VT for an integer element type with doubled bit width.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
bool isSignUnknown() const
Returns true if we don't know the sign bit.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static LLVM_ABI SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequen...
This contains information for each constraint that we are lowering.
std::string ConstraintCode
This contains the actual string for the code, like "m".
LLVM_ABI unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
LLVM_ABI bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
ArrayRef< Type * > OpsTypeOverrides
MakeLibCallOptions & setIsSigned(bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
unsigned PostShift
post-shift amount
static LLVM_ABI UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true, bool AllowWidenOptimization=false)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a seq...
bool Widen
use widen optimization
fltNonfiniteBehavior nonFiniteBehavior
fltNanEncoding nanEncoding