42#define DEBUG_TYPE "gisel-known-bits"
50 "Analysis for ComputingKnownBits",
false,
true)
53 : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
58 switch (
MI->getOpcode()) {
59 case TargetOpcode::COPY:
61 case TargetOpcode::G_ASSERT_ALIGN: {
63 return Align(
MI->getOperand(2).getImm());
65 case TargetOpcode::G_FRAME_INDEX: {
66 int FrameIdx =
MI->getOperand(1).getIndex();
67 return MF.getFrameInfo().getObjectAlign(FrameIdx);
69 case TargetOpcode::G_INTRINSIC:
70 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT:
72 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
74 return TL.computeKnownAlignForTargetInstr(*
this, R, MRI,
Depth + 1);
79 assert(
MI.getNumExplicitDefs() == 1 &&
80 "expected single return generic instruction");
85 const LLT Ty = MRI.getType(R);
95 const APInt &DemandedElts,
103 LLT Ty = MRI.getType(R);
104 unsigned BitWidth = Ty.getScalarSizeInBits();
116[[maybe_unused]]
static void
119 <<
"] Computed for: " <<
MI <<
"[" <<
Depth <<
"] Known: 0x"
130 const APInt &DemandedElts,
161 const APInt &DemandedElts,
164 unsigned Opcode =
MI.getOpcode();
165 LLT DstTy = MRI.getType(R);
179 "DemandedElt width should equal the fixed vector number of elements");
182 "DemandedElt width should be 1 for scalars or scalable vectors");
207 TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
210 case TargetOpcode::G_BUILD_VECTOR: {
215 if (!DemandedElts[
I])
229 case TargetOpcode::G_SPLAT_VECTOR: {
237 case TargetOpcode::COPY:
238 case TargetOpcode::G_PHI:
239 case TargetOpcode::PHI: {
245 assert(
MI.getOperand(0).getSubReg() == 0 &&
"Is this code in SSA?");
248 for (
unsigned Idx = 1; Idx <
MI.getNumOperands(); Idx += 2) {
251 LLT SrcTy = MRI.getType(SrcReg);
259 if (SrcReg.
isVirtual() && Src.getSubReg() == 0 &&
261 APInt NowDemandedElts;
262 if (!SrcTy.isFixedVector()) {
263 NowDemandedElts =
APInt(1, 1);
266 NowDemandedElts = DemandedElts;
273 Depth + (Opcode != TargetOpcode::COPY));
288 case TargetOpcode::G_CONSTANT: {
292 case TargetOpcode::G_FRAME_INDEX: {
293 int FrameIdx =
MI.getOperand(1).getIndex();
294 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
297 case TargetOpcode::G_SUB: {
306 case TargetOpcode::G_XOR: {
315 case TargetOpcode::G_PTR_ADD: {
319 LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
320 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
324 case TargetOpcode::G_ADD: {
332 case TargetOpcode::G_AND: {
342 case TargetOpcode::G_OR: {
352 case TargetOpcode::G_MUL: {
360 case TargetOpcode::G_UMULH: {
368 case TargetOpcode::G_SMULH: {
376 case TargetOpcode::G_ABDU: {
384 case TargetOpcode::G_ABDS: {
393 if (SignBits1 == 1) {
402 case TargetOpcode::G_UDIV: {
411 case TargetOpcode::G_SDIV: {
420 case TargetOpcode::G_SELECT: {
421 computeKnownBitsMin(
MI.getOperand(2).getReg(),
MI.getOperand(3).getReg(),
422 Known, DemandedElts,
Depth + 1);
425 case TargetOpcode::G_SMIN: {
435 case TargetOpcode::G_SMAX: {
445 case TargetOpcode::G_UMIN: {
454 case TargetOpcode::G_UMAX: {
463 case TargetOpcode::G_FCMP:
464 case TargetOpcode::G_ICMP: {
467 if (TL.getBooleanContents(DstTy.
isVector(),
468 Opcode == TargetOpcode::G_FCMP) ==
474 case TargetOpcode::G_SEXT: {
482 case TargetOpcode::G_ASSERT_SEXT:
483 case TargetOpcode::G_SEXT_INREG: {
486 Known = Known.
sextInReg(
MI.getOperand(2).getImm());
489 case TargetOpcode::G_ANYEXT: {
495 case TargetOpcode::G_LOAD: {
503 case TargetOpcode::G_SEXTLOAD:
504 case TargetOpcode::G_ZEXTLOAD: {
511 Known = Opcode == TargetOpcode::G_SEXTLOAD
516 case TargetOpcode::G_ASHR: {
525 case TargetOpcode::G_LSHR: {
534 case TargetOpcode::G_SHL: {
543 case TargetOpcode::G_ROTL:
544 case TargetOpcode::G_ROTR: {
545 MachineInstr *AmtOpMI = MRI.getVRegDef(
MI.getOperand(2).getReg());
553 unsigned Amt = MaybeAmtOp->urem(
BitWidth);
556 if (Opcode == TargetOpcode::G_ROTL)
563 case TargetOpcode::G_INTTOPTR:
564 case TargetOpcode::G_PTRTOINT:
569 case TargetOpcode::G_ZEXT:
570 case TargetOpcode::G_TRUNC: {
576 case TargetOpcode::G_ASSERT_ZEXT: {
580 unsigned SrcBitWidth =
MI.getOperand(2).getImm();
581 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
583 Known.
Zero |= (~InMask);
584 Known.
One &= (~Known.Zero);
587 case TargetOpcode::G_ASSERT_ALIGN: {
588 int64_t LogOfAlign =
Log2_64(
MI.getOperand(2).getImm());
597 case TargetOpcode::G_MERGE_VALUES: {
598 unsigned NumOps =
MI.getNumOperands();
599 unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
601 for (
unsigned I = 0;
I !=
NumOps - 1; ++
I) {
604 DemandedElts,
Depth + 1);
609 case TargetOpcode::G_UNMERGE_VALUES: {
610 unsigned NumOps =
MI.getNumOperands();
612 LLT SrcTy = MRI.getType(SrcReg);
614 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
619 for (; DstIdx !=
NumOps - 1 &&
MI.getOperand(DstIdx).
getReg() != R;
623 APInt SubDemandedElts = DemandedElts;
624 if (SrcTy.isVector()) {
627 DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
633 if (SrcTy.isVector())
634 Known = std::move(SrcOpKnown);
639 case TargetOpcode::G_BSWAP: {
645 case TargetOpcode::G_BITREVERSE: {
651 case TargetOpcode::G_CTPOP: {
663 case TargetOpcode::G_UBFX: {
664 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
674 case TargetOpcode::G_SBFX: {
675 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
692 case TargetOpcode::G_UADDO:
693 case TargetOpcode::G_UADDE:
694 case TargetOpcode::G_SADDO:
695 case TargetOpcode::G_SADDE: {
696 if (
MI.getOperand(1).getReg() == R) {
699 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
706 assert(
MI.getOperand(0).getReg() == R &&
707 "We only compute knownbits for the sum here.");
710 if (Opcode == TargetOpcode::G_UADDE || Opcode == TargetOpcode::G_SADDE) {
714 Carry = Carry.
trunc(1);
726 case TargetOpcode::G_USUBO:
727 case TargetOpcode::G_USUBE:
728 case TargetOpcode::G_SSUBO:
729 case TargetOpcode::G_SSUBE:
730 case TargetOpcode::G_UMULO:
731 case TargetOpcode::G_SMULO: {
732 if (
MI.getOperand(1).getReg() == R) {
735 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
742 case TargetOpcode::G_CTTZ:
743 case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
753 case TargetOpcode::G_CTLZ:
754 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
764 case TargetOpcode::G_CTLS: {
768 unsigned MaxUpperRedundantSignBits = MRI.getType(Reg).getScalarSizeInBits();
773 Known =
Range.toKnownBits();
776 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
783 LLT VecVT = MRI.getType(InVec);
801 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
808 case TargetOpcode::G_SHUFFLE_VECTOR: {
809 APInt DemandedLHS, DemandedRHS;
812 unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
814 DemandedElts, DemandedLHS, DemandedRHS))
835 case TargetOpcode::G_CONCAT_VECTORS: {
836 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
841 unsigned NumSubVectorElts =
842 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
846 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
858 case TargetOpcode::G_ABS: {
872 Ty = Ty.getScalarType();
881 LLT Ty = MRI.getType(R);
884 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth);
887void GISelValueTracking::computeKnownFPClassForFPTrunc(
895 KnownFPClass KnownSrc;
896 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
909void GISelValueTracking::computeKnownFPClass(
Register R,
910 const APInt &DemandedElts,
914 assert(Known.
isUnknown() &&
"should not be called with known information");
924 MachineInstr &
MI = *MRI.getVRegDef(R);
925 unsigned Opcode =
MI.getOpcode();
926 LLT DstTy = MRI.getType(R);
934 switch (Cst->getKind()) {
936 auto APF = Cst->getScalarValue();
938 Known.
SignBit = APF.isNegative();
943 bool SignBitAllZero =
true;
944 bool SignBitAllOne =
true;
946 for (
auto C : *Cst) {
949 SignBitAllZero =
false;
951 SignBitAllOne =
false;
954 if (SignBitAllOne != SignBitAllZero)
970 KnownNotFromFlags |=
fcNan;
972 KnownNotFromFlags |=
fcInf;
976 InterestedClasses &= ~KnownNotFromFlags;
979 [=, &Known] { Known.
knownNot(KnownNotFromFlags); });
985 const MachineFunction *MF =
MI.getMF();
989 TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
992 case TargetOpcode::G_FNEG: {
994 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
Depth + 1);
998 case TargetOpcode::G_SELECT: {
1021 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
1022 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
1028 MaskIfTrue = TestedMask;
1029 MaskIfFalse = ~TestedMask;
1032 if (TestedValue ==
LHS) {
1034 FilterLHS = MaskIfTrue;
1035 }
else if (TestedValue ==
RHS) {
1037 FilterRHS = MaskIfFalse;
1040 KnownFPClass Known2;
1041 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
1045 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
1052 case TargetOpcode::G_FCOPYSIGN: {
1053 Register Magnitude =
MI.getOperand(1).getReg();
1056 KnownFPClass KnownSign;
1058 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
1060 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
1065 case TargetOpcode::G_FMA:
1066 case TargetOpcode::G_STRICT_FMA:
1067 case TargetOpcode::G_FMAD: {
1082 KnownFPClass KnownAddend;
1083 computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
1090 case TargetOpcode::G_FSQRT:
1091 case TargetOpcode::G_STRICT_FSQRT: {
1092 KnownFPClass KnownSrc;
1094 if (InterestedClasses &
fcNan)
1099 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1114 case TargetOpcode::G_FABS: {
1119 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
1125 case TargetOpcode::G_FSIN:
1126 case TargetOpcode::G_FCOS:
1127 case TargetOpcode::G_FSINCOS: {
1130 KnownFPClass KnownSrc;
1132 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1140 case TargetOpcode::G_FMAXNUM:
1141 case TargetOpcode::G_FMINNUM:
1142 case TargetOpcode::G_FMINNUM_IEEE:
1143 case TargetOpcode::G_FMAXIMUM:
1144 case TargetOpcode::G_FMINIMUM:
1145 case TargetOpcode::G_FMAXNUM_IEEE:
1146 case TargetOpcode::G_FMAXIMUMNUM:
1147 case TargetOpcode::G_FMINIMUMNUM: {
1150 KnownFPClass KnownLHS, KnownRHS;
1152 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
1154 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
1158 Known = KnownLHS | KnownRHS;
1161 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1162 Opcode == TargetOpcode::G_FMAXNUM ||
1163 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1164 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1167 if (Opcode == TargetOpcode::G_FMAXNUM ||
1168 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1169 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1177 }
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1183 }
else if (Opcode == TargetOpcode::G_FMINNUM ||
1184 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1185 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1193 }
else if (Opcode == TargetOpcode::G_FMINIMUM) {
1225 }
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1226 Opcode == TargetOpcode::G_FMINIMUM) ||
1227 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1228 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1229 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1230 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1236 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1237 Opcode == TargetOpcode::G_FMAXNUM ||
1238 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1239 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1242 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1243 Opcode == TargetOpcode::G_FMINNUM ||
1244 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1245 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1252 case TargetOpcode::G_FCANONICALIZE: {
1254 KnownFPClass KnownSrc;
1255 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1277 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1296 case TargetOpcode::G_VECREDUCE_FMAX:
1297 case TargetOpcode::G_VECREDUCE_FMIN:
1298 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1299 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1305 computeKnownFPClass(Val,
MI.getFlags(), InterestedClasses,
Depth + 1);
1311 case TargetOpcode::G_TRUNC:
1312 case TargetOpcode::G_FFLOOR:
1313 case TargetOpcode::G_FCEIL:
1314 case TargetOpcode::G_FRINT:
1315 case TargetOpcode::G_FNEARBYINT:
1316 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1317 case TargetOpcode::G_INTRINSIC_ROUND: {
1319 KnownFPClass KnownSrc;
1325 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1342 case TargetOpcode::G_FEXP:
1343 case TargetOpcode::G_FEXP2:
1344 case TargetOpcode::G_FEXP10: {
1350 KnownFPClass KnownSrc;
1351 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1360 case TargetOpcode::G_FLOG:
1361 case TargetOpcode::G_FLOG2:
1362 case TargetOpcode::G_FLOG10: {
1377 KnownFPClass KnownSrc;
1378 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1388 DenormalMode
Mode = MF->getDenormalMode(FltSem);
1395 case TargetOpcode::G_FPOWI: {
1400 LLT ExpTy = MRI.getType(Exp);
1402 Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1),
Depth + 1);
1404 if (ExponentKnownBits.
Zero[0]) {
1418 KnownFPClass KnownSrc;
1419 computeKnownFPClass(Val, DemandedElts,
fcNegative, KnownSrc,
Depth + 1);
1424 case TargetOpcode::G_FLDEXP:
1425 case TargetOpcode::G_STRICT_FLDEXP: {
1427 KnownFPClass KnownSrc;
1428 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1445 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
1454 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1455 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1459 case TargetOpcode::G_FADD:
1460 case TargetOpcode::G_STRICT_FADD:
1461 case TargetOpcode::G_FSUB:
1462 case TargetOpcode::G_STRICT_FSUB: {
1465 KnownFPClass KnownLHS, KnownRHS;
1467 (Opcode == TargetOpcode::G_FADD ||
1468 Opcode == TargetOpcode::G_STRICT_FADD) &&
1470 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
1473 if (!WantNaN && !WantNegative && !WantNegZero)
1479 if (InterestedClasses &
fcNan)
1480 InterestedSrcs |=
fcInf;
1481 computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS,
Depth + 1);
1486 (Opcode == TargetOpcode::G_FSUB ||
1487 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1491 computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
1499 if (Opcode == TargetOpcode::G_FADD ||
1500 Opcode == TargetOpcode::G_STRICT_FADD) {
1527 case TargetOpcode::G_FMUL:
1528 case TargetOpcode::G_STRICT_FMUL: {
1541 KnownFPClass KnownLHS, KnownRHS;
1542 computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS,
Depth + 1);
1546 computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS,
Depth + 1);
1573 case TargetOpcode::G_FDIV:
1574 case TargetOpcode::G_FREM: {
1580 if (Opcode == TargetOpcode::G_FDIV) {
1591 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
1593 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1595 if (!WantNan && !WantNegative && !WantPositive)
1598 KnownFPClass KnownLHS, KnownRHS;
1601 KnownRHS,
Depth + 1);
1603 bool KnowSomethingUseful =
1606 if (KnowSomethingUseful || WantPositive) {
1611 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
1612 KnownLHS,
Depth + 1);
1615 if (Opcode == TargetOpcode::G_FDIV) {
1656 case TargetOpcode::G_FPEXT: {
1660 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth + 1);
1664 LLT SrcTy = MRI.getType(Src).getScalarType();
1681 case TargetOpcode::G_FPTRUNC: {
1682 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1686 case TargetOpcode::G_SITOFP:
1687 case TargetOpcode::G_UITOFP: {
1696 if (Opcode == TargetOpcode::G_UITOFP)
1700 LLT Ty = MRI.getType(Val);
1702 if (InterestedClasses &
fcInf) {
1707 if (Opcode == TargetOpcode::G_SITOFP)
1721 case TargetOpcode::G_BUILD_VECTOR:
1722 case TargetOpcode::G_CONCAT_VECTORS: {
1729 for (
unsigned Idx = 0; Idx <
Merge.getNumSources(); ++Idx) {
1731 bool NeedsElt = DemandedElts[Idx];
1737 computeKnownFPClass(Src, Known, InterestedClasses,
Depth + 1);
1740 KnownFPClass Known2;
1741 computeKnownFPClass(Src, Known2, InterestedClasses,
Depth + 1);
1753 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1763 LLT VecTy = MRI.getType(Vec);
1768 if (CIdx && CIdx->ult(NumElts))
1770 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1776 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1782 LLT VecTy = MRI.getType(Vec);
1790 APInt DemandedVecElts = DemandedElts;
1791 bool NeedsElt =
true;
1793 if (CIdx && CIdx->ult(NumElts)) {
1794 DemandedVecElts.
clearBit(CIdx->getZExtValue());
1795 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1800 computeKnownFPClass(Elt, Known, InterestedClasses,
Depth + 1);
1809 if (!DemandedVecElts.
isZero()) {
1810 KnownFPClass Known2;
1811 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1818 case TargetOpcode::G_SHUFFLE_VECTOR: {
1822 APInt DemandedLHS, DemandedRHS;
1824 assert(DemandedElts == APInt(1, 1));
1825 DemandedLHS = DemandedRHS = DemandedElts;
1828 DemandedElts, DemandedLHS,
1835 if (!!DemandedLHS) {
1837 computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
1847 if (!!DemandedRHS) {
1848 KnownFPClass Known2;
1850 computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
1856 case TargetOpcode::COPY: {
1859 if (!Src.isVirtual())
1862 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
Depth + 1);
1873 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses,
Depth);
1874 return KnownClasses;
1880 computeKnownFPClass(R, Known, InterestedClasses,
Depth);
1888 InterestedClasses &=
~fcNan;
1890 InterestedClasses &=
~fcInf;
1893 computeKnownFPClass(R, DemandedElts, InterestedClasses,
Depth);
1896 Result.KnownFPClasses &=
~fcNan;
1898 Result.KnownFPClasses &=
~fcInf;
1904 LLT Ty = MRI.getType(R);
1905 APInt DemandedElts =
1907 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses,
Depth);
1911unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0,
Register Src1,
1912 const APInt &DemandedElts,
1916 if (Src1SignBits == 1)
1933 case TargetOpcode::G_SEXTLOAD:
1936 case TargetOpcode::G_ZEXTLOAD:
1949 const APInt &DemandedElts,
1952 unsigned Opcode =
MI.getOpcode();
1954 if (Opcode == TargetOpcode::G_CONSTANT)
1955 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1963 LLT DstTy = MRI.getType(R);
1973 unsigned FirstAnswer = 1;
1975 case TargetOpcode::COPY: {
1977 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
1978 MRI.getType(Src.getReg()).isValid()) {
1985 case TargetOpcode::G_SEXT: {
1987 LLT SrcTy = MRI.getType(Src);
1991 case TargetOpcode::G_ASSERT_SEXT:
1992 case TargetOpcode::G_SEXT_INREG: {
1995 unsigned SrcBits =
MI.getOperand(2).getImm();
1996 unsigned InRegBits = TyBits - SrcBits + 1;
2000 case TargetOpcode::G_LOAD: {
2007 case TargetOpcode::G_SEXTLOAD: {
2022 case TargetOpcode::G_ZEXTLOAD: {
2037 case TargetOpcode::G_AND:
2038 case TargetOpcode::G_OR:
2039 case TargetOpcode::G_XOR: {
2041 unsigned Src1NumSignBits =
2043 if (Src1NumSignBits != 1) {
2045 unsigned Src2NumSignBits =
2047 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
2051 case TargetOpcode::G_ASHR: {
2056 FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
2059 case TargetOpcode::G_SHL: {
2062 if (std::optional<ConstantRange> ShAmtRange =
2064 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
2065 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
2075 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
2076 ExtOpc == TargetOpcode::G_ANYEXT) {
2077 LLT ExtTy = MRI.getType(Src1);
2079 LLT ExtendeeTy = MRI.getType(Extendee);
2083 if (SizeDiff <= MinShAmt) {
2087 return Tmp - MaxShAmt;
2093 return Tmp - MaxShAmt;
2097 case TargetOpcode::G_TRUNC: {
2099 LLT SrcTy = MRI.getType(Src);
2103 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
2105 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
2106 return NumSrcSignBits - (NumSrcBits - DstTyBits);
2109 case TargetOpcode::G_SELECT: {
2110 return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
2111 MI.getOperand(3).getReg(), DemandedElts,
2114 case TargetOpcode::G_SMIN:
2115 case TargetOpcode::G_SMAX:
2116 case TargetOpcode::G_UMIN:
2117 case TargetOpcode::G_UMAX:
2119 return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
2120 MI.getOperand(2).getReg(), DemandedElts,
2122 case TargetOpcode::G_SADDO:
2123 case TargetOpcode::G_SADDE:
2124 case TargetOpcode::G_UADDO:
2125 case TargetOpcode::G_UADDE:
2126 case TargetOpcode::G_SSUBO:
2127 case TargetOpcode::G_SSUBE:
2128 case TargetOpcode::G_USUBO:
2129 case TargetOpcode::G_USUBE:
2130 case TargetOpcode::G_SMULO:
2131 case TargetOpcode::G_UMULO: {
2135 if (
MI.getOperand(1).getReg() == R) {
2136 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
2143 case TargetOpcode::G_SUB: {
2145 unsigned Src2NumSignBits =
2147 if (Src2NumSignBits == 1)
2157 if ((Known2.
Zero | 1).isAllOnes())
2164 FirstAnswer = Src2NumSignBits;
2171 unsigned Src1NumSignBits =
2173 if (Src1NumSignBits == 1)
2178 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2181 case TargetOpcode::G_ADD: {
2183 unsigned Src2NumSignBits =
2185 if (Src2NumSignBits <= 2)
2189 unsigned Src1NumSignBits =
2191 if (Src1NumSignBits == 1)
2200 if ((Known1.
Zero | 1).isAllOnes())
2206 FirstAnswer = Src1NumSignBits;
2215 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2218 case TargetOpcode::G_FCMP:
2219 case TargetOpcode::G_ICMP: {
2220 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2223 auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
2230 case TargetOpcode::G_BUILD_VECTOR: {
2232 FirstAnswer = TyBits;
2233 APInt SingleDemandedElt(1, 1);
2235 if (!DemandedElts[
I])
2240 FirstAnswer = std::min(FirstAnswer, Tmp2);
2243 if (FirstAnswer == 1)
2248 case TargetOpcode::G_CONCAT_VECTORS: {
2249 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
2251 FirstAnswer = TyBits;
2254 unsigned NumSubVectorElts =
2255 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
2258 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
2263 FirstAnswer = std::min(FirstAnswer, Tmp2);
2266 if (FirstAnswer == 1)
2271 case TargetOpcode::G_SHUFFLE_VECTOR: {
2274 APInt DemandedLHS, DemandedRHS;
2276 unsigned NumElts = MRI.getType(Src1).getNumElements();
2278 DemandedElts, DemandedLHS, DemandedRHS))
2284 if (FirstAnswer == 1)
2286 if (!!DemandedRHS) {
2289 FirstAnswer = std::min(FirstAnswer, Tmp2);
2293 case TargetOpcode::G_SPLAT_VECTOR: {
2297 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2298 if (NumSrcSignBits > (NumSrcBits - TyBits))
2299 return NumSrcSignBits - (NumSrcBits - TyBits);
2302 case TargetOpcode::G_INTRINSIC:
2303 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2304 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2305 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2308 TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI,
Depth);
2310 FirstAnswer = std::max(FirstAnswer, NumBits);
2330 Mask <<= Mask.getBitWidth() - TyBits;
2331 return std::max(FirstAnswer, Mask.countl_one());
2335 LLT Ty = MRI.getType(R);
2336 APInt DemandedElts =
2345 unsigned Opcode =
MI.getOpcode();
2347 LLT Ty = MRI.getType(R);
2348 unsigned BitWidth = Ty.getScalarSizeInBits();
2350 if (Opcode == TargetOpcode::G_CONSTANT) {
2351 const APInt &ShAmt =
MI.getOperand(1).getCImm()->getValue();
2353 return std::nullopt;
2357 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2358 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
2359 for (
unsigned I = 0, E =
MI.getNumOperands() - 1;
I != E; ++
I) {
2360 if (!DemandedElts[
I])
2363 if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2364 MinAmt = MaxAmt =
nullptr;
2368 const APInt &ShAmt =
Op->getOperand(1).getCImm()->getValue();
2370 return std::nullopt;
2371 if (!MinAmt || MinAmt->
ugt(ShAmt))
2373 if (!MaxAmt || MaxAmt->ult(ShAmt))
2376 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2377 "Failed to find matching min/max shift amounts");
2378 if (MinAmt && MaxAmt)
2388 return std::nullopt;
2393 if (std::optional<ConstantRange> AmtRange =
2395 return AmtRange->getUnsignedMin().getZExtValue();
2396 return std::nullopt;
2414 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2439 if (!MO.isReg() || MO.getReg().isPhysical())
2442 if (!MRI.getType(Reg).isValid())
2444 KnownBits Known = VTA.getKnownBits(Reg);
2445 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2446 OS <<
" " << MO <<
" KnownBits:" << Known <<
" SignBits:" << SignedBits
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Utilities for dealing with flags related to floating point properties and mode controls.
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This class represents a range of values.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnal...
GISelValueTracking & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GISelValueTracking Result
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
unsigned getMaxDepth() const
KnownBits getKnownBits(Register R)
Align computeKnownAlignment(Register R, unsigned Depth=0)
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool maskedValueIsZero(Register Val, const APInt &Mask)
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool signBitIsZero(Register Op)
const DataLayout & getDataLayout() const
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
APInt getKnownOnes(Register R)
APInt getKnownZeroes(Register R)
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getSrc2Reg() const
Register getSrc1Reg() const
ArrayRef< int > getMask() const
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
TypeSize getValue() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
operand_type_match m_Pred()
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in an fpclass test G_IS_FPCLASS val, 96.
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
scope_exit(Callable) -> scope_exit< Callable >
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
constexpr unsigned MaxAnalysisRecursionDepth
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false, bool SelfAdd=false)
Compute knownbits resulting from addition of LHS and RHS.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.