41#define DEBUG_TYPE "gisel-known-bits"
49 "Analysis for ComputingKnownBits",
false,
true)
52 : MF(MF),
MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
57 switch (
MI->getOpcode()) {
58 case TargetOpcode::COPY:
60 case TargetOpcode::G_ASSERT_ALIGN: {
62 return Align(
MI->getOperand(2).getImm());
64 case TargetOpcode::G_FRAME_INDEX: {
65 int FrameIdx =
MI->getOperand(1).getIndex();
66 return MF.getFrameInfo().getObjectAlign(FrameIdx);
68 case TargetOpcode::G_INTRINSIC:
69 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
70 case TargetOpcode::G_INTRINSIC_CONVERGENT:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
73 return TL.computeKnownAlignForTargetInstr(*
this, R, MRI,
Depth + 1);
78 assert(
MI.getNumExplicitDefs() == 1 &&
79 "expected single return generic instruction");
84 const LLT Ty = MRI.getType(R);
94 const APInt &DemandedElts,
102 LLT Ty = MRI.getType(R);
103 unsigned BitWidth = Ty.getScalarSizeInBits();
115[[maybe_unused]]
static void
118 <<
"] Computed for: " <<
MI <<
"[" <<
Depth <<
"] Known: 0x"
129 const APInt &DemandedElts,
160 const APInt &DemandedElts,
163 unsigned Opcode =
MI.getOpcode();
164 LLT DstTy = MRI.getType(R);
178 "DemandedElt width should equal the fixed vector number of elements");
181 "DemandedElt width should be 1 for scalars or scalable vectors");
206 TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
209 case TargetOpcode::G_BUILD_VECTOR: {
214 if (!DemandedElts[
I])
228 case TargetOpcode::G_SPLAT_VECTOR: {
236 case TargetOpcode::COPY:
237 case TargetOpcode::G_PHI:
238 case TargetOpcode::PHI: {
244 assert(
MI.getOperand(0).getSubReg() == 0 &&
"Is this code in SSA?");
247 for (
unsigned Idx = 1; Idx <
MI.getNumOperands(); Idx += 2) {
257 if (SrcReg.
isVirtual() && Src.getSubReg() == 0 &&
258 MRI.getType(SrcReg).isValid()) {
261 Depth + (Opcode != TargetOpcode::COPY));
276 case TargetOpcode::G_CONSTANT: {
280 case TargetOpcode::G_FRAME_INDEX: {
281 int FrameIdx =
MI.getOperand(1).getIndex();
282 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
285 case TargetOpcode::G_SUB: {
293 case TargetOpcode::G_XOR: {
302 case TargetOpcode::G_PTR_ADD: {
306 LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
307 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
311 case TargetOpcode::G_ADD: {
319 case TargetOpcode::G_AND: {
329 case TargetOpcode::G_OR: {
339 case TargetOpcode::G_MUL: {
347 case TargetOpcode::G_UMULH: {
355 case TargetOpcode::G_SMULH: {
363 case TargetOpcode::G_SELECT: {
364 computeKnownBitsMin(
MI.getOperand(2).getReg(),
MI.getOperand(3).getReg(),
365 Known, DemandedElts,
Depth + 1);
368 case TargetOpcode::G_SMIN: {
378 case TargetOpcode::G_SMAX: {
388 case TargetOpcode::G_UMIN: {
397 case TargetOpcode::G_UMAX: {
406 case TargetOpcode::G_FCMP:
407 case TargetOpcode::G_ICMP: {
410 if (TL.getBooleanContents(DstTy.
isVector(),
411 Opcode == TargetOpcode::G_FCMP) ==
417 case TargetOpcode::G_SEXT: {
425 case TargetOpcode::G_ASSERT_SEXT:
426 case TargetOpcode::G_SEXT_INREG: {
429 Known = Known.
sextInReg(
MI.getOperand(2).getImm());
432 case TargetOpcode::G_ANYEXT: {
438 case TargetOpcode::G_LOAD: {
446 case TargetOpcode::G_SEXTLOAD:
447 case TargetOpcode::G_ZEXTLOAD: {
454 Known = Opcode == TargetOpcode::G_SEXTLOAD
459 case TargetOpcode::G_ASHR: {
468 case TargetOpcode::G_LSHR: {
477 case TargetOpcode::G_SHL: {
486 case TargetOpcode::G_INTTOPTR:
487 case TargetOpcode::G_PTRTOINT:
492 case TargetOpcode::G_ZEXT:
493 case TargetOpcode::G_TRUNC: {
499 case TargetOpcode::G_ASSERT_ZEXT: {
503 unsigned SrcBitWidth =
MI.getOperand(2).getImm();
504 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
506 Known.
Zero |= (~InMask);
507 Known.
One &= (~Known.Zero);
510 case TargetOpcode::G_ASSERT_ALIGN: {
511 int64_t LogOfAlign =
Log2_64(
MI.getOperand(2).getImm());
520 case TargetOpcode::G_MERGE_VALUES: {
521 unsigned NumOps =
MI.getNumOperands();
522 unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
524 for (
unsigned I = 0;
I !=
NumOps - 1; ++
I) {
527 DemandedElts,
Depth + 1);
532 case TargetOpcode::G_UNMERGE_VALUES: {
533 unsigned NumOps =
MI.getNumOperands();
535 LLT SrcTy = MRI.getType(SrcReg);
537 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
542 for (; DstIdx !=
NumOps - 1 &&
MI.getOperand(DstIdx).
getReg() != R;
546 APInt SubDemandedElts = DemandedElts;
547 if (SrcTy.isVector()) {
550 DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
556 if (SrcTy.isVector())
557 Known = std::move(SrcOpKnown);
562 case TargetOpcode::G_BSWAP: {
568 case TargetOpcode::G_BITREVERSE: {
574 case TargetOpcode::G_CTPOP: {
586 case TargetOpcode::G_UBFX: {
587 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
597 case TargetOpcode::G_SBFX: {
598 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
615 case TargetOpcode::G_UADDO:
616 case TargetOpcode::G_UADDE:
617 case TargetOpcode::G_SADDO:
618 case TargetOpcode::G_SADDE:
619 case TargetOpcode::G_USUBO:
620 case TargetOpcode::G_USUBE:
621 case TargetOpcode::G_SSUBO:
622 case TargetOpcode::G_SSUBE:
623 case TargetOpcode::G_UMULO:
624 case TargetOpcode::G_SMULO: {
625 if (
MI.getOperand(1).getReg() == R) {
628 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
635 case TargetOpcode::G_CTLZ:
636 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
646 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
653 LLT VecVT = MRI.getType(InVec);
671 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
678 case TargetOpcode::G_SHUFFLE_VECTOR: {
679 APInt DemandedLHS, DemandedRHS;
682 unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
684 DemandedElts, DemandedLHS, DemandedRHS))
705 case TargetOpcode::G_CONCAT_VECTORS: {
706 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
711 unsigned NumSubVectorElts =
712 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
716 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
728 case TargetOpcode::G_ABS: {
742 Ty = Ty.getScalarType();
751 LLT Ty = MRI.getType(R);
754 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth);
757void GISelValueTracking::computeKnownFPClassForFPTrunc(
765 KnownFPClass KnownSrc;
766 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
779void GISelValueTracking::computeKnownFPClass(
Register R,
780 const APInt &DemandedElts,
784 assert(Known.
isUnknown() &&
"should not be called with known information");
794 MachineInstr &
MI = *MRI.getVRegDef(R);
795 unsigned Opcode =
MI.getOpcode();
796 LLT DstTy = MRI.getType(R);
804 switch (Cst->getKind()) {
806 auto APF = Cst->getScalarValue();
808 Known.
SignBit = APF.isNegative();
813 bool SignBitAllZero =
true;
814 bool SignBitAllOne =
true;
816 for (
auto C : *Cst) {
819 SignBitAllZero =
false;
821 SignBitAllOne =
false;
824 if (SignBitAllOne != SignBitAllZero)
840 KnownNotFromFlags |=
fcNan;
842 KnownNotFromFlags |=
fcInf;
846 InterestedClasses &= ~KnownNotFromFlags;
848 auto ClearClassesFromFlags =
855 const MachineFunction *MF =
MI.getMF();
859 TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
862 case TargetOpcode::G_FNEG: {
864 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
Depth + 1);
868 case TargetOpcode::G_SELECT: {
891 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
892 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
898 MaskIfTrue = TestedMask;
899 MaskIfFalse = ~TestedMask;
902 if (TestedValue ==
LHS) {
904 FilterLHS = MaskIfTrue;
905 }
else if (TestedValue ==
RHS) {
907 FilterRHS = MaskIfFalse;
911 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
915 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
922 case TargetOpcode::G_FCOPYSIGN: {
923 Register Magnitude =
MI.getOperand(1).getReg();
926 KnownFPClass KnownSign;
928 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
930 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
935 case TargetOpcode::G_FMA:
936 case TargetOpcode::G_STRICT_FMA:
937 case TargetOpcode::G_FMAD: {
952 KnownFPClass KnownAddend;
953 computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
960 case TargetOpcode::G_FSQRT:
961 case TargetOpcode::G_STRICT_FSQRT: {
962 KnownFPClass KnownSrc;
964 if (InterestedClasses &
fcNan)
969 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
984 case TargetOpcode::G_FABS: {
989 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
995 case TargetOpcode::G_FSIN:
996 case TargetOpcode::G_FCOS:
997 case TargetOpcode::G_FSINCOS: {
1000 KnownFPClass KnownSrc;
1002 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1010 case TargetOpcode::G_FMAXNUM:
1011 case TargetOpcode::G_FMINNUM:
1012 case TargetOpcode::G_FMINNUM_IEEE:
1013 case TargetOpcode::G_FMAXIMUM:
1014 case TargetOpcode::G_FMINIMUM:
1015 case TargetOpcode::G_FMAXNUM_IEEE:
1016 case TargetOpcode::G_FMAXIMUMNUM:
1017 case TargetOpcode::G_FMINIMUMNUM: {
1020 KnownFPClass KnownLHS, KnownRHS;
1022 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
1024 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
1028 Known = KnownLHS | KnownRHS;
1031 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1032 Opcode == TargetOpcode::G_FMAXNUM ||
1033 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1034 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1037 if (Opcode == TargetOpcode::G_FMAXNUM ||
1038 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1039 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1047 }
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1053 }
else if (Opcode == TargetOpcode::G_FMINNUM ||
1054 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1055 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1063 }
else if (Opcode == TargetOpcode::G_FMINIMUM) {
1095 }
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1096 Opcode == TargetOpcode::G_FMINIMUM) ||
1097 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1098 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1099 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1100 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1106 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1107 Opcode == TargetOpcode::G_FMAXNUM ||
1108 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1109 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1112 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1113 Opcode == TargetOpcode::G_FMINNUM ||
1114 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1115 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1122 case TargetOpcode::G_FCANONICALIZE: {
1124 KnownFPClass KnownSrc;
1125 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1147 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1166 case TargetOpcode::G_VECREDUCE_FMAX:
1167 case TargetOpcode::G_VECREDUCE_FMIN:
1168 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1169 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1175 computeKnownFPClass(Val,
MI.getFlags(), InterestedClasses,
Depth + 1);
1181 case TargetOpcode::G_TRUNC:
1182 case TargetOpcode::G_FFLOOR:
1183 case TargetOpcode::G_FCEIL:
1184 case TargetOpcode::G_FRINT:
1185 case TargetOpcode::G_FNEARBYINT:
1186 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1187 case TargetOpcode::G_INTRINSIC_ROUND: {
1189 KnownFPClass KnownSrc;
1195 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1212 case TargetOpcode::G_FEXP:
1213 case TargetOpcode::G_FEXP2:
1214 case TargetOpcode::G_FEXP10: {
1220 KnownFPClass KnownSrc;
1221 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1230 case TargetOpcode::G_FLOG:
1231 case TargetOpcode::G_FLOG2:
1232 case TargetOpcode::G_FLOG10: {
1247 KnownFPClass KnownSrc;
1248 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1258 DenormalMode
Mode = MF->getDenormalMode(FltSem);
1265 case TargetOpcode::G_FPOWI: {
1270 LLT ExpTy = MRI.getType(Exp);
1272 Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1),
Depth + 1);
1274 if (ExponentKnownBits.
Zero[0]) {
1288 KnownFPClass KnownSrc;
1289 computeKnownFPClass(Val, DemandedElts,
fcNegative, KnownSrc,
Depth + 1);
1294 case TargetOpcode::G_FLDEXP:
1295 case TargetOpcode::G_STRICT_FLDEXP: {
1297 KnownFPClass KnownSrc;
1298 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1315 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
1324 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1325 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1329 case TargetOpcode::G_FADD:
1330 case TargetOpcode::G_STRICT_FADD:
1331 case TargetOpcode::G_FSUB:
1332 case TargetOpcode::G_STRICT_FSUB: {
1335 KnownFPClass KnownLHS, KnownRHS;
1337 (Opcode == TargetOpcode::G_FADD ||
1338 Opcode == TargetOpcode::G_STRICT_FADD) &&
1340 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
1343 if (!WantNaN && !WantNegative && !WantNegZero)
1349 if (InterestedClasses &
fcNan)
1350 InterestedSrcs |=
fcInf;
1351 computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS,
Depth + 1);
1356 (Opcode == TargetOpcode::G_FSUB ||
1357 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1361 computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
1369 if (Opcode == Instruction::FAdd) {
1396 case TargetOpcode::G_FMUL:
1397 case TargetOpcode::G_STRICT_FMUL: {
1410 KnownFPClass KnownLHS, KnownRHS;
1411 computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS,
Depth + 1);
1415 computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS,
Depth + 1);
1442 case TargetOpcode::G_FDIV:
1443 case TargetOpcode::G_FREM: {
1449 if (Opcode == TargetOpcode::G_FDIV) {
1460 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
1462 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1464 if (!WantNan && !WantNegative && !WantPositive)
1467 KnownFPClass KnownLHS, KnownRHS;
1470 KnownRHS,
Depth + 1);
1472 bool KnowSomethingUseful =
1475 if (KnowSomethingUseful || WantPositive) {
1480 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
1481 KnownLHS,
Depth + 1);
1484 if (Opcode == Instruction::FDiv) {
1525 case TargetOpcode::G_FPEXT: {
1529 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth + 1);
1533 LLT SrcTy = MRI.getType(Src).getScalarType();
1550 case TargetOpcode::G_FPTRUNC: {
1551 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1555 case TargetOpcode::G_SITOFP:
1556 case TargetOpcode::G_UITOFP: {
1565 if (Opcode == TargetOpcode::G_UITOFP)
1569 LLT Ty = MRI.getType(Val);
1571 if (InterestedClasses &
fcInf) {
1576 if (Opcode == TargetOpcode::G_SITOFP)
1590 case TargetOpcode::G_BUILD_VECTOR:
1591 case TargetOpcode::G_CONCAT_VECTORS: {
1598 for (
unsigned Idx = 0; Idx <
Merge.getNumSources(); ++Idx) {
1600 bool NeedsElt = DemandedElts[Idx];
1606 computeKnownFPClass(Src, Known, InterestedClasses,
Depth + 1);
1609 KnownFPClass Known2;
1610 computeKnownFPClass(Src, Known2, InterestedClasses,
Depth + 1);
1622 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1632 LLT VecTy = MRI.getType(Vec);
1637 if (CIdx && CIdx->ult(NumElts))
1639 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1645 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1651 LLT VecTy = MRI.getType(Vec);
1659 APInt DemandedVecElts = DemandedElts;
1660 bool NeedsElt =
true;
1662 if (CIdx && CIdx->ult(NumElts)) {
1663 DemandedVecElts.
clearBit(CIdx->getZExtValue());
1664 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1669 computeKnownFPClass(Elt, Known, InterestedClasses,
Depth + 1);
1678 if (!DemandedVecElts.
isZero()) {
1679 KnownFPClass Known2;
1680 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1687 case TargetOpcode::G_SHUFFLE_VECTOR: {
1691 APInt DemandedLHS, DemandedRHS;
1693 assert(DemandedElts == APInt(1, 1));
1694 DemandedLHS = DemandedRHS = DemandedElts;
1697 DemandedElts, DemandedLHS,
1704 if (!!DemandedLHS) {
1706 computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
1716 if (!!DemandedRHS) {
1717 KnownFPClass Known2;
1719 computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
1725 case TargetOpcode::COPY: {
1728 if (!Src.isVirtual())
1731 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
Depth + 1);
1742 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses,
Depth);
1743 return KnownClasses;
1749 computeKnownFPClass(R, Known, InterestedClasses,
Depth);
1757 InterestedClasses &=
~fcNan;
1759 InterestedClasses &=
~fcInf;
1762 computeKnownFPClass(R, DemandedElts, InterestedClasses,
Depth);
1765 Result.KnownFPClasses &=
~fcNan;
1767 Result.KnownFPClasses &=
~fcInf;
1773 LLT Ty = MRI.getType(R);
1774 APInt DemandedElts =
1776 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses,
Depth);
1780unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0,
Register Src1,
1781 const APInt &DemandedElts,
1785 if (Src1SignBits == 1)
1802 case TargetOpcode::G_SEXTLOAD:
1805 case TargetOpcode::G_ZEXTLOAD:
1818 const APInt &DemandedElts,
1821 unsigned Opcode =
MI.getOpcode();
1823 if (Opcode == TargetOpcode::G_CONSTANT)
1824 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1832 LLT DstTy = MRI.getType(R);
1842 unsigned FirstAnswer = 1;
1844 case TargetOpcode::COPY: {
1846 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
1847 MRI.getType(Src.getReg()).isValid()) {
1854 case TargetOpcode::G_SEXT: {
1856 LLT SrcTy = MRI.getType(Src);
1860 case TargetOpcode::G_ASSERT_SEXT:
1861 case TargetOpcode::G_SEXT_INREG: {
1864 unsigned SrcBits =
MI.getOperand(2).getImm();
1865 unsigned InRegBits = TyBits - SrcBits + 1;
1869 case TargetOpcode::G_LOAD: {
1876 case TargetOpcode::G_SEXTLOAD: {
1891 case TargetOpcode::G_ZEXTLOAD: {
1906 case TargetOpcode::G_AND:
1907 case TargetOpcode::G_OR:
1908 case TargetOpcode::G_XOR: {
1910 unsigned Src1NumSignBits =
1912 if (Src1NumSignBits != 1) {
1914 unsigned Src2NumSignBits =
1916 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
1920 case TargetOpcode::G_ASHR: {
1925 FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
1928 case TargetOpcode::G_SHL: {
1931 if (std::optional<ConstantRange> ShAmtRange =
1933 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
1934 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
1944 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
1945 ExtOpc == TargetOpcode::G_ANYEXT) {
1946 LLT ExtTy = MRI.getType(Src1);
1948 LLT ExtendeeTy = MRI.getType(Extendee);
1952 if (SizeDiff <= MinShAmt) {
1956 return Tmp - MaxShAmt;
1962 return Tmp - MaxShAmt;
1966 case TargetOpcode::G_TRUNC: {
1968 LLT SrcTy = MRI.getType(Src);
1972 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
1974 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
1975 return NumSrcSignBits - (NumSrcBits - DstTyBits);
1978 case TargetOpcode::G_SELECT: {
1979 return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
1980 MI.getOperand(3).getReg(), DemandedElts,
1983 case TargetOpcode::G_SMIN:
1984 case TargetOpcode::G_SMAX:
1985 case TargetOpcode::G_UMIN:
1986 case TargetOpcode::G_UMAX:
1988 return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
1989 MI.getOperand(2).getReg(), DemandedElts,
1991 case TargetOpcode::G_SADDO:
1992 case TargetOpcode::G_SADDE:
1993 case TargetOpcode::G_UADDO:
1994 case TargetOpcode::G_UADDE:
1995 case TargetOpcode::G_SSUBO:
1996 case TargetOpcode::G_SSUBE:
1997 case TargetOpcode::G_USUBO:
1998 case TargetOpcode::G_USUBE:
1999 case TargetOpcode::G_SMULO:
2000 case TargetOpcode::G_UMULO: {
2004 if (
MI.getOperand(1).getReg() == R) {
2005 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
2012 case TargetOpcode::G_SUB: {
2014 unsigned Src2NumSignBits =
2016 if (Src2NumSignBits == 1)
2026 if ((Known2.
Zero | 1).isAllOnes())
2033 FirstAnswer = Src2NumSignBits;
2040 unsigned Src1NumSignBits =
2042 if (Src1NumSignBits == 1)
2047 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2050 case TargetOpcode::G_ADD: {
2052 unsigned Src2NumSignBits =
2054 if (Src2NumSignBits <= 2)
2058 unsigned Src1NumSignBits =
2060 if (Src1NumSignBits == 1)
2069 if ((Known1.
Zero | 1).isAllOnes())
2075 FirstAnswer = Src1NumSignBits;
2084 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2087 case TargetOpcode::G_FCMP:
2088 case TargetOpcode::G_ICMP: {
2089 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2092 auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
2099 case TargetOpcode::G_BUILD_VECTOR: {
2101 FirstAnswer = TyBits;
2102 APInt SingleDemandedElt(1, 1);
2104 if (!DemandedElts[
I])
2109 FirstAnswer = std::min(FirstAnswer, Tmp2);
2112 if (FirstAnswer == 1)
2117 case TargetOpcode::G_CONCAT_VECTORS: {
2118 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
2120 FirstAnswer = TyBits;
2123 unsigned NumSubVectorElts =
2124 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
2127 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
2132 FirstAnswer = std::min(FirstAnswer, Tmp2);
2135 if (FirstAnswer == 1)
2140 case TargetOpcode::G_SHUFFLE_VECTOR: {
2143 APInt DemandedLHS, DemandedRHS;
2145 unsigned NumElts = MRI.getType(Src1).getNumElements();
2147 DemandedElts, DemandedLHS, DemandedRHS))
2153 if (FirstAnswer == 1)
2155 if (!!DemandedRHS) {
2158 FirstAnswer = std::min(FirstAnswer, Tmp2);
2162 case TargetOpcode::G_SPLAT_VECTOR: {
2166 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2167 if (NumSrcSignBits > (NumSrcBits - TyBits))
2168 return NumSrcSignBits - (NumSrcBits - TyBits);
2171 case TargetOpcode::G_INTRINSIC:
2172 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2173 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2174 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2177 TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI,
Depth);
2179 FirstAnswer = std::max(FirstAnswer, NumBits);
2199 Mask <<= Mask.getBitWidth() - TyBits;
2200 return std::max(FirstAnswer, Mask.countl_one());
2204 LLT Ty = MRI.getType(R);
2205 APInt DemandedElts =
2214 unsigned Opcode =
MI.getOpcode();
2216 LLT Ty = MRI.getType(R);
2217 unsigned BitWidth = Ty.getScalarSizeInBits();
2219 if (Opcode == TargetOpcode::G_CONSTANT) {
2220 const APInt &ShAmt =
MI.getOperand(1).getCImm()->getValue();
2222 return std::nullopt;
2226 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2227 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
2228 for (
unsigned I = 0, E =
MI.getNumOperands() - 1;
I != E; ++
I) {
2229 if (!DemandedElts[
I])
2232 if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2233 MinAmt = MaxAmt =
nullptr;
2237 const APInt &ShAmt =
Op->getOperand(1).getCImm()->getValue();
2239 return std::nullopt;
2240 if (!MinAmt || MinAmt->
ugt(ShAmt))
2242 if (!MaxAmt || MaxAmt->ult(ShAmt))
2245 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2246 "Failed to find matching min/max shift amounts");
2247 if (MinAmt && MaxAmt)
2257 return std::nullopt;
2262 if (std::optional<ConstantRange> AmtRange =
2264 return AmtRange->getUnsignedMin().getZExtValue();
2265 return std::nullopt;
2283 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2308 if (!MO.isReg() || MO.getReg().isPhysical())
2311 if (!
MRI.getType(Reg).isValid())
2313 KnownBits Known = VTA.getKnownBits(Reg);
2314 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2315 OS <<
" " << MO <<
" KnownBits:" << Known <<
" SignBits:" << SignedBits
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a variety of arithmetic operations on them.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Utilities for dealing with flags related to floating point properties and mode controls.
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic operations.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
Promote Memory to Register
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file describes how to lower LLVM code to machine code.
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This class represents a range of values.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnalysisLegacy>().get(MF);
GISelValueTracking & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GISelValueTracking Result
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
unsigned getMaxDepth() const
KnownBits getKnownBits(Register R)
Align computeKnownAlignment(Register R, unsigned Depth=0)
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the element bitwidth of the shift node, return the valid constant range of possible shift amounts.
bool maskedValueIsZero(Register Val, const APInt &Mask)
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool signBitIsZero(Register Op)
const DataLayout & getDataLayout() const
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
APInt getKnownOnes(Register R)
KnownBits getKnownBits(MachineInstr &MI)
APInt getKnownZeroes(Register R)
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getSrc2Reg() const
Register getSrc1Reg() const
ArrayRef< int > getMask() const
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
TypeSize getValue() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
operand_type_match m_Pred()
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in an fpclass test G_IS_FPCLASS val, 96.
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
constexpr unsigned MaxAnalysisRecursionDepth
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a negative zero.