#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
if (unsigned BitWidth = Ty->getScalarSizeInBits())
  return BitWidth;
return DL.getPointerTypeSizeInBits(Ty);
const APInt &DemandedElts,
DemandedLHS = DemandedRHS = DemandedElts;
DemandedElts, DemandedLHS, DemandedRHS);
bool UseInstrInfo, unsigned Depth) {
R->uge(LHS->getType()->getScalarSizeInBits()))
assert(LHS->getType() == RHS->getType() &&
       "LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
       "LHS and RHS should be integers");
return !I->user_empty() &&
return !I->user_empty() && all_of(I->users(), [](const User *U) {
  return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
return ::isKnownToBeAPowerOfTwo(
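// Illustrative sketch (assumed, standalone): the property that
// isKnownToBeAPowerOfTwo ultimately certifies for a concrete value.  A
// nonzero integer is a power of two exactly when clearing its lowest set bit
// leaves zero; with OrZero the zero value is also accepted.
static bool isPowerOfTwoConcrete(unsigned long long X, bool OrZero) {
  if (X == 0)
    return OrZero;           // Zero only passes the "power of two or zero" form.
  return (X & (X - 1)) == 0; // Clearing the lowest set bit must leave nothing.
}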
return CI->getValue().isStrictlyPositive();
return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
return Mask.isSubsetOf(Known.Zero);
unsigned Depth = 0) {
return ::ComputeNumSignBits(
return V->getType()->getScalarSizeInBits() - SignBits + 1;
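// Worked example (assumed values, standalone) of the relation used above:
// significant bits = bit width - sign bits + 1.  For -4096 in i32
// (0xFFFFF000) there are 20 redundant sign bits, so 32 - 20 + 1 = 13
// significant bits suffice to represent the value.
static unsigned maxSignificantBits32(unsigned V) { // assumes 32-bit unsigned
  unsigned SignBit = (V >> 31) & 1;
  unsigned SignBits = 1;
  // Count how many leading bits merely repeat the sign bit.
  while (SignBits < 32 && ((V >> (31 - SignBits)) & 1) == SignBit)
    ++SignBits;
  return 32 - SignBits + 1;
}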
const APInt &DemandedElts,
const unsigned BitWidth = Ty->getScalarSizeInBits();
if (Ty->isVectorTy())
const Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
const auto MatchSubBC = [&]() {
const auto MatchASubBC = [&]() {
const auto MatchCD = [&]() {
if (!Match(Op0, Op1) && !Match(Op1, Op0))
const auto ComputeKnownBitsOrOne = [&](const Value *V) {
const KnownBits KnownA = ComputeKnownBitsOrOne(A);
const KnownBits KnownD = ComputeKnownBitsOrOne(D);
if (SubBC->getOpcode() == Instruction::Xor &&
const unsigned MinimumNumberOfLeadingZeros = UpperBound.countl_zero();
const APInt &DemandedElts,
if (KnownOut.isUnknown() && !NSW && !NUW)
bool NUW, const APInt &DemandedElts,
bool isKnownNegativeOp0 = Known2.isNegative();
(isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
(isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
(isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
bool SelfMultiply = Op0 == Op1;
unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
if (OutValidBits < TyBits) {
  APInt KnownZeroMask =
  Known.Zero |= KnownZeroMask;
unsigned NumRanges = Ranges.getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
       "Known bit width must match range bit width!");
unsigned CommonPrefixBits =
    (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
Known.One &= UnsignedMax & Mask;
Known.Zero &= ~UnsignedMax & Mask;
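// Sketch (assumed, standalone) of the common-prefix trick used above: every
// value in the inclusive range [UMin, UMax] agrees with the endpoints on the
// leading bits where UMin and UMax themselves agree, so those bits become
// known ones or known zeros.
static void knownBitsFromRange(unsigned long long UMin, unsigned long long UMax,
                               unsigned long long &KnownOne,
                               unsigned long long &KnownZero) {
  KnownOne = KnownZero = 0;
  for (int Bit = 63; Bit >= 0; --Bit) {
    unsigned long long B = 1ULL << Bit;
    if ((UMin & B) != (UMax & B))
      break;                                   // First disagreement ends the prefix.
    ((UMin & B) ? KnownOne : KnownZero) |= B;  // Shared prefix bit is known.
  }
}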
619 while (!WorkSet.
empty()) {
621 if (!Visited.
insert(V).second)
626 return EphValues.count(cast<Instruction>(U));
631 if (V ==
I || (!V->mayHaveSideEffects() && !V->isTerminator())) {
635 for (
const Use &U : U->operands()) {
650 return CI->isAssumeLikeIntrinsic();
658 bool AllowEphemerals) {
676 if (!AllowEphemerals && Inv == CxtI)
707 if (CtxI->
getParent() != Assume->getParent() || !Assume->comesBefore(CtxI))
716 for (
const auto &[Idx,
I] :
722 if (!CB->hasFnAttr(Attribute::NoFree))
754 for (
unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
757 Pred, VC->getElementAsAPInt(ElemIdx));
766 const PHINode **PhiOut =
nullptr) {
770 CtxIOut =
PHI->getIncomingBlock(*U)->getTerminator();
786 IncPhi && IncPhi->getNumIncomingValues() == 2) {
787 for (
int Idx = 0; Idx < 2; ++Idx) {
788 if (IncPhi->getIncomingValue(Idx) ==
PHI) {
789 ValOut = IncPhi->getIncomingValue(1 - Idx);
792 CtxIOut = IncPhi->getIncomingBlock(1 - Idx)->getTerminator();
811 "Got assumption for the wrong function!");
814 if (!V->getType()->isPointerTy())
817 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
819 (RK.AttrKind == Attribute::NonNull ||
820 (RK.AttrKind == Attribute::Dereferenceable &&
849 if (
RHS->getType()->isPointerTy()) {
891 Known.
Zero |= ~*
C & *Mask;
897 Known.
One |= *
C & ~*Mask;
956 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
962 KnownBits DstKnown(
LHS->getType()->getScalarSizeInBits());
976 bool Invert,
unsigned Depth) {
1058 "Got assumption for the wrong function!");
1061 if (!V->getType()->isPointerTy())
1064 *
I,
I->bundle_op_info_begin()[Elem.Index])) {
1068 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
1080 Value *Arg =
I->getArgOperand(0);
1096 if (Trunc && Trunc->getOperand(0) == V &&
1098 if (Trunc->hasNoUnsignedWrap()) {
1146 Known = KF(Known2, Known, ShAmtNonZero);
Value *X = nullptr, *Y = nullptr;
switch (I->getOpcode()) {
case Instruction::And:
  KnownOut = KnownLHS & KnownRHS;
  KnownOut = KnownLHS.blsi();
  KnownOut = KnownRHS.blsi();
case Instruction::Or:
  KnownOut = KnownLHS | KnownRHS;
case Instruction::Xor:
  KnownOut = KnownLHS ^ KnownRHS;
  const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
  KnownOut = XBits.blsmsk();
if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
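// Sketch (assumed, standalone) of the two BMI-style patterns recognized
// above: `X & -X` isolates the lowest set bit (blsi) and `X ^ (X - 1)` masks
// everything up to and including the lowest set bit (blsmsk).  Knowledge
// about the low bits of X therefore pins down most bits of these results.
static unsigned long long lowestSetBit(unsigned long long X) {
  return X & (0 - X); // blsi
}
static unsigned long long maskUpToLowestSetBit(unsigned long long X) {
  return X ^ (X - 1); // blsmsk; yields all ones when X == 0
}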
1225 APInt DemandedEltsLHS, DemandedEltsRHS;
1227 DemandedElts, DemandedEltsLHS,
1230 const auto ComputeForSingleOpFunc =
1232 return KnownBitsFunc(
1237 if (DemandedEltsRHS.
isZero())
1238 return ComputeForSingleOpFunc(
I->getOperand(0), DemandedEltsLHS);
1239 if (DemandedEltsLHS.
isZero())
1240 return ComputeForSingleOpFunc(
I->getOperand(1), DemandedEltsRHS);
1242 return ComputeForSingleOpFunc(
I->getOperand(0), DemandedEltsLHS)
1243 .intersectWith(ComputeForSingleOpFunc(
I->getOperand(1), DemandedEltsRHS));
1253 APInt DemandedElts =
1261 Attribute Attr =
F->getFnAttribute(Attribute::VScaleRange);
1269 return ConstantRange::getEmpty(
BitWidth);
1280 Value *Arm,
bool Invert,
1319 "Input should be a Select!");
1329 const Value *LHS2 =
nullptr, *RHS2 =
nullptr;
return CLow->sle(*CHigh);
const APInt *&CHigh) {
assert((II->getIntrinsicID() == Intrinsic::smin ||
        II->getIntrinsicID() == Intrinsic::smax) &&
       "Must be smin/smax");
if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
if (II->getIntrinsicID() == Intrinsic::smin)
return CLow->sle(*CHigh);
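// Sketch (assumed, standalone): the pattern matched above is a signed clamp,
// smin(smax(X, CLow), CHigh) with CLow <= CHigh, so the result always lies in
// [CLow, CHigh]; for example the sign bit is known clear whenever CLow >= 0.
static long long signedClamp(long long X, long long CLow, long long CHigh) {
  long long Lo = X > CLow ? X : CLow; // smax(X, CLow)
  return Lo < CHigh ? Lo : CHigh;     // smin(..., CHigh), always in [CLow, CHigh]
}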
1365 const APInt *CLow, *CHigh;
1372 const APInt &DemandedElts,
1379 switch (
I->getOpcode()) {
1381 case Instruction::Load:
1386 case Instruction::And:
1392 case Instruction::Or:
1398 case Instruction::Xor:
1404 case Instruction::Mul: {
1408 DemandedElts, Known, Known2, Q,
Depth);
1411 case Instruction::UDiv: {
1418 case Instruction::SDiv: {
1425 case Instruction::Select: {
1426 auto ComputeForArm = [&](
Value *Arm,
bool Invert) {
1434 ComputeForArm(
I->getOperand(1),
false)
1438 case Instruction::FPTrunc:
1439 case Instruction::FPExt:
1440 case Instruction::FPToUI:
1441 case Instruction::FPToSI:
1442 case Instruction::SIToFP:
1443 case Instruction::UIToFP:
1445 case Instruction::PtrToInt:
1446 case Instruction::IntToPtr:
1449 case Instruction::ZExt:
1450 case Instruction::Trunc: {
1451 Type *SrcTy =
I->getOperand(0)->getType();
1453 unsigned SrcBitWidth;
1461 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
1465 Inst && Inst->hasNonNeg() && !Known.
isNegative())
1470 case Instruction::BitCast: {
1471 Type *SrcTy =
I->getOperand(0)->getType();
1472 if (SrcTy->isIntOrPtrTy() &&
1475 !
I->getType()->isVectorTy()) {
1483 V->getType()->isFPOrFPVectorTy()) {
1484 Type *FPType = V->getType()->getScalarType();
1496 if (FPClasses &
fcInf)
1508 if (Result.SignBit) {
1509 if (*Result.SignBit)
1520 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1521 !
I->getType()->isIntOrIntVectorTy() ||
1529 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1545 unsigned SubScale =
BitWidth / SubBitWidth;
1547 for (
unsigned i = 0; i != NumElts; ++i) {
1548 if (DemandedElts[i])
1549 SubDemandedElts.
setBit(i * SubScale);
1553 for (
unsigned i = 0; i != SubScale; ++i) {
1556 unsigned ShiftElt = IsLE ? i : SubScale - 1 - i;
1557 Known.
insertBits(KnownSrc, ShiftElt * SubBitWidth);
1563 unsigned SubScale = SubBitWidth /
BitWidth;
1565 APInt SubDemandedElts =
1571 for (
unsigned i = 0; i != NumElts; ++i) {
1572 if (DemandedElts[i]) {
1573 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
1583 case Instruction::SExt: {
1585 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
1587 Known = Known.
trunc(SrcBitWidth);
case Instruction::Shl: {
            bool ShAmtNonZero) {
  return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
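// Sketch (assumed, standalone) of how a fully-known shift amount propagates
// known bits through a shl on a 64-bit value (assumes ShAmt < 64): both masks
// shift left together and the vacated low bits become known zero.
static void knownShlByConstant(unsigned long long &KnownZero,
                               unsigned long long &KnownOne, unsigned ShAmt) {
  KnownOne <<= ShAmt;               // Known ones move up.
  KnownZero <<= ShAmt;              // Known zeros move up.
  KnownZero |= (1ULL << ShAmt) - 1; // Shifted-in low bits are zero.
}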
1609 case Instruction::LShr: {
1612 bool ShAmtNonZero) {
1623 case Instruction::AShr: {
1626 bool ShAmtNonZero) {
1633 case Instruction::Sub: {
1637 DemandedElts, Known, Known2, Q,
Depth);
1640 case Instruction::Add: {
1644 DemandedElts, Known, Known2, Q,
Depth);
1647 case Instruction::SRem:
1653 case Instruction::URem:
1658 case Instruction::Alloca:
1661 case Instruction::GetElementPtr: {
1668 APInt AccConstIndices(IndexWidth, 0);
1670 auto AddIndexToKnown = [&](
KnownBits IndexBits) {
1679 "Index width can't be larger than pointer width");
1685 for (
unsigned i = 1, e =
I->getNumOperands(); i != e; ++i, ++GTI) {
1690 Value *Index =
I->getOperand(i);
1701 "Access to structure field must be known at compile time");
1709 AccConstIndices +=
Offset;
1726 CI->getValue().
sextOrTrunc(IndexWidth) * StrideInBytes;
1750 case Instruction::PHI: {
1753 Value *R =
nullptr, *L =
nullptr;
1766 case Instruction::LShr:
1767 case Instruction::AShr:
1768 case Instruction::Shl:
1769 case Instruction::UDiv:
1776 case Instruction::URem: {
1789 case Instruction::Shl:
1793 case Instruction::LShr:
1794 case Instruction::UDiv:
1795 case Instruction::URem:
1800 case Instruction::AShr:
1812 case Instruction::Add:
1813 case Instruction::Sub:
1814 case Instruction::And:
1815 case Instruction::Or:
1816 case Instruction::Mul: {
1823 unsigned OpNum =
P->getOperand(0) == R ? 0 : 1;
1824 Instruction *RInst =
P->getIncomingBlock(OpNum)->getTerminator();
1825 Instruction *LInst =
P->getIncomingBlock(1 - OpNum)->getTerminator();
1854 case Instruction::Add: {
1864 case Instruction::Sub: {
1875 case Instruction::Mul:
1892 if (
P->getNumIncomingValues() == 0)
1903 for (
const Use &U :
P->operands()) {
1938 if ((TrueSucc == CxtPhi->
getParent()) !=
1955 Known2 = KnownUnion;
1969 case Instruction::Call:
1970 case Instruction::Invoke: {
1980 if (std::optional<ConstantRange>
Range = CB->getRange())
1983 if (
const Value *RV = CB->getReturnedArgOperand()) {
1984 if (RV->getType() ==
I->getType()) {
1996 switch (
II->getIntrinsicID()) {
1999 case Intrinsic::abs: {
2001 bool IntMinIsPoison =
match(
II->getArgOperand(1),
m_One());
2005 case Intrinsic::bitreverse:
2009 case Intrinsic::bswap:
case Intrinsic::ctlz: {
  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
case Intrinsic::cttz: {
  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
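// Sketch (assumed, standalone): when the operand is known nonzero, ctlz/cttz
// on an i32 returns at most 31, so the result fits in 5 bits and bits 5..31
// of the intrinsic result are known zero.  That is what clamping to
// BitWidth - 1 above feeds into.
static unsigned knownZeroHighBitsOfCtz32(bool OperandKnownNonZero) {
  unsigned MaxResult = OperandKnownNonZero ? 31 : 32; // Worst-case result value.
  unsigned BitsNeeded = 0;
  while ((1u << BitsNeeded) <= MaxResult)             // Smallest width holding MaxResult.
    ++BitsNeeded;
  return 32 - BitsNeeded;                             // High result bits that must be zero.
}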
2035 case Intrinsic::ctpop: {
2046 case Intrinsic::fshr:
2047 case Intrinsic::fshl: {
2054 if (
II->getIntrinsicID() == Intrinsic::fshr)
2061 Known2 <<= ShiftAmt;
2066 case Intrinsic::uadd_sat:
2071 case Intrinsic::usub_sat:
2076 case Intrinsic::sadd_sat:
2081 case Intrinsic::ssub_sat:
2087 case Intrinsic::vector_reverse:
2093 case Intrinsic::vector_reduce_and:
2094 case Intrinsic::vector_reduce_or:
2095 case Intrinsic::vector_reduce_umax:
2096 case Intrinsic::vector_reduce_umin:
2097 case Intrinsic::vector_reduce_smax:
2098 case Intrinsic::vector_reduce_smin:
2101 case Intrinsic::vector_reduce_xor: {
2108 bool EvenCnt = VecTy->getElementCount().isKnownEven();
2112 if (VecTy->isScalableTy() || EvenCnt)
2116 case Intrinsic::umin:
2121 case Intrinsic::umax:
2126 case Intrinsic::smin:
2132 case Intrinsic::smax:
2138 case Intrinsic::ptrmask: {
2141 const Value *Mask =
I->getOperand(1);
2142 Known2 =
KnownBits(Mask->getType()->getScalarSizeInBits());
2148 case Intrinsic::x86_sse2_pmulh_w:
2149 case Intrinsic::x86_avx2_pmulh_w:
2150 case Intrinsic::x86_avx512_pmulh_w_512:
2155 case Intrinsic::x86_sse2_pmulhu_w:
2156 case Intrinsic::x86_avx2_pmulhu_w:
2157 case Intrinsic::x86_avx512_pmulhu_w_512:
2162 case Intrinsic::x86_sse42_crc32_64_64:
2165 case Intrinsic::x86_ssse3_phadd_d_128:
2166 case Intrinsic::x86_ssse3_phadd_w_128:
2167 case Intrinsic::x86_avx2_phadd_d:
2168 case Intrinsic::x86_avx2_phadd_w: {
2170 I, DemandedElts, Q,
Depth,
2176 case Intrinsic::x86_ssse3_phadd_sw_128:
2177 case Intrinsic::x86_avx2_phadd_sw: {
2182 case Intrinsic::x86_ssse3_phsub_d_128:
2183 case Intrinsic::x86_ssse3_phsub_w_128:
2184 case Intrinsic::x86_avx2_phsub_d:
2185 case Intrinsic::x86_avx2_phsub_w: {
2187 I, DemandedElts, Q,
Depth,
2193 case Intrinsic::x86_ssse3_phsub_sw_128:
2194 case Intrinsic::x86_avx2_phsub_sw: {
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax: {
  bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
  MaxVL = std::min(MaxVL, CI->getZExtValue());
  unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
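// Sketch (assumed, standalone) of the bound used above: vsetvli returns a
// vector length no larger than MaxVL, so every bit at position
// floor(log2(MaxVL)) + 1 and above is known zero.  Assumes MaxVL < 2^63.
static unsigned long long knownZeroMaskForMaxValue(unsigned long long MaxVL) {
  unsigned FirstKnownZeroBit = 0;
  while ((1ULL << FirstKnownZeroBit) <= MaxVL) // Log2_32(MaxVL) + 1, by hand.
    ++FirstKnownZeroBit;
  return ~0ULL << FirstKnownZeroBit;           // Bits that can never be set.
}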
2221 case Intrinsic::vscale: {
2222 if (!
II->getParent() || !
II->getFunction())
2232 case Instruction::ShuffleVector: {
2241 APInt DemandedLHS, DemandedRHS;
2247 if (!!DemandedLHS) {
2248 const Value *
LHS = Shuf->getOperand(0);
2254 if (!!DemandedRHS) {
2255 const Value *
RHS = Shuf->getOperand(1);
2261 case Instruction::InsertElement: {
2266 const Value *Vec =
I->getOperand(0);
2267 const Value *Elt =
I->getOperand(1);
2270 APInt DemandedVecElts = DemandedElts;
2271 bool NeedsElt =
true;
2273 if (CIdx && CIdx->getValue().ult(NumElts)) {
2274 DemandedVecElts.
clearBit(CIdx->getZExtValue());
2275 NeedsElt = DemandedElts[CIdx->getZExtValue()];
2286 if (!DemandedVecElts.
isZero()) {
2292 case Instruction::ExtractElement: {
2295 const Value *Vec =
I->getOperand(0);
2296 const Value *Idx =
I->getOperand(1);
2305 if (CIdx && CIdx->getValue().ult(NumElts))
2310 case Instruction::ExtractValue:
2315 switch (
II->getIntrinsicID()) {
2317 case Intrinsic::uadd_with_overflow:
2318 case Intrinsic::sadd_with_overflow:
2320 true,
II->getArgOperand(0),
II->getArgOperand(1),
false,
2321 false, DemandedElts, Known, Known2, Q,
Depth);
2323 case Intrinsic::usub_with_overflow:
2324 case Intrinsic::ssub_with_overflow:
2326 false,
II->getArgOperand(0),
II->getArgOperand(1),
false,
2327 false, DemandedElts, Known, Known2, Q,
Depth);
2329 case Intrinsic::umul_with_overflow:
2330 case Intrinsic::smul_with_overflow:
2332 false, DemandedElts, Known, Known2, Q,
Depth);
2338 case Instruction::Freeze:
2382 if (!DemandedElts) {
2388 assert(V &&
"No Value?");
2392 Type *Ty = V->getType();
2395 assert((Ty->isIntOrIntVectorTy(
BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
2396 "Not integer or pointer type!");
2400 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
2401 "DemandedElt width should equal the fixed vector number of elements");
2404 "DemandedElt width should be 1 for scalars or scalable vectors");
2410 "V and Known should have same BitWidth");
2413 "V and Known should have same BitWidth");
2435 for (
unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2436 if (!DemandedElts[i])
2438 APInt Elt = CDV->getElementAsAPInt(i);
2452 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2453 if (!DemandedElts[i])
2463 const APInt &Elt = ElementCI->getValue();
2484 if (std::optional<ConstantRange>
Range =
A->getRange())
2485 Known =
Range->toKnownBits();
2494 if (!GA->isInterposable())
2502 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
2503 Known = CR->toKnownBits();
2508 Align Alignment = V->getPointerAlignment(Q.
DL);
2524 Value *Start =
nullptr, *Step =
nullptr;
2530 if (U.get() == Start) {
2546 case Instruction::Mul:
2551 case Instruction::SDiv:
2557 case Instruction::UDiv:
2563 case Instruction::Shl:
2565 case Instruction::AShr:
2569 case Instruction::LShr:
2607 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2649 return F->hasFnAttribute(Attribute::VScaleRange);
2666 switch (
I->getOpcode()) {
2667 case Instruction::ZExt:
2669 case Instruction::Trunc:
2671 case Instruction::Shl:
2675 case Instruction::LShr:
2679 case Instruction::UDiv:
2683 case Instruction::Mul:
2687 case Instruction::And:
2698 case Instruction::Add: {
2704 if (
match(
I->getOperand(0),
2708 if (
match(
I->getOperand(1),
2713 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2722 if ((~(LHSBits.
Zero & RHSBits.
Zero)).isPowerOf2())
2735 case Instruction::Select:
2738 case Instruction::PHI: {
2759 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2760 return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
2763 case Instruction::Invoke:
2764 case Instruction::Call: {
2766 switch (
II->getIntrinsicID()) {
2767 case Intrinsic::umax:
2768 case Intrinsic::smax:
2769 case Intrinsic::umin:
2770 case Intrinsic::smin:
2775 case Intrinsic::bitreverse:
2776 case Intrinsic::bswap:
2778 case Intrinsic::fshr:
2779 case Intrinsic::fshl:
2781 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
2805 F =
I->getFunction();
2809 if (!
GEP->hasNoUnsignedWrap() &&
2810 !(
GEP->isInBounds() &&
2815 assert(
GEP->getType()->isPointerTy() &&
"We only support plain pointer GEP");
2826 GTI != GTE; ++GTI) {
2828 if (
StructType *STy = GTI.getStructTypeOrNull()) {
2833 if (ElementOffset > 0)
2839 if (GTI.getSequentialElementStride(Q.
DL).isZero())
2873 unsigned NumUsesExplored = 0;
2874 for (
auto &U : V->uses()) {
2883 if (V->getType()->isPointerTy()) {
2885 if (CB->isArgOperand(&U) &&
2886 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
2914 NonNullIfTrue =
true;
2916 NonNullIfTrue =
false;
2922 for (
const auto *CmpU : UI->
users()) {
2924 if (Visited.
insert(CmpU).second)
2927 while (!WorkList.
empty()) {
2936 for (
const auto *CurrU : Curr->users())
2937 if (Visited.
insert(CurrU).second)
2943 assert(BI->isConditional() &&
"uses a comparison!");
2946 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2950 }
else if (NonNullIfTrue &&
isGuard(Curr) &&
2965 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2967 for (
unsigned i = 0; i < NumRanges; ++i) {
2983 Value *Start =
nullptr, *Step =
nullptr;
2984 const APInt *StartC, *StepC;
2990 case Instruction::Add:
2996 case Instruction::Mul:
2999 case Instruction::Shl:
3001 case Instruction::AShr:
3002 case Instruction::LShr:
3018 bool NUW,
unsigned Depth) {
3075 return ::isKnownNonEqual(
X,
Y, DemandedElts, Q,
Depth);
3080 bool NUW,
unsigned Depth) {
auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.shl(Rhs);
  case Instruction::LShr:
    return Lhs.lshr(Rhs);
  case Instruction::AShr:
    return Lhs.ashr(Rhs);
auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.lshr(Rhs);
  case Instruction::LShr:
  case Instruction::AShr:
    return Lhs.shl(Rhs);
if (MaxShift.uge(NumBits))
if (!ShiftOp(KnownVal.One, MaxShift).isZero())
if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
3158 const APInt &DemandedElts,
3161 switch (
I->getOpcode()) {
3162 case Instruction::Alloca:
3164 return I->getType()->getPointerAddressSpace() == 0;
3165 case Instruction::GetElementPtr:
3166 if (
I->getType()->isPointerTy())
3169 case Instruction::BitCast: {
3197 Type *FromTy =
I->getOperand(0)->getType();
3202 case Instruction::IntToPtr:
3211 case Instruction::PtrToInt:
3219 case Instruction::Trunc:
3222 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
3228 case Instruction::Xor:
3229 case Instruction::Sub:
3231 I->getOperand(1),
Depth);
3232 case Instruction::Or:
3243 case Instruction::SExt:
3244 case Instruction::ZExt:
3248 case Instruction::Shl: {
3263 case Instruction::LShr:
3264 case Instruction::AShr: {
3279 case Instruction::UDiv:
3280 case Instruction::SDiv: {
3295 if (
I->getOpcode() == Instruction::SDiv) {
3297 XKnown = XKnown.
abs(
false);
3298 YKnown = YKnown.
abs(
false);
3304 return XUgeY && *XUgeY;
3306 case Instruction::Add: {
3316 case Instruction::Mul: {
3322 case Instruction::Select: {
3329 auto SelectArmIsNonZero = [&](
bool IsTrueArm) {
3331 Op = IsTrueArm ?
I->getOperand(1) :
I->getOperand(2);
3349 if (SelectArmIsNonZero(
true) &&
3350 SelectArmIsNonZero(
false))
3354 case Instruction::PHI: {
3365 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
3369 BasicBlock *TrueSucc, *FalseSucc;
3370 if (match(RecQ.CxtI,
3371 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
3372 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
3374 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
3376 if (FalseSucc == PN->getParent())
3377 Pred = CmpInst::getInversePredicate(Pred);
3378 if (cmpExcludesZero(Pred, X))
3386 case Instruction::InsertElement: {
3390 const Value *Vec =
I->getOperand(0);
3391 const Value *Elt =
I->getOperand(1);
3395 APInt DemandedVecElts = DemandedElts;
3396 bool SkipElt =
false;
3398 if (CIdx && CIdx->getValue().ult(NumElts)) {
3399 DemandedVecElts.
clearBit(CIdx->getZExtValue());
3400 SkipElt = !DemandedElts[CIdx->getZExtValue()];
3406 (DemandedVecElts.
isZero() ||
3409 case Instruction::ExtractElement:
3411 const Value *Vec = EEI->getVectorOperand();
3412 const Value *Idx = EEI->getIndexOperand();
3415 unsigned NumElts = VecTy->getNumElements();
3417 if (CIdx && CIdx->getValue().ult(NumElts))
3423 case Instruction::ShuffleVector: {
3427 APInt DemandedLHS, DemandedRHS;
3433 return (DemandedRHS.
isZero() ||
3438 case Instruction::Freeze:
3442 case Instruction::Load: {
3459 case Instruction::ExtractValue: {
3465 case Instruction::Add:
3470 case Instruction::Sub:
3473 case Instruction::Mul:
3476 false,
false,
Depth);
3482 case Instruction::Call:
3483 case Instruction::Invoke: {
3485 if (
I->getType()->isPointerTy()) {
3486 if (
Call->isReturnNonNull())
3493 if (std::optional<ConstantRange>
Range =
Call->getRange()) {
3494 const APInt ZeroValue(
Range->getBitWidth(), 0);
3495 if (!
Range->contains(ZeroValue))
3498 if (
const Value *RV =
Call->getReturnedArgOperand())
3504 switch (
II->getIntrinsicID()) {
3505 case Intrinsic::sshl_sat:
3506 case Intrinsic::ushl_sat:
3507 case Intrinsic::abs:
3508 case Intrinsic::bitreverse:
3509 case Intrinsic::bswap:
3510 case Intrinsic::ctpop:
3514 case Intrinsic::ssub_sat:
3517 case Intrinsic::sadd_sat:
3519 II->getArgOperand(1),
3520 true,
false,
Depth);
3522 case Intrinsic::vector_reverse:
3526 case Intrinsic::vector_reduce_or:
3527 case Intrinsic::vector_reduce_umax:
3528 case Intrinsic::vector_reduce_umin:
3529 case Intrinsic::vector_reduce_smax:
3530 case Intrinsic::vector_reduce_smin:
3532 case Intrinsic::umax:
3533 case Intrinsic::uadd_sat:
3541 case Intrinsic::smax: {
3544 auto IsNonZero = [&](
Value *
Op, std::optional<bool> &OpNonZero,
3546 if (!OpNonZero.has_value())
3547 OpNonZero = OpKnown.isNonZero() ||
3552 std::optional<bool> Op0NonZero, Op1NonZero;
3556 IsNonZero(
II->getArgOperand(1), Op1NonZero, Op1Known))
3561 IsNonZero(
II->getArgOperand(0), Op0NonZero, Op0Known))
3563 return IsNonZero(
II->getArgOperand(1), Op1NonZero, Op1Known) &&
3564 IsNonZero(
II->getArgOperand(0), Op0NonZero, Op0Known);
3566 case Intrinsic::smin: {
3582 case Intrinsic::umin:
3585 case Intrinsic::cttz:
3588 case Intrinsic::ctlz:
3591 case Intrinsic::fshr:
3592 case Intrinsic::fshl:
3594 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
3597 case Intrinsic::vscale:
3599 case Intrinsic::experimental_get_vector_length:
3613 return Known.
One != 0;
3624 Type *Ty = V->getType();
3631 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3632 "DemandedElt width should equal the fixed vector number of elements");
3635 "DemandedElt width should be 1 for scalars");
3640 if (
C->isNullValue())
3649 for (
unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3650 if (!DemandedElts[i])
3652 Constant *Elt =
C->getAggregateElement(i);
3669 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3670 GV->getType()->getAddressSpace() == 0)
3680 if (std::optional<ConstantRange>
Range =
A->getRange()) {
3681 const APInt ZeroValue(
Range->getBitWidth(), 0);
3682 if (!
Range->contains(ZeroValue))
3699 if (((
A->hasPassPointeeByValueCopyAttr() &&
3701 A->hasNonNullAttr()))
3723 APInt DemandedElts =
3725 return ::isKnownNonZero(V, DemandedElts, Q,
Depth);
3734static std::optional<std::pair<Value*, Value*>>
3738 return std::nullopt;
3747 case Instruction::Or:
3752 case Instruction::Xor:
3753 case Instruction::Add: {
3761 case Instruction::Sub:
3767 case Instruction::Mul: {
3773 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3774 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3784 case Instruction::Shl: {
3789 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3790 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3797 case Instruction::AShr:
3798 case Instruction::LShr: {
3801 if (!PEO1->isExact() || !PEO2->isExact())
3808 case Instruction::SExt:
3809 case Instruction::ZExt:
3813 case Instruction::PHI: {
3821 Value *Start1 =
nullptr, *Step1 =
nullptr;
3823 Value *Start2 =
nullptr, *Step2 =
nullptr;
3839 if (Values->first != PN1 || Values->second != PN2)
3842 return std::make_pair(Start1, Start2);
3845 return std::nullopt;
3852 const APInt &DemandedElts,
3860 case Instruction::Or:
3864 case Instruction::Xor:
3865 case Instruction::Add:
3886 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3887 !
C->isZero() && !
C->isOne() &&
3901 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3915 bool UsedFullRecursion =
false;
3917 if (!VisitedBBs.
insert(IncomBB).second)
3921 const APInt *C1, *C2;
3926 if (UsedFullRecursion)
3930 RecQ.
CxtI = IncomBB->getTerminator();
3933 UsedFullRecursion =
true;
3947 const Value *Cond2 = SI2->getCondition();
3950 DemandedElts, Q,
Depth + 1) &&
3952 DemandedElts, Q,
Depth + 1);
3965 if (!
A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
3969 if (!GEPA || GEPA->getNumIndices() != 1 || !
isa<Constant>(GEPA->idx_begin()))
3974 if (!PN || PN->getNumIncomingValues() != 2)
3979 Value *Start =
nullptr;
3981 if (PN->getIncomingValue(0) == Step)
3982 Start = PN->getIncomingValue(1);
3983 else if (PN->getIncomingValue(1) == Step)
3984 Start = PN->getIncomingValue(0);
3995 APInt StartOffset(IndexWidth, 0);
3996 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, StartOffset);
3997 APInt StepOffset(IndexWidth, 0);
4003 APInt OffsetB(IndexWidth, 0);
4004 B =
B->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, OffsetB);
4005 return Start ==
B &&
4017 auto IsKnownNonEqualFromDominatingCondition = [&](
const Value *V) {
4038 if (IsKnownNonEqualFromDominatingCondition(V1) ||
4039 IsKnownNonEqualFromDominatingCondition(V2))
4053 "Got assumption for the wrong function!");
4054 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4055 "must be an assume intrinsic");
4085 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
4087 return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
4149 const APInt &DemandedElts,
4155 unsigned MinSignBits = TyBits;
4157 for (
unsigned i = 0; i != NumElts; ++i) {
4158 if (!DemandedElts[i])
4165 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
4172 const APInt &DemandedElts,
4178 assert(Result > 0 &&
"At least one sign bit needs to be present!");
4190 const APInt &DemandedElts,
4192 Type *Ty = V->getType();
4198 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
4199 "DemandedElt width should equal the fixed vector number of elements");
4202 "DemandedElt width should be 1 for scalars");
4216 unsigned FirstAnswer = 1;
4227 case Instruction::BitCast: {
4228 Value *Src = U->getOperand(0);
4229 Type *SrcTy = Src->getType();
4233 if (!SrcTy->isIntOrIntVectorTy())
4239 if ((SrcBits % TyBits) != 0)
4252 case Instruction::SExt:
4253 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
4257 case Instruction::SDiv: {
4258 const APInt *Denominator;
4271 return std::min(TyBits, NumBits + Denominator->
logBase2());
4276 case Instruction::SRem: {
4279 const APInt *Denominator;
4300 unsigned ResBits = TyBits - Denominator->
ceilLogBase2();
4301 Tmp = std::max(Tmp, ResBits);
4307 case Instruction::AShr: {
4312 if (ShAmt->
uge(TyBits))
4315 Tmp += ShAmtLimited;
4316 if (Tmp > TyBits) Tmp = TyBits;
4320 case Instruction::Shl: {
4325 if (ShAmt->
uge(TyBits))
4330 ShAmt->
uge(TyBits -
X->getType()->getScalarSizeInBits())) {
4332 Tmp += TyBits -
X->getType()->getScalarSizeInBits();
4336 if (ShAmt->
uge(Tmp))
4343 case Instruction::And:
4344 case Instruction::Or:
4345 case Instruction::Xor:
4350 FirstAnswer = std::min(Tmp, Tmp2);
4357 case Instruction::Select: {
4361 const APInt *CLow, *CHigh;
4369 return std::min(Tmp, Tmp2);
4372 case Instruction::Add:
4376 if (Tmp == 1)
break;
4380 if (CRHS->isAllOnesValue()) {
4386 if ((Known.
Zero | 1).isAllOnes())
4398 return std::min(Tmp, Tmp2) - 1;
4400 case Instruction::Sub:
4407 if (CLHS->isNullValue()) {
4412 if ((Known.
Zero | 1).isAllOnes())
4429 return std::min(Tmp, Tmp2) - 1;
case Instruction::Mul: {
  unsigned SignBitsOp0 =
  if (SignBitsOp0 == 1)
  unsigned SignBitsOp1 =
  if (SignBitsOp1 == 1)
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
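// Worked example (assumed values) for the formula above: multiplying two i32
// values with 20 and 24 known sign bits gives operands of 13 and 9
// significant bits, so the product needs at most 13 + 9 = 22 valid bits and
// retains 32 - 22 + 1 = 11 sign bits; if the sum exceeded the type width the
// conservative answer would be a single sign bit.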
4447 case Instruction::PHI: {
4451 if (NumIncomingValues > 4)
break;
4453 if (NumIncomingValues == 0)
break;
4459 for (
unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4460 if (Tmp == 1)
return Tmp;
4463 DemandedElts, RecQ,
Depth + 1));
4468 case Instruction::Trunc: {
4473 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4474 if (Tmp > (OperandTyBits - TyBits))
4475 return Tmp - (OperandTyBits - TyBits);
4480 case Instruction::ExtractElement:
4487 case Instruction::ShuffleVector: {
4495 APInt DemandedLHS, DemandedRHS;
4500 Tmp = std::numeric_limits<unsigned>::max();
4501 if (!!DemandedLHS) {
4502 const Value *
LHS = Shuf->getOperand(0);
4509 if (!!DemandedRHS) {
4510 const Value *
RHS = Shuf->getOperand(1);
4512 Tmp = std::min(Tmp, Tmp2);
4518 assert(Tmp <= TyBits &&
"Failed to determine minimum sign bits");
4521 case Instruction::Call: {
4523 switch (
II->getIntrinsicID()) {
4526 case Intrinsic::abs:
4534 case Intrinsic::smin:
4535 case Intrinsic::smax: {
4536 const APInt *CLow, *CHigh;
4551 if (
unsigned VecSignBits =
4569 if (
F->isIntrinsic())
4570 return F->getIntrinsicID();
4576 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
4586 return Intrinsic::sin;
4590 return Intrinsic::cos;
4594 return Intrinsic::tan;
4598 return Intrinsic::asin;
4602 return Intrinsic::acos;
4606 return Intrinsic::atan;
4608 case LibFunc_atan2f:
4609 case LibFunc_atan2l:
4610 return Intrinsic::atan2;
4614 return Intrinsic::sinh;
4618 return Intrinsic::cosh;
4622 return Intrinsic::tanh;
4626 return Intrinsic::exp;
4630 return Intrinsic::exp2;
4632 case LibFunc_exp10f:
4633 case LibFunc_exp10l:
4634 return Intrinsic::exp10;
4638 return Intrinsic::log;
4640 case LibFunc_log10f:
4641 case LibFunc_log10l:
4642 return Intrinsic::log10;
4646 return Intrinsic::log2;
4650 return Intrinsic::fabs;
4654 return Intrinsic::minnum;
4658 return Intrinsic::maxnum;
4659 case LibFunc_copysign:
4660 case LibFunc_copysignf:
4661 case LibFunc_copysignl:
4662 return Intrinsic::copysign;
4664 case LibFunc_floorf:
4665 case LibFunc_floorl:
4666 return Intrinsic::floor;
4670 return Intrinsic::ceil;
4672 case LibFunc_truncf:
4673 case LibFunc_truncl:
4674 return Intrinsic::trunc;
4678 return Intrinsic::rint;
4679 case LibFunc_nearbyint:
4680 case LibFunc_nearbyintf:
4681 case LibFunc_nearbyintl:
4682 return Intrinsic::nearbyint;
4684 case LibFunc_roundf:
4685 case LibFunc_roundl:
4686 return Intrinsic::round;
4687 case LibFunc_roundeven:
4688 case LibFunc_roundevenf:
4689 case LibFunc_roundevenl:
4690 return Intrinsic::roundeven;
4694 return Intrinsic::pow;
4698 return Intrinsic::sqrt;
4705 Ty = Ty->getScalarType();
bool &TrueIfSigned) {
  TrueIfSigned = true;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isMaxSignedValue();
  TrueIfSigned = true;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMaxSignedValue();
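// Sketch (assumed, standalone) of the mapping implemented above: certain
// icmp predicate/constant pairs are really sign-bit tests of a value X:
//   slt X, 0      and  sle X, -1    -> true iff the sign bit of X is set
//   sgt X, -1     and  sge X, 0     -> true iff the sign bit of X is clear
//   ugt X, SMAX   and  uge X, SMIN  -> true iff the sign bit of X is set
//   ult X, SMIN   and  ule X, SMAX  -> true iff the sign bit of X is clear
static bool signBitSetViaUnsignedCompare(unsigned X) { // assumes 32-bit unsigned
  return X > 0x7FFFFFFFu; // ugt SMAX: only values with bit 31 set compare greater.
}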
4753 unsigned Depth = 0) {
4778 KnownFromContext.
knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4782 KnownFromContext.
knownNot(CondIsTrue ? ~Mask : Mask);
4788 if (TrueIfSigned == CondIsTrue)
4804 return KnownFromContext;
4824 return KnownFromContext;
4834 "Got assumption for the wrong function!");
4835 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4836 "must be an assume intrinsic");
4842 true, Q.
CxtI, KnownFromContext);
4845 return KnownFromContext;
4856 APInt DemandedElts =
4862 const APInt &DemandedElts,
4867 if ((InterestedClasses &
4873 KnownSrc, Q,
Depth + 1);
4888 assert(Known.
isUnknown() &&
"should not be called with known information");
4890 if (!DemandedElts) {
4900 Known.
SignBit = CFP->isNegative();
4921 bool SignBitAllZero =
true;
4922 bool SignBitAllOne =
true;
4925 unsigned NumElts = VFVTy->getNumElements();
4926 for (
unsigned i = 0; i != NumElts; ++i) {
4927 if (!DemandedElts[i])
4943 const APFloat &
C = CElt->getValueAPF();
4946 SignBitAllZero =
false;
4948 SignBitAllOne =
false;
4950 if (SignBitAllOne != SignBitAllZero)
4951 Known.
SignBit = SignBitAllOne;
4957 KnownNotFromFlags |= CB->getRetNoFPClass();
4959 KnownNotFromFlags |= Arg->getNoFPClass();
4963 if (FPOp->hasNoNaNs())
4964 KnownNotFromFlags |=
fcNan;
4965 if (FPOp->hasNoInfs())
4966 KnownNotFromFlags |=
fcInf;
4970 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
4974 InterestedClasses &= ~KnownNotFromFlags;
4993 const unsigned Opc =
Op->getOpcode();
4995 case Instruction::FNeg: {
4997 Known, Q,
Depth + 1);
5001 case Instruction::Select: {
5009 Value *TestedValue =
nullptr;
5015 Value *CmpLHS, *CmpRHS;
5022 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
5023 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
5029 MaskIfTrue = TestedMask;
5030 MaskIfFalse = ~TestedMask;
5033 if (TestedValue ==
LHS) {
5035 FilterLHS = MaskIfTrue;
5036 }
else if (TestedValue ==
RHS) {
5038 FilterRHS = MaskIfFalse;
5047 Known2, Q,
Depth + 1);
5053 case Instruction::Call: {
5057 case Intrinsic::fabs: {
5062 InterestedClasses, Known, Q,
Depth + 1);
5068 case Intrinsic::copysign: {
5072 Known, Q,
Depth + 1);
5074 KnownSign, Q,
Depth + 1);
5078 case Intrinsic::fma:
5079 case Intrinsic::fmuladd: {
5083 if (
II->getArgOperand(0) !=
II->getArgOperand(1))
5092 KnownAddend, Q,
Depth + 1);
5098 case Intrinsic::sqrt:
5099 case Intrinsic::experimental_constrained_sqrt: {
5102 if (InterestedClasses &
fcNan)
5106 KnownSrc, Q,
Depth + 1);
5124 II->getType()->getScalarType()->getFltSemantics();
5133 case Intrinsic::sin:
5134 case Intrinsic::cos: {
5138 KnownSrc, Q,
Depth + 1);
5144 case Intrinsic::maxnum:
5145 case Intrinsic::minnum:
5146 case Intrinsic::minimum:
5147 case Intrinsic::maximum:
5148 case Intrinsic::minimumnum:
5149 case Intrinsic::maximumnum: {
5152 KnownLHS, Q,
Depth + 1);
5154 KnownRHS, Q,
Depth + 1);
5157 Known = KnownLHS | KnownRHS;
5161 (IID == Intrinsic::minnum || IID == Intrinsic::maxnum ||
5162 IID == Intrinsic::minimumnum || IID == Intrinsic::maximumnum))
5165 if (IID == Intrinsic::maxnum || IID == Intrinsic::maximumnum) {
5173 }
else if (IID == Intrinsic::maximum) {
5179 }
else if (IID == Intrinsic::minnum || IID == Intrinsic::minimumnum) {
5187 }
else if (IID == Intrinsic::minimum) {
5210 II->getType()->getScalarType()->getFltSemantics());
5222 }
else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum ||
5223 IID == Intrinsic::maximumnum ||
5224 IID == Intrinsic::minimumnum) ||
5232 KnownLHS.
SignBit = std::nullopt;
5234 KnownRHS.
SignBit = std::nullopt;
5235 if ((IID == Intrinsic::maximum || IID == Intrinsic::maximumnum ||
5236 IID == Intrinsic::maxnum) &&
5239 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minimumnum ||
5240 IID == Intrinsic::minnum) &&
5247 case Intrinsic::canonicalize: {
5250 KnownSrc, Q,
Depth + 1);
5274 II->getType()->getScalarType()->getFltSemantics();
5294 case Intrinsic::vector_reduce_fmax:
5295 case Intrinsic::vector_reduce_fmin:
5296 case Intrinsic::vector_reduce_fmaximum:
5297 case Intrinsic::vector_reduce_fminimum: {
5301 InterestedClasses, Q,
Depth + 1);
5308 case Intrinsic::vector_reverse:
5311 II->getFastMathFlags(), InterestedClasses, Q,
Depth + 1);
5313 case Intrinsic::trunc:
5314 case Intrinsic::floor:
5315 case Intrinsic::ceil:
5316 case Intrinsic::rint:
5317 case Intrinsic::nearbyint:
5318 case Intrinsic::round:
5319 case Intrinsic::roundeven: {
5327 KnownSrc, Q,
Depth + 1);
5336 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5351 case Intrinsic::exp:
5352 case Intrinsic::exp2:
5353 case Intrinsic::exp10: {
5360 KnownSrc, Q,
Depth + 1);
5368 case Intrinsic::fptrunc_round: {
5373 case Intrinsic::log:
5374 case Intrinsic::log10:
5375 case Intrinsic::log2:
5376 case Intrinsic::experimental_constrained_log:
5377 case Intrinsic::experimental_constrained_log10:
5378 case Intrinsic::experimental_constrained_log2: {
5394 KnownSrc, Q,
Depth + 1);
5408 II->getType()->getScalarType()->getFltSemantics();
5416 case Intrinsic::powi: {
5420 const Value *Exp =
II->getArgOperand(1);
5421 Type *ExpTy = Exp->getType();
5425 ExponentKnownBits, Q,
Depth + 1);
5427 if (ExponentKnownBits.
Zero[0]) {
5442 KnownSrc, Q,
Depth + 1);
5447 case Intrinsic::ldexp: {
5450 KnownSrc, Q,
Depth + 1);
5466 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5472 II->getType()->getScalarType()->getFltSemantics();
5474 const Value *ExpArg =
II->getArgOperand(1);
5478 const int MantissaBits = Precision - 1;
5485 II->getType()->getScalarType()->getFltSemantics();
5486 if (ConstVal && ConstVal->
isZero()) {
5511 case Intrinsic::arithmetic_fence: {
5513 Known, Q,
Depth + 1);
5516 case Intrinsic::experimental_constrained_sitofp:
5517 case Intrinsic::experimental_constrained_uitofp:
5527 if (IID == Intrinsic::experimental_constrained_uitofp)
5538 case Instruction::FAdd:
5539 case Instruction::FSub: {
5542 Op->getOpcode() == Instruction::FAdd &&
5544 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5547 if (!WantNaN && !WantNegative && !WantNegZero)
5553 if (InterestedClasses &
fcNan)
5554 InterestedSrcs |=
fcInf;
5556 KnownRHS, Q,
Depth + 1);
5560 WantNegZero ||
Opc == Instruction::FSub) {
5565 KnownLHS, Q,
Depth + 1);
5575 if (
Op->getOpcode() == Instruction::FAdd) {
5583 Op->getType()->getScalarType()->getFltSemantics();
5597 Op->getType()->getScalarType()->getFltSemantics();
5611 case Instruction::FMul: {
5613 if (
Op->getOperand(0) ==
Op->getOperand(1))
5650 Type *OpTy =
Op->getType()->getScalarType();
5662 case Instruction::FDiv:
5663 case Instruction::FRem: {
5664 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5666 if (
Op->getOpcode() == Instruction::FDiv) {
5677 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
5679 const bool WantPositive =
5681 if (!WantNan && !WantNegative && !WantPositive)
5690 bool KnowSomethingUseful =
5693 if (KnowSomethingUseful || WantPositive) {
5699 InterestedClasses & InterestedLHS, KnownLHS, Q,
5705 Op->getType()->getScalarType()->getFltSemantics();
5707 if (
Op->getOpcode() == Instruction::FDiv) {
5746 case Instruction::FPExt: {
5749 Known, Q,
Depth + 1);
5752 Op->getType()->getScalarType()->getFltSemantics();
5754 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5770 case Instruction::FPTrunc: {
5775 case Instruction::SIToFP:
5776 case Instruction::UIToFP: {
5785 if (
Op->getOpcode() == Instruction::UIToFP)
5788 if (InterestedClasses &
fcInf) {
5792 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5793 if (
Op->getOpcode() == Instruction::SIToFP)
5798 Type *FPTy =
Op->getType()->getScalarType();
5805 case Instruction::ExtractElement: {
5808 const Value *Vec =
Op->getOperand(0);
5810 APInt DemandedVecElts;
5812 unsigned NumElts = VecTy->getNumElements();
5815 if (CIdx && CIdx->getValue().ult(NumElts))
5818 DemandedVecElts =
APInt(1, 1);
5824 case Instruction::InsertElement: {
5828 const Value *Vec =
Op->getOperand(0);
5829 const Value *Elt =
Op->getOperand(1);
5832 APInt DemandedVecElts = DemandedElts;
5833 bool NeedsElt =
true;
5835 if (CIdx && CIdx->getValue().ult(NumElts)) {
5836 DemandedVecElts.
clearBit(CIdx->getZExtValue());
5837 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5851 if (!DemandedVecElts.
isZero()) {
5860 case Instruction::ShuffleVector: {
5863 APInt DemandedLHS, DemandedRHS;
5868 if (!!DemandedLHS) {
5869 const Value *
LHS = Shuf->getOperand(0);
5880 if (!!DemandedRHS) {
5882 const Value *
RHS = Shuf->getOperand(1);
5890 case Instruction::ExtractValue: {
5897 switch (
II->getIntrinsicID()) {
5898 case Intrinsic::frexp: {
5903 InterestedClasses, KnownSrc, Q,
Depth + 1);
5907 Op->getType()->getScalarType()->getFltSemantics();
5942 case Instruction::PHI: {
5945 if (
P->getNumIncomingValues() == 0)
5952 if (
Depth < PhiRecursionLimit) {
5959 for (
const Use &U :
P->operands()) {
5989 case Instruction::BitCast: {
5992 !Src->getType()->isIntOrIntVectorTy())
5995 const Type *Ty =
Op->getType()->getScalarType();
5996 KnownBits Bits(Ty->getScalarSizeInBits());
6000 if (Bits.isNonNegative())
6002 else if (Bits.isNegative())
6005 if (Ty->isIEEELikeFPTy()) {
6015 else if (!
APFloat(Ty->getFltSemantics(), ~Bits.Zero).
isNaN())
6022 InfKB.Zero.clearSignBit();
6024 assert(!InfResult.value());
6026 }
else if (Bits == InfKB) {
6034 ZeroKB.Zero.clearSignBit();
6036 assert(!ZeroResult.value());
6038 }
else if (Bits == ZeroKB) {
6051 const APInt &DemandedElts,
6058 return KnownClasses;
6084 InterestedClasses &=
~fcNan;
6086 InterestedClasses &=
~fcInf;
6092 Result.KnownFPClasses &=
~fcNan;
6094 Result.KnownFPClasses &=
~fcInf;
6103 APInt DemandedElts =
6157 if (FPOp->hasNoSignedZeros())
6161 switch (
User->getOpcode()) {
6162 case Instruction::FPToSI:
6163 case Instruction::FPToUI:
6165 case Instruction::FCmp:
6168 case Instruction::Call:
6170 switch (
II->getIntrinsicID()) {
6171 case Intrinsic::fabs:
6173 case Intrinsic::copysign:
6174 return U.getOperandNo() == 0;
6175 case Intrinsic::is_fpclass:
6176 case Intrinsic::vp_is_fpclass: {
6196 if (FPOp->hasNoNaNs())
6200 switch (
User->getOpcode()) {
6201 case Instruction::FPToSI:
6202 case Instruction::FPToUI:
6205 case Instruction::FAdd:
6206 case Instruction::FSub:
6207 case Instruction::FMul:
6208 case Instruction::FDiv:
6209 case Instruction::FRem:
6210 case Instruction::FPTrunc:
6211 case Instruction::FPExt:
6212 case Instruction::FCmp:
6215 case Instruction::FNeg:
6216 case Instruction::Select:
6217 case Instruction::PHI:
6219 case Instruction::Ret:
6220 return User->getFunction()->getAttributes().getRetNoFPClass() &
6222 case Instruction::Call:
6223 case Instruction::Invoke: {
6225 switch (
II->getIntrinsicID()) {
6226 case Intrinsic::fabs:
6228 case Intrinsic::copysign:
6229 return U.getOperandNo() == 0;
6231 case Intrinsic::maxnum:
6232 case Intrinsic::minnum:
6233 case Intrinsic::maximum:
6234 case Intrinsic::minimum:
6235 case Intrinsic::maximumnum:
6236 case Intrinsic::minimumnum:
6237 case Intrinsic::canonicalize:
6238 case Intrinsic::fma:
6239 case Intrinsic::fmuladd:
6240 case Intrinsic::sqrt:
6241 case Intrinsic::pow:
6242 case Intrinsic::powi:
6243 case Intrinsic::fptoui_sat:
6244 case Intrinsic::fptosi_sat:
6245 case Intrinsic::is_fpclass:
6246 case Intrinsic::vp_is_fpclass:
6265 if (V->getType()->isIntegerTy(8))
6276 if (
DL.getTypeStoreSize(V->getType()).isZero())
6291 if (
C->isNullValue())
6298 if (CFP->getType()->isHalfTy())
6300 else if (CFP->getType()->isFloatTy())
6302 else if (CFP->getType()->isDoubleTy())
6311 if (CI->getBitWidth() % 8 == 0) {
6312 assert(CI->getBitWidth() > 8 &&
"8 bits should be handled above!");
6313 if (!CI->getValue().isSplat(8))
6315 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
6320 if (CE->getOpcode() == Instruction::IntToPtr) {
6322 unsigned BitWidth =
DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6335 if (LHS == UndefInt8)
6337 if (RHS == UndefInt8)
6343 Value *Val = UndefInt8;
6344 for (
uint64_t I = 0, E = CA->getNumElements();
I != E; ++
I)
6351 Value *Val = UndefInt8;
6386 while (PrevTo != OrigTo) {
6433 unsigned IdxSkip = Idxs.
size();
6446 std::optional<BasicBlock::iterator> InsertBefore) {
6449 if (idx_range.
empty())
6452 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6453 "Not looking at a struct or array?");
6455 "Invalid indices for type?");
6458 C =
C->getAggregateElement(idx_range[0]);
6459 if (!
C)
return nullptr;
6466 const unsigned *req_idx = idx_range.
begin();
6467 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
6468 i != e; ++i, ++req_idx) {
6469 if (req_idx == idx_range.
end()) {
6499 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
6508 unsigned size =
I->getNumIndices() + idx_range.
size();
6513 Idxs.
append(
I->idx_begin(),
I->idx_end());
6519 &&
"Number of indices added not correct?");
6536 assert(V &&
"V should not be null.");
6537 assert((ElementSize % 8) == 0 &&
6538 "ElementSize expected to be a multiple of the size of a byte.");
6539 unsigned ElementSizeInBytes = ElementSize / 8;
6551 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
6558 uint64_t StartIdx = Off.getLimitedValue();
6565 if ((StartIdx % ElementSizeInBytes) != 0)
6568 Offset += StartIdx / ElementSizeInBytes;
6574 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
6577 Slice.Array =
nullptr;
6589 Type *InitElTy = ArrayInit->getElementType();
6594 ArrayTy = ArrayInit->getType();
6599 if (ElementSize != 8)
6618 Slice.Array = Array;
6620 Slice.Length = NumElts -
Offset;
6634 if (Slice.Array ==
nullptr) {
6645 if (Slice.Length == 1) {
6657 Str = Str.
substr(Slice.Offset);
6663 Str = Str.substr(0, Str.find(
'\0'));
6676 unsigned CharSize) {
6678 V = V->stripPointerCasts();
6683 if (!PHIs.
insert(PN).second)
6688 for (
Value *IncValue : PN->incoming_values()) {
6690 if (Len == 0)
return 0;
6692 if (Len == ~0ULL)
continue;
6694 if (Len != LenSoFar && LenSoFar != ~0ULL)
6706 if (Len1 == 0)
return 0;
6708 if (Len2 == 0)
return 0;
6709 if (Len1 == ~0ULL)
return Len2;
6710 if (Len2 == ~0ULL)
return Len1;
6711 if (Len1 != Len2)
return 0;
if (Slice.Array == nullptr)
unsigned NullIndex = 0;
for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
return NullIndex + 1;
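// Sketch (assumed, standalone) of the scan above: the length of a constant
// string is the distance to its first NUL byte, counted here including the
// terminator because that is what the surrounding helper appears to return.
static unsigned constantStrlenPlusNul(const unsigned char *Data,
                                      unsigned Length) {
  for (unsigned I = 0; I != Length; ++I)
    if (Data[I] == 0)
      return I + 1; // Index of the NUL plus one, i.e. strlen(...) + 1.
  return 0;         // No terminator within the constant data.
}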
6740 if (!V->getType()->isPointerTy())
6747 return Len == ~0ULL ? 1 : Len;
6752 bool MustPreserveNullness) {
6754 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6755 if (
const Value *RV =
Call->getReturnedArgOperand())
6759 Call, MustPreserveNullness))
6760 return Call->getArgOperand(0);
6766 switch (
Call->getIntrinsicID()) {
6767 case Intrinsic::launder_invariant_group:
6768 case Intrinsic::strip_invariant_group:
6769 case Intrinsic::aarch64_irg:
6770 case Intrinsic::aarch64_tagp:
6780 case Intrinsic::amdgcn_make_buffer_rsrc:
6782 case Intrinsic::ptrmask:
6783 return !MustPreserveNullness;
6784 case Intrinsic::threadlocal_address:
6787 return !
Call->getParent()->getParent()->isPresplitCoroutine();
6804 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6806 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6815 if (!L->isLoopInvariant(Load->getPointerOperand()))
6821 for (
unsigned Count = 0; MaxLookup == 0 ||
Count < MaxLookup; ++
Count) {
6823 const Value *PtrOp =
GEP->getPointerOperand();
6834 if (GA->isInterposable())
6836 V = GA->getAliasee();
6840 if (
PHI->getNumIncomingValues() == 1) {
6841 V =
PHI->getIncomingValue(0);
6862 assert(V->getType()->isPointerTy() &&
"Unexpected operand type!");
6869 const LoopInfo *LI,
unsigned MaxLookup) {
6877 if (!Visited.
insert(
P).second)
6906 }
while (!Worklist.
empty());
6910 const unsigned MaxVisited = 8;
6915 const Value *Object =
nullptr;
6925 if (!Visited.
insert(
P).second)
6928 if (Visited.
size() == MaxVisited)
6944 else if (Object !=
P)
6946 }
while (!Worklist.
empty());
6948 return Object ? Object : FirstObject;
6958 if (U->getOpcode() == Instruction::PtrToInt)
6959 return U->getOperand(0);
6966 if (U->getOpcode() != Instruction::Add ||
6971 V = U->getOperand(0);
6975 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
6992 for (
const Value *V : Objs) {
6993 if (!Visited.
insert(V).second)
6998 if (O->getType()->isPointerTy()) {
7011 }
while (!Working.
empty());
7020 auto AddWork = [&](
Value *V) {
7021 if (Visited.
insert(V).second)
7031 if (Result && Result != AI)
7035 AddWork(CI->getOperand(0));
7037 for (
Value *IncValue : PN->incoming_values())
7040 AddWork(
SI->getTrueValue());
7041 AddWork(
SI->getFalseValue());
7043 if (OffsetZero && !
GEP->hasAllZeroIndices())
7045 AddWork(
GEP->getPointerOperand());
7047 Value *Returned = CB->getReturnedArgOperand();
7055 }
while (!Worklist.
empty());
7061 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
7067 if (AllowLifetime &&
II->isLifetimeStartOrEnd())
7070 if (AllowDroppable &&
II->isDroppable())
7091 return (!Shuffle || Shuffle->isSelect()) &&
7098 bool IgnoreUBImplyingAttrs) {
7100 AC, DT, TLI, UseVariableInfo,
7101 IgnoreUBImplyingAttrs);
7107 bool UseVariableInfo,
bool IgnoreUBImplyingAttrs) {
7111 auto hasEqualReturnAndLeadingOperandTypes =
7112 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
7116 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
7122 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
7124 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
7131 case Instruction::UDiv:
7132 case Instruction::URem: {
7139 case Instruction::SDiv:
7140 case Instruction::SRem: {
7142 const APInt *Numerator, *Denominator;
7146 if (*Denominator == 0)
7158 case Instruction::Load: {
7159 if (!UseVariableInfo)
7172 case Instruction::Call: {
7176 const Function *Callee = CI->getCalledFunction();
7180 if (!Callee || !Callee->isSpeculatable())
7184 return IgnoreUBImplyingAttrs || !CI->hasUBImplyingAttrs();
7186 case Instruction::VAArg:
7187 case Instruction::Alloca:
7188 case Instruction::Invoke:
7189 case Instruction::CallBr:
7190 case Instruction::PHI:
7191 case Instruction::Store:
7192 case Instruction::Ret:
7193 case Instruction::Br:
7194 case Instruction::IndirectBr:
7195 case Instruction::Switch:
7196 case Instruction::Unreachable:
7197 case Instruction::Fence:
7198 case Instruction::AtomicRMW:
7199 case Instruction::AtomicCmpXchg:
7200 case Instruction::LandingPad:
7201 case Instruction::Resume:
7202 case Instruction::CatchSwitch:
7203 case Instruction::CatchPad:
7204 case Instruction::CatchRet:
7205 case Instruction::CleanupPad:
7206 case Instruction::CleanupRet:
7212 if (
I.mayReadOrWriteMemory())
7280 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
7325 if (
Add &&
Add->hasNoSignedWrap()) {
bool LHSOrRHSKnownNonNegative =
bool LHSOrRHSKnownNegative =
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
      (AddKnown.isNegative() && LHSOrRHSKnownNegative))
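// Sketch (assumed, standalone) of the sign argument used above: signed add
// overflow requires both addends to share a sign and the wrapped sum to have
// the opposite sign.  So if at least one addend is known non-negative and the
// sum is known non-negative (or the mirrored negative case holds), the
// addition can never overflow.  WrappedSum is the two's-complement result.
static bool cannotOverflowBySignRule(long long LHS, long long RHS,
                                     long long WrappedSum) {
  return (WrappedSum >= 0 && (LHS >= 0 || RHS >= 0)) ||
         (WrappedSum < 0 && (LHS < 0 || RHS < 0)); // Sufficient, not complete.
}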
7447 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
7449 if (EVI->getIndices()[0] == 0)
7452 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
7454 for (
const auto *U : EVI->users())
7456 assert(
B->isConditional() &&
"How else is it using an i1?");
7467 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
7473 for (
const auto *Result :
Results) {
7476 if (DT.
dominates(NoWrapEdge, Result->getParent()))
7479 for (
const auto &RU : Result->uses())
7487 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7499 unsigned NumElts = FVTy->getNumElements();
7500 for (
unsigned i = 0; i < NumElts; ++i)
7501 ShiftAmounts.
push_back(
C->getAggregateElement(i));
7509 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
7530 bool ConsiderFlagsAndMetadata) {
7533 Op->hasPoisonGeneratingAnnotations())
7536 unsigned Opcode =
Op->getOpcode();
7540 case Instruction::Shl:
7541 case Instruction::AShr:
7542 case Instruction::LShr:
7544 case Instruction::FPToSI:
7545 case Instruction::FPToUI:
7549 case Instruction::Call:
7551 switch (
II->getIntrinsicID()) {
7553 case Intrinsic::ctlz:
7554 case Intrinsic::cttz:
7555 case Intrinsic::abs:
7559 case Intrinsic::sshl_sat:
7560 case Intrinsic::ushl_sat:
7568 case Instruction::CallBr:
7569 case Instruction::Invoke: {
7571 return !CB->hasRetAttr(Attribute::NoUndef) &&
7572 !CB->hasFnAttr(Attribute::NoCreateUndefOrPoison);
7574 case Instruction::InsertElement:
7575 case Instruction::ExtractElement: {
7578 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7582 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7585 case Instruction::ShuffleVector: {
7591 case Instruction::FNeg:
7592 case Instruction::PHI:
7593 case Instruction::Select:
7594 case Instruction::ExtractValue:
7595 case Instruction::InsertValue:
7596 case Instruction::Freeze:
7597 case Instruction::ICmp:
7598 case Instruction::FCmp:
7599 case Instruction::GetElementPtr:
7601 case Instruction::AddrSpaceCast:
7616 bool ConsiderFlagsAndMetadata) {
7618 ConsiderFlagsAndMetadata);
7623 ConsiderFlagsAndMetadata);
7628 if (ValAssumedPoison == V)
7631 const unsigned MaxDepth = 2;
7632 if (
Depth >= MaxDepth)
7637 return propagatesPoison(Op) &&
7638 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7662 const unsigned MaxDepth = 2;
7663 if (
Depth >= MaxDepth)
7669 return impliesPoison(Op, V, Depth + 1);
7676 return ::impliesPoison(ValAssumedPoison, V, 0);
7691 if (
A->hasAttribute(Attribute::NoUndef) ||
7692 A->hasAttribute(Attribute::Dereferenceable) ||
7693 A->hasAttribute(Attribute::DereferenceableOrNull))
7708 if (
C->getType()->isVectorTy()) {
7711 if (
Constant *SplatC =
C->getSplatValue())
7719 return !
C->containsConstantExpression();
7732 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7737 auto OpCheck = [&](
const Value *V) {
7748 if (CB->hasRetAttr(Attribute::NoUndef) ||
7749 CB->hasRetAttr(Attribute::Dereferenceable) ||
7750 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7757 unsigned Num = PN->getNumIncomingValues();
7758 bool IsWellDefined =
true;
7759 for (
unsigned i = 0; i < Num; ++i) {
7760 if (PN == PN->getIncomingValue(i))
7762 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7764 DT,
Depth + 1, Kind)) {
7765 IsWellDefined =
false;
7776 }
else if (
all_of(Opr->operands(), OpCheck))
7782 if (
I->hasMetadata(LLVMContext::MD_noundef) ||
7783 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7784 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7804 auto *Dominator = DNode->
getIDom();
7809 auto *TI = Dominator->getBlock()->getTerminator();
7813 if (BI->isConditional())
7814 Cond = BI->getCondition();
7816 Cond =
SI->getCondition();
7825 if (
any_of(Opr->operands(), [V](
const Use &U) {
7826 return V == U && propagatesPoison(U);
7832 Dominator = Dominator->getIDom();
7845 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7852 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7859 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7883 while (!Worklist.
empty()) {
7892 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7893 return KnownPoison.contains(U) && propagatesPoison(U);
7897 if (KnownPoison.
insert(
I).second)
7909 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7917 return ::computeOverflowForSignedAdd(LHS, RHS,
nullptr, SQ);
7949 return !
I->mayThrow() &&
I->willReturn();
7963 unsigned ScanLimit) {
7970 assert(ScanLimit &&
"scan limit must be non-zero");
7972 if (--ScanLimit == 0)
7986 if (
I->getParent() != L->getHeader())
return false;
7989 if (&LI ==
I)
return true;
7992 llvm_unreachable(
"Instruction not contained in its own parent basic block.");
7998 case Intrinsic::sadd_with_overflow:
7999 case Intrinsic::ssub_with_overflow:
8000 case Intrinsic::smul_with_overflow:
8001 case Intrinsic::uadd_with_overflow:
8002 case Intrinsic::usub_with_overflow:
8003 case Intrinsic::umul_with_overflow:
8008 case Intrinsic::ctpop:
8009 case Intrinsic::ctlz:
8010 case Intrinsic::cttz:
8011 case Intrinsic::abs:
8012 case Intrinsic::smax:
8013 case Intrinsic::smin:
8014 case Intrinsic::umax:
8015 case Intrinsic::umin:
8016 case Intrinsic::scmp:
8017 case Intrinsic::is_fpclass:
8018 case Intrinsic::ptrmask:
8019 case Intrinsic::ucmp:
8020 case Intrinsic::bitreverse:
8021 case Intrinsic::bswap:
8022 case Intrinsic::sadd_sat:
8023 case Intrinsic::ssub_sat:
8024 case Intrinsic::sshl_sat:
8025 case Intrinsic::uadd_sat:
8026 case Intrinsic::usub_sat:
8027 case Intrinsic::ushl_sat:
8028 case Intrinsic::smul_fix:
8029 case Intrinsic::smul_fix_sat:
8030 case Intrinsic::umul_fix:
8031 case Intrinsic::umul_fix_sat:
8032 case Intrinsic::pow:
8033 case Intrinsic::powi:
8034 case Intrinsic::sin:
8035 case Intrinsic::sinh:
8036 case Intrinsic::cos:
8037 case Intrinsic::cosh:
8038 case Intrinsic::sincos:
8039 case Intrinsic::sincospi:
8040 case Intrinsic::tan:
8041 case Intrinsic::tanh:
8042 case Intrinsic::asin:
8043 case Intrinsic::acos:
8044 case Intrinsic::atan:
8045 case Intrinsic::atan2:
8046 case Intrinsic::canonicalize:
8047 case Intrinsic::sqrt:
8048 case Intrinsic::exp:
8049 case Intrinsic::exp2:
8050 case Intrinsic::exp10:
8051 case Intrinsic::log:
8052 case Intrinsic::log2:
8053 case Intrinsic::log10:
8054 case Intrinsic::modf:
8055 case Intrinsic::floor:
8056 case Intrinsic::ceil:
8057 case Intrinsic::trunc:
8058 case Intrinsic::rint:
8059 case Intrinsic::nearbyint:
8060 case Intrinsic::round:
8061 case Intrinsic::roundeven:
8062 case Intrinsic::lrint:
8063 case Intrinsic::llrint:
8072 switch (I->getOpcode()) {
8073 case Instruction::Freeze:
8074 case Instruction::PHI:
8075 case Instruction::Invoke:
8077 case Instruction::Select:
8079 case Instruction::Call:
8083 case Instruction::ICmp:
8084 case Instruction::FCmp:
8085 case Instruction::GetElementPtr:
8099 template <typename CallableT>
8101 const CallableT &Handle) {
8102 switch (I->getOpcode()) {
8103 case Instruction::Store:
8108 case Instruction::Load:
8115 case Instruction::AtomicCmpXchg:
8120 case Instruction::AtomicRMW:
8125 case Instruction::Call:
8126 case Instruction::Invoke: {
8130 for (unsigned i = 0; i < CB->arg_size(); ++i)
8133 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8138 case Instruction::Ret:
8139 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8140 Handle(I->getOperand(0)))
8143 case Instruction::Switch:
8147 case Instruction::Br: {
8149 if (BR->isConditional() && Handle(BR->getCondition()))
8161 template <typename CallableT>
8163 const CallableT &Handle) {
8166 switch (I->getOpcode()) {
8168 case Instruction::UDiv:
8169 case Instruction::SDiv:
8170 case Instruction::URem:
8171 case Instruction::SRem:
8172 return Handle(I->getOperand(1));
8181 I, [&](const Value *V) { return KnownPoison.count(V); });
8200 if (Arg->getParent()->isDeclaration())
8203 Begin = BB->begin();
8210 unsigned ScanLimit = 32;
8219 if (--ScanLimit == 0)
8223 return WellDefinedOp == V;
8243 if (--ScanLimit == 0)
8251 for (const Use &Op : I.operands()) {
8261 if (I.getOpcode() == Instruction::Select &&
8262 YieldsPoison.count(I.getOperand(1)) &&
8263 YieldsPoison.count(I.getOperand(2))) {
8269 if (!BB || !Visited.insert(BB).second)
8279 return ::programUndefinedIfUndefOrPoison(Inst, false);
8283 return ::programUndefinedIfUndefOrPoison(Inst, true);
8294 if (!C->getElementType()->isFloatingPointTy())
8296 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8297 if (C->getElementAsAPFloat(I).isNaN())
8311 return !C->isZero();
8314 if (!C->getElementType()->isFloatingPointTy())
8316 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8317 if (C->getElementAsAPFloat(I).isZero())
8340 if (CmpRHS == FalseVal) {
8384 if (CmpRHS != TrueVal) {
8423 Value *A = nullptr, *B = nullptr;
8428 Value *C = nullptr, *D = nullptr;
8430 if (L.Flavor != R.Flavor)
8482 return {L.Flavor, SPNB_NA, false};
8489 return {L.Flavor, SPNB_NA, false};
8496 return {L.Flavor, SPNB_NA, false};
8503 return {L.Flavor, SPNB_NA, false};
8519 return ConstantInt::get(V->getType(), ~(*C));
8576 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8596 assert(X && Y && "Invalid operand");
8598 auto IsNegationOf = [&](const Value *X, const Value *Y) {
8603 if (NeedNSW && !BO->hasNoSignedWrap())
8607 if (!AllowPoison && !Zero->isNullValue())
8614 if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8641 const APInt *RHSC1, *RHSC2;
8652 return CR1.inverse() == CR2;
8686 std::optional<std::pair<CmpPredicate, Constant *>>
8689 "Only for relational integer predicates.");
8691 return std::nullopt;
8697 bool WillIncrement =
8702 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
8703 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
8706 Constant *SafeReplacementConstant = nullptr;
8709 if (!ConstantIsOk(CI))
8710 return std::nullopt;
8712 unsigned NumElts = FVTy->getNumElements();
8713 for (unsigned i = 0; i != NumElts; ++i) {
8714 Constant *Elt = C->getAggregateElement(i);
8716 return std::nullopt;
8724 if (!CI || !ConstantIsOk(CI))
8725 return std::nullopt;
8727 if (!SafeReplacementConstant)
8728 SafeReplacementConstant = CI;
8732 Value *SplatC = C->getSplatValue();
8735 if (!CI || !ConstantIsOk(CI))
8736 return std::nullopt;
8739 return std::nullopt;
8746 if (C->containsUndefOrPoisonElement()) {
8747 assert(SafeReplacementConstant && "Replacement constant not set");
8754 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
8757 return std::make_pair(NewPred, NewC);
8766 bool HasMismatchedZeros = false;
8772 Value *OutputZeroVal = nullptr;
8775 OutputZeroVal = TrueVal;
8778 OutputZeroVal = FalseVal;
8780 if (OutputZeroVal) {
8782 HasMismatchedZeros = true;
8783 CmpLHS = OutputZeroVal;
8786 HasMismatchedZeros = true;
8787 CmpRHS = OutputZeroVal;
8804 if (!HasMismatchedZeros)
8815 bool Ordered = false;
8826 if (LHSSafe && RHSSafe) {
8857 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8868 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8874 auto MaybeSExtCmpLHS =
8878 if (match(TrueVal, MaybeSExtCmpLHS)) {
8900 else if (match(FalseVal, MaybeSExtCmpLHS)) {
8940 case Instruction::ZExt:
8944 case Instruction::SExt:
8948 case Instruction::Trunc:
8951 CmpConst->getType() == SrcTy) {
8973 CastedTo = CmpConst;
8975 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8979 case Instruction::FPTrunc:
8982 case Instruction::FPExt:
8985 case Instruction::FPToUI:
8988 case Instruction::FPToSI:
8991 case Instruction::UIToFP:
8994 case Instruction::SIToFP:
9007 if (CastedBack && CastedBack != C)
9035 *CastOp = Cast1->getOpcode();
9036 Type *SrcTy = Cast1->getSrcTy();
9039 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
9040 return Cast2->getOperand(0);
9048 Value *CastedTo = nullptr;
9049 if (*CastOp == Instruction::Trunc) {
9063 "V2 and Cast1 should be the same type.");
9082 Value *TrueVal = SI->getTrueValue();
9083 Value *FalseVal = SI->getFalseValue();
9086 CmpI, TrueVal, FalseVal, LHS, RHS,
9105 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
9109 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9111 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9118 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9120 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9125 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9144 return Intrinsic::umin;
9146 return Intrinsic::umax;
9148 return Intrinsic::smin;
9150 return Intrinsic::smax;
9166 case Intrinsic::smax: return Intrinsic::smin;
9167 case Intrinsic::smin: return Intrinsic::smax;
9168 case Intrinsic::umax: return Intrinsic::umin;
9169 case Intrinsic::umin: return Intrinsic::umax;
9172 case Intrinsic::maximum: return Intrinsic::minimum;
9173 case Intrinsic::minimum: return Intrinsic::maximum;
9174 case Intrinsic::maxnum: return Intrinsic::minnum;
9175 case Intrinsic::minnum: return Intrinsic::maxnum;
9176 case Intrinsic::maximumnum:
9177 return Intrinsic::minimumnum;
9178 case Intrinsic::minimumnum:
9179 return Intrinsic::maximumnum;
9194std::pair<Intrinsic::ID, bool>
9199 bool AllCmpSingleUse = true;
9202 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
9208 SelectPattern.Flavor != CurrentPattern.Flavor)
9210 SelectPattern = CurrentPattern;
9215 switch (SelectPattern.Flavor) {
9217 return {Intrinsic::smin, AllCmpSingleUse};
9219 return {Intrinsic::umin, AllCmpSingleUse};
9221 return {Intrinsic::smax, AllCmpSingleUse};
9223 return {Intrinsic::umax, AllCmpSingleUse};
9225 return {Intrinsic::maxnum, AllCmpSingleUse};
9227 return {Intrinsic::minnum, AllCmpSingleUse};
9235 template <typename InstTy>
9245 for (unsigned I = 0; I != 2; ++I) {
9250 if (LHS != PN && RHS != PN)
9286 if (I->arg_size() != 2 || I->getType() != I->getArgOperand(0)->getType() ||
9287 I->getType() != I->getArgOperand(1)->getType())
9315 return !C->isNegative();
9327 const APInt *CLHS, *CRHS;
9330 return CLHS->sle(*CRHS);
9368 const APInt *CLHS, *CRHS;
9371 return CLHS->ule(*CRHS);
9380 static std::optional<bool>
9385 return std::nullopt;
9392 return std::nullopt;
9399 return std::nullopt;
9406 return std::nullopt;
9413 return std::nullopt;
9420 static std::optional<bool>
9426 if (CR.icmp(Pred, RCR))
9433 return std::nullopt;
9446 return std::nullopt;
9452 static std::optional<bool>
9483 const APInt *Unused;
9502 return std::nullopt;
9506 if (L0 == R0 && L1 == R1)
9539 ((A == R0 && B == R1) || (A == R1 && B == R0) ||
9557 return std::nullopt;
9563 static std::optional<bool>
9593 if (L0 == R0 && L1 == R1) {
9594 if ((LPred & RPred) == LPred)
9596 if ((LPred & ~RPred) == LPred)
9604 if (std::optional<ConstantFPRange> DomCR =
9606 if (std::optional<ConstantFPRange> ImpliedCR =
9608 if (ImpliedCR->contains(*DomCR))
9611 if (std::optional<ConstantFPRange> ImpliedCR =
9614 if (ImpliedCR->contains(*DomCR))
9620 return std::nullopt;
9627 static std::optional<bool>
9632 assert((LHS->getOpcode() == Instruction::And ||
9633 LHS->getOpcode() == Instruction::Or ||
9634 LHS->getOpcode() == Instruction::Select) &&
9635 "Expected LHS to be 'and', 'or', or 'select'.");
9642 const Value *ALHS, *ARHS;
9647 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9650 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9652 return std::nullopt;
9654 return std::nullopt;
9663 return std::nullopt;
9668 return std::nullopt;
9670 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9671 "Expected integer type only!");
9675 LHSIsTrue = !LHSIsTrue;
9681 LHSCmp->getOperand(0), LHSCmp->getOperand(1),
9682 RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue);
9686 ConstantInt::get(V->getType(), 0), RHSPred,
9687 RHSOp0, RHSOp1, DL, LHSIsTrue);
9690 "Expected floating point type only!");
9693 LHSCmp->getOperand(1), RHSPred, RHSOp0, RHSOp1,
9701 if ((LHSI->getOpcode() == Instruction::And ||
9702 LHSI->getOpcode() == Instruction::Or ||
9703 LHSI->getOpcode() == Instruction::Select))
9707 return std::nullopt;
9712 bool LHSIsTrue, unsigned Depth) {
9718 bool InvertRHS = false;
9727 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9728 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9729 return InvertRHS ? !*Implied : *Implied;
9730 return std::nullopt;
9734 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
9735 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9736 return InvertRHS ? !*Implied : *Implied;
9737 return std::nullopt;
9743 ConstantInt::get(V->getType(), 0), DL,
9745 return InvertRHS ? !*Implied : *Implied;
9746 return std::nullopt;
9750 return std::nullopt;
9754 const Value *RHS1, *RHS2;
9756 if (std::optional<bool> Imp =
9760 if (std::optional<bool> Imp =
9766 if (std::optional<bool> Imp =
9770 if (std::optional<bool> Imp =
9776 return std::nullopt;
9781static std::pair<Value *, bool>
9783 if (!ContextI || !ContextI->getParent())
9784 return {nullptr, false};
9791 return {nullptr, false};
9797 return {nullptr, false};
9800 if (TrueBB == FalseBB)
9801 return {nullptr, false};
9803 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9804 "Predecessor block does not point to successor?");
9807 return {PredCond, TrueBB == ContextBB};
9813 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9817 return std::nullopt;
9829 return std::nullopt;
9834 bool PreferSignedRange) {
9835 unsigned Width = Lower.getBitWidth();
9838 case Instruction::Sub:
9848 if (PreferSignedRange && HasNSW && HasNUW)
9854 } else if (HasNSW) {
9855 if (C->isNegative()) {
9868 case Instruction::Add:
9877 if (PreferSignedRange && HasNSW && HasNUW)
9883 } else if (HasNSW) {
9884 if (C->isNegative()) {
9897 case Instruction::And:
9908 case Instruction::Or:
9914 case Instruction::AShr:
9920 unsigned ShiftAmount = Width - 1;
9921 if (!C->isZero() && IIQ.isExact(&BO))
9922 ShiftAmount = C->countr_zero();
9923 if (C->isNegative()) {
9926 Upper = C->ashr(ShiftAmount) + 1;
9929 Lower = C->ashr(ShiftAmount);
9935 case Instruction::LShr:
9941 unsigned ShiftAmount = Width - 1;
9942 if (!C->isZero() && IIQ.isExact(&BO))
9943 ShiftAmount = C->countr_zero();
9944 Lower = C->lshr(ShiftAmount);
9949 case Instruction::Shl:
9956 if (C->isNegative()) {
9958 unsigned ShiftAmount = C->countl_one() - 1;
9959 Lower = C->shl(ShiftAmount);
9963 unsigned ShiftAmount = C->countl_zero() - 1;
9965 Upper = C->shl(ShiftAmount) + 1;
9984 case Instruction::SDiv:
9988 if (C->isAllOnes()) {
9993 } else if (C->countl_zero() < Width - 1) {
10004 if (C->isMinSignedValue()) {
10016 case Instruction::UDiv:
10026 case Instruction::SRem:
10032 if (C->isNegative()) {
10043 case Instruction::URem:
10058 bool UseInstrInfo) {
10059 unsigned Width = II.getType()->getScalarSizeInBits();
10061 switch (II.getIntrinsicID()) {
10062 case Intrinsic::ctlz:
10063 case Intrinsic::cttz: {
10065 if (!UseInstrInfo || !match(II.getArgOperand(1), m_One()))
10070 case Intrinsic::ctpop:
10073 APInt(Width, Width) + 1);
10074 case Intrinsic::uadd_sat:
10080 case Intrinsic::sadd_sat:
10083 if (C->isNegative())
10094 case Intrinsic::usub_sat:
10104 case Intrinsic::ssub_sat:
10106 if (C->isNegative())
10116 if (C->isNegative())
10127 case Intrinsic::umin:
10128 case Intrinsic::umax:
10129 case Intrinsic::smin:
10130 case Intrinsic::smax:
10135 switch (II.getIntrinsicID()) {
10136 case Intrinsic::umin:
10138 case Intrinsic::umax:
10140 case Intrinsic::smin:
10143 case Intrinsic::smax:
10150 case Intrinsic::abs:
10159 case Intrinsic::vscale:
10160 if (!II.getParent() || !II.getFunction())
10167 return ConstantRange::getFull(Width);
10172 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
10176 return ConstantRange::getFull(BitWidth);
10199 return ConstantRange::getFull(BitWidth);
10201 switch (R.Flavor) {
10213 return ConstantRange::getFull(BitWidth);
10220 unsigned BitWidth = I->getType()->getScalarSizeInBits();
10221 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10239 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
10242 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10245 return C->toConstantRange();
10247 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10260 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10262 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10272 if (std::optional<ConstantRange> Range = A->getRange())
10280 if (std::optional<ConstantRange> Range = CB->getRange())
10291 "Got assumption for the wrong function!");
10292 assert(I->getIntrinsicID() == Intrinsic::assume &&
10293 "must be an assume intrinsic");
10297 Value *Arg = I->getArgOperand(0);
10300 if (!Cmp || Cmp->getOperand(0) != V)
10305 UseInstrInfo, AC, I, DT, Depth + 1);
10327 InsertAffected(Op);
10334 auto AddAffected = [&InsertAffected](Value *V) {
10338 auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
10349 while (!Worklist.empty()) {
10351 if (!Visited.insert(V).second)
10397 AddCmpOperands(A, B);
10434 AddCmpOperands(A, B);
10462 if (BO->getOpcode() == Instruction::Add ||
10463 BO->getOpcode() == Instruction::Or) {
10465 const APInt *C1, *C2;
10484 unsigned MaxCount, bool AllowUndefOrPoison) {
10487 auto Push = [&](const Value *V) -> bool {
10492 if (Constants.contains(C))
10494 if (Constants.size() == MaxCount)
10496 Constants.insert(C);
10501 if (Visited.insert(Inst).second)
10509 while (!Worklist.empty()) {
10512 case Instruction::Select:
10518 case Instruction::PHI:
10521 if (IncomingValue == CurInst)
10523 if (!Push(IncomingValue))
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext, unsigned Depth=0)
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, SimplifyQuery &Q, unsigned Depth)
Try to detect a recurrence in which the value of the induction variable is always a power of two (or zero...
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS)
Return true if "icmp Pred LHS RHS" is always true.
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V1 == (binop V2, X), where X is known non-zero.
static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q, unsigned Depth)
Test whether a GEP's result is known to be non-null.
static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
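A hedged sketch of how such a check can be phrased with the public metadata and ConstantRange APIs; the [Lo, Hi) operand pairing follows the MD_range format, and the helper name excludesValue is made up for illustration.
// Hypothetical helper: true if no [Lo, Hi) pair attached as !range metadata
// contains Value, i.e. the metadata rules the value out.
static bool excludesValue(const MDNode *Ranges, const APInt &Value) {
  unsigned NumRanges = Ranges->getNumOperands() / 2;
  for (unsigned i = 0; i < NumRanges; ++i) {
    auto *Lo = mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    auto *Hi = mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lo->getValue(), Hi->getValue());
    if (Range.contains(Value))
      return false; // some attached range still allows Value
  }
  return true;
}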
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &Q, unsigned Depth)
static void breakSelfRecursivePHI(const Use *U, const PHINode *PHI, Value *&ValOut, Instruction *&CtxIOut, const PHINode **PhiOut=nullptr)
static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, unsigned Depth)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static constexpr unsigned MaxInstrsToCheckForFree
Maximum number of instructions to check between assume and context instruction.
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, const KnownBits &KnownVal, unsigned Depth)
static std::optional< bool > isImpliedCondFCmps(FCmpInst::Predicate LPred, const Value *L0, const Value *L1, FCmpInst::Predicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2, const SimplifyQuery &Q, unsigned Depth)
static bool includesPoison(UndefPoisonKind Kind)
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static std::optional< bool > isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1, CmpPredicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool includesUndef(UndefPoisonKind Kind)
static std::optional< bool > isImpliedCondCommonOperandWithCR(CmpPredicate LPred, const ConstantRange &LCR, CmpPredicate RPred, const ConstantRange &RCR)
Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth)
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
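For orientation, a hedged sketch of the callback shape the KF parameter expects, assuming the current KnownBits::shl overload; the names are illustrative only.
// Given known bits of the shifted value and of the shift amount (plus a flag
// saying the amount is known non-zero), produce known bits of the result.
auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt,
             bool ShAmtNonZero) {
  return KnownBits::shl(KnownVal, KnownAmt, /*NUW=*/false, /*NSW=*/false,
                        ShAmtNonZero);
};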
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchTwoInputRecurrence(const PHINode *PN, InstTy *&Inst, Value *&Init, Value *&OtherOp)
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ?
static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II, KnownBits &Known)
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static Value * lookThroughCastConst(CmpInst *CmpI, Type *SrcTy, Constant *C, Instruction::CastOps *CastOp)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1, const APInt &DemandedElts, KnownBits &KnownOut, const SimplifyQuery &Q, unsigned Depth)
Try to detect the lerp pattern: a * (b - c) + c * d where a >= 0, b >= 0, c >= 0, d >= 0,...
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return the number of times the sign bit of the register is replicated into the other bits.
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchOpWithOpEqZero(Value *Op0, Value *Op1)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ?
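For reference, the source-level shape this matcher is after, as a sketch rather than code from this file.
// Clamp v into [l, h]; matchClamp recognizes the select/icmp form of this
// and reports the corresponding signed or unsigned min/max flavors.
int clamp(int v, int l, int h) { return v < l ? l : (v > h ? h : v); }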
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
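A hedged illustration of why the bound matters, in C++ terms rather than code from this file: for an i8 shift, any amount of 8 or more yields poison in IR, so an amount provably in [0, 7] can never make the shift poison.
#include <cstdint>
// Masking the amount down to [0, 7] guarantees the shift is never poison.
uint8_t safeShl(uint8_t x, uint8_t amt) { return x << (amt & 7); }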
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
static KnownBits computeKnownBitsForHorizontalOperation(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth, const function_ref< KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc)
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the operands of the function co...
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, const SimplifyQuery &SQ, bool Invert, unsigned Depth)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero, const Value *Cond, bool CondIsTrue)
Return true if we can infer that V is known to be a power of 2 from dominating condition Cond (e....
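One classic dominating condition of this kind, shown as a hedged C-level sketch; the exact forms the helper accepts are whatever its implementation matches, and this is only the textbook bit trick for orientation.
// If execution is guarded by (v & (v - 1)) == 0, then on that path v is a
// power of two or zero; pairing it with v != 0 pins it to a power of two.
bool isPow2OrZero(unsigned long long v) { return (v & (v - 1)) == 0; }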
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II, bool UseInstrInfo)
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth)
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static bool isFPPredicate(Predicate P)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isIntPredicate(Predicate P)
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
bool hasSameSign() const
Query samesign information, for optimizations.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI bool isAllNegative() const
Return true if all values in this range are negative.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
void setNoNaNs(bool B=true)
const BasicBlock & getEntryBlock() const
bool hasNoSync() const
Determine if the call can synchroize with other threads.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
PointerType * getType() const
Global values are always pointers.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getSwappedCmpPredicate() const
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr and ashr).
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
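As a rough illustration of how these matchers compose (a sketch only: the helper name looksLikeShlAdd and the captured values are illustrative, not part of this file):
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Sketch: recognize (X << C) + Y, accepting the shift on either side of the add.
static bool looksLikeShlAdd(Value *V) {
  Value *X, *Y;
  const APInt *C;
  return match(V, m_c_Add(m_Shl(m_Value(X), m_APInt(C)), m_Value(Y)));
}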
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced...
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)
Returns true, if no instruction between Assume and CtxI may free memory and the function is marked as...
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call pointer argument that is considered the same by aliasing rules.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
LLVM_ABI std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
bool isa_and_nonnull(const Y &Val)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is zero.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
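For example (a sketch only; indexIsFourByteAligned is an illustrative helper and the SimplifyQuery is assumed to be set up elsewhere):
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Sketch: the index is a multiple of 4 iff its two low bits are known zero.
static bool indexIsFourByteAligned(const Value *Idx, const SimplifyQuery &SQ) {
  unsigned BitWidth = Idx->getType()->getScalarSizeInBits();
  return MaskedValueIsZero(Idx, APInt(BitWidth, 3), SQ);
}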
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
LLVM_ABI SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
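A minimal sketch of the DataLayout-based overload above, assuming V has integer (or integer vector) type; minTrailingZeros is an illustrative helper name:
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static unsigned minTrailingZeros(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);       // AC, CxtI and DT are left at their defaults
  return Known.countMinTrailingZeros(); // low bits guaranteed to be zero
}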
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI bool collectPossibleValues(const Value *V, SmallPtrSetImpl< const Constant * > &Constants, unsigned MaxCount, bool AllowUndefOrPoison=true)
Enumerates all possible values of V and inserts them into the set Constants.
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
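For instance (a sketch; divisorKnownNonZero is an illustrative name, and the query here carries only a DataLayout, though it can also hold AC, DT and a context instruction):
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static bool divisorKnownNonZero(const Value *Divisor, const DataLayout &DL) {
  SimplifyQuery Q(DL);               // minimal query; Depth defaults to 0 below
  return isKnownNonZero(Divisor, Q);
}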
constexpr int PoisonMaskElem
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, returns either.
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
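As a sketch of how the result is typically used (fitsInSignedInt16 is an illustrative name):
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static bool fitsInSignedInt16(const Value *V, const DataLayout &DL) {
  unsigned TyBits = V->getType()->getScalarSizeInBits();
  unsigned SignBits = ComputeNumSignBits(V, DL);
  // A signed value occupies TyBits - SignBits + 1 significant bits.
  return TyBits - SignBits + 1 <= 16;
}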
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point value can never contain a NaN or infinity.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
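For example (sketch only; canTurnURemIntoAnd is an illustrative name for the classic urem-to-mask rewrite check):
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static bool canTurnURemIntoAnd(const Value *Divisor, const DataLayout &DL) {
  // OrZero=false: exactly one bit must be set for X & (Divisor - 1) to be valid.
  return isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false);
}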
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_ABI Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is either NaN or never less than -0....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
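A sketch of a typical query (names are illustrative; std::nullopt means nothing could be proven either way):
#include "llvm/Analysis/ValueTracking.h"
#include <optional>
using namespace llvm;

static bool nestedCheckIsRedundant(const Value *Dominating, const Value *Nested,
                                   const DataLayout &DL) {
  std::optional<bool> Implied = isImpliedCondition(Dominating, Nested, DL);
  return Implied && *Implied; // only treat a definite "true" as redundant
}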
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
LLVM_ABI bool canIgnoreSignBitOfNaN(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is NaN.
LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static LLVM_ABI KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
LLVM_ABI KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest bit set of X set.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
LLVM_ABI KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
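These KnownBits helpers compose without touching the IR at all; as a sketch (minLeadingZerosOfSum is an illustrative name):
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static unsigned minLeadingZerosOfSum(const KnownBits &L, const KnownBits &R) {
  KnownBits Sum = KnownBits::add(L, R); // NSW/NUW default to false (wrapping add)
  return Sum.countMinLeadingZeros();
}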
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
LLVM_ABI void propagateCanonicalizingSrc(const KnownFPClass &Src, DenormalMode Mode)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
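A sketch of querying these classes for a scalar value (the single-bit DemandedElts mask and the helper name scalarIsNeverNegZeroOrNaN are illustrative):
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static bool scalarIsNeverNegZeroOrNaN(const Value *V, const SimplifyQuery &SQ) {
  KnownFPClass Known =
      computeKnownFPClass(V, APInt(1, 1), fcNegZero | fcNan, SQ);
  return Known.isKnownNeverNegZero() && Known.isKnownNeverNaN();
}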
Represents a piece of information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF describes a min/max pattern (SMIN, UMIN, SMAX, UMAX) rather than ABS/NABS or SPF_UNKNOWN.
SimplifyQuery getWithoutCondContext() const
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC
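Tying these query pieces together, a sketch of re-anchoring a query at a particular instruction (nonZeroAtUse is an illustrative name):
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool nonZeroAtUse(const Value *V, const Instruction *UserI,
                         const SimplifyQuery &Q) {
  // Facts (assumes, dominating conditions) that hold at UserI become usable.
  return isKnownNonZero(V, Q.getWithInstruction(UserI));
}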