99void SelectionDAG::DAGNodeDeletedListener::anchor() {}
100void SelectionDAG::DAGNodeInsertedListener::anchor() {}
102#define DEBUG_TYPE "selectiondag"
106 cl::desc(
"Gang up loads and stores generated by inlining of memcpy"));
109 cl::desc(
"Number limit for gluing ld/st of memcpy."),
114 cl::desc(
"DAG combiner limit number of steps when searching DAG "
115 "for predecessor nodes"));
153 if (
auto OptAPInt =
N->getOperand(0)->bitcastToAPInt()) {
155 N->getValueType(0).getVectorElementType().getSizeInBits();
156 SplatVal = OptAPInt->
trunc(EltSize);
166 unsigned SplatBitSize;
168 unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
173 const bool IsBigEndian =
false;
174 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
175 EltSize, IsBigEndian) &&
176 EltSize == SplatBitSize;
185 N =
N->getOperand(0).getNode();
194 unsigned i = 0, e =
N->getNumOperands();
197 while (i != e &&
N->getOperand(i).isUndef())
201 if (i == e)
return false;
213 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
214 if (OptAPInt->countr_one() < EltSize)
222 for (++i; i != e; ++i)
223 if (
N->getOperand(i) != NotZero && !
N->getOperand(i).isUndef())
231 N =
N->getOperand(0).getNode();
240 bool IsAllUndef =
true;
253 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
254 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
255 if (OptAPInt->countr_zero() < EltSize)
303 assert(
N->getValueType(0).isVector() &&
"Expected a vector!");
305 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
306 if (EltSize <= NewEltSize)
310 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
315 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
328 APInt C =
Op->getAsAPIntVal().trunc(EltSize);
329 if (
Signed &&
C.trunc(NewEltSize).sext(EltSize) !=
C)
331 if (!
Signed &&
C.trunc(NewEltSize).zext(EltSize) !=
C)
342 if (
N->getNumOperands() == 0)
348 return N->getOpcode() ==
ISD::FREEZE &&
N->getOperand(0).isUndef();
351template <
typename ConstNodeType>
353 std::function<
bool(ConstNodeType *)> Match,
354 bool AllowUndefs,
bool AllowTruncation) {
364 EVT SVT =
Op.getValueType().getScalarType();
365 for (
unsigned i = 0, e =
Op.getNumOperands(); i != e; ++i) {
366 if (AllowUndefs &&
Op.getOperand(i).isUndef()) {
373 if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||
388 bool AllowUndefs,
bool AllowTypeMismatch) {
389 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
395 return Match(LHSCst, RHSCst);
398 if (LHS.getOpcode() != RHS.getOpcode() ||
404 for (
unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
407 bool LHSUndef = AllowUndefs && LHSOp.
isUndef();
408 bool RHSUndef = AllowUndefs && RHSOp.
isUndef();
411 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
413 if (!AllowTypeMismatch && (LHSOp.
getValueType() != SVT ||
416 if (!Match(LHSCst, RHSCst))
453 switch (VecReduceOpcode) {
458 case ISD::VP_REDUCE_FADD:
459 case ISD::VP_REDUCE_SEQ_FADD:
463 case ISD::VP_REDUCE_FMUL:
464 case ISD::VP_REDUCE_SEQ_FMUL:
467 case ISD::VP_REDUCE_ADD:
470 case ISD::VP_REDUCE_MUL:
473 case ISD::VP_REDUCE_AND:
476 case ISD::VP_REDUCE_OR:
479 case ISD::VP_REDUCE_XOR:
482 case ISD::VP_REDUCE_SMAX:
485 case ISD::VP_REDUCE_SMIN:
488 case ISD::VP_REDUCE_UMAX:
491 case ISD::VP_REDUCE_UMIN:
494 case ISD::VP_REDUCE_FMAX:
497 case ISD::VP_REDUCE_FMIN:
500 case ISD::VP_REDUCE_FMAXIMUM:
503 case ISD::VP_REDUCE_FMINIMUM:
527#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
530#include "llvm/IR/VPIntrinsics.def"
538#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
539#define VP_PROPERTY_BINARYOP return true;
540#define END_REGISTER_VP_SDNODE(VPSD) break;
541#include "llvm/IR/VPIntrinsics.def"
550 case ISD::VP_REDUCE_ADD:
551 case ISD::VP_REDUCE_MUL:
552 case ISD::VP_REDUCE_AND:
553 case ISD::VP_REDUCE_OR:
554 case ISD::VP_REDUCE_XOR:
555 case ISD::VP_REDUCE_SMAX:
556 case ISD::VP_REDUCE_SMIN:
557 case ISD::VP_REDUCE_UMAX:
558 case ISD::VP_REDUCE_UMIN:
559 case ISD::VP_REDUCE_FMAX:
560 case ISD::VP_REDUCE_FMIN:
561 case ISD::VP_REDUCE_FMAXIMUM:
562 case ISD::VP_REDUCE_FMINIMUM:
563 case ISD::VP_REDUCE_FADD:
564 case ISD::VP_REDUCE_FMUL:
565 case ISD::VP_REDUCE_SEQ_FADD:
566 case ISD::VP_REDUCE_SEQ_FMUL:
576#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
579#include "llvm/IR/VPIntrinsics.def"
588#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
591#include "llvm/IR/VPIntrinsics.def"
601#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
602#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
603#define END_REGISTER_VP_SDNODE(VPOPC) break;
604#include "llvm/IR/VPIntrinsics.def"
613#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
614#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
615#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
616#include "llvm/IR/VPIntrinsics.def"
663 bool isIntegerLike) {
688 bool IsInteger =
Type.isInteger();
693 unsigned Op = Op1 | Op2;
709 bool IsInteger =
Type.isInteger();
744 ID.AddPointer(VTList.
VTs);
750 for (
const auto &
Op :
Ops) {
751 ID.AddPointer(
Op.getNode());
752 ID.AddInteger(
Op.getResNo());
759 for (
const auto &
Op :
Ops) {
760 ID.AddPointer(
Op.getNode());
761 ID.AddInteger(
Op.getResNo());
774 switch (
N->getOpcode()) {
783 ID.AddPointer(
C->getConstantIntValue());
784 ID.AddBoolean(
C->isOpaque());
848 ID.AddInteger(LD->getMemoryVT().getRawBits());
849 ID.AddInteger(LD->getRawSubclassData());
850 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
851 ID.AddInteger(LD->getMemOperand()->getFlags());
856 ID.AddInteger(ST->getMemoryVT().getRawBits());
857 ID.AddInteger(ST->getRawSubclassData());
858 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
859 ID.AddInteger(ST->getMemOperand()->getFlags());
870 case ISD::VP_LOAD_FF: {
872 ID.AddInteger(LD->getMemoryVT().getRawBits());
873 ID.AddInteger(LD->getRawSubclassData());
874 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
875 ID.AddInteger(LD->getMemOperand()->getFlags());
878 case ISD::VP_STORE: {
886 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
893 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
900 case ISD::VP_GATHER: {
908 case ISD::VP_SCATTER: {
1007 ID.AddInteger(MN->getRawSubclassData());
1008 ID.AddInteger(MN->getMemoryVT().getRawBits());
1010 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
1011 ID.AddInteger(MMO->getFlags());
1035 if (
N->getValueType(0) == MVT::Glue)
1038 switch (
N->getOpcode()) {
1046 for (
unsigned i = 1, e =
N->getNumValues(); i != e; ++i)
1047 if (
N->getValueType(i) == MVT::Glue)
1056 EVT VT = V.getValueType();
1075 if (
Node.use_empty())
1090 while (!DeadNodes.
empty()) {
1099 DUL->NodeDeleted(
N,
nullptr);
1102 RemoveNodeFromCSEMaps(
N);
1133 RemoveNodeFromCSEMaps(
N);
1137 DeleteNodeNotInCSEMaps(
N);
1140void SelectionDAG::DeleteNodeNotInCSEMaps(
SDNode *
N) {
1141 assert(
N->getIterator() != AllNodes.begin() &&
1142 "Cannot delete the entry node!");
1143 assert(
N->use_empty() &&
"Cannot delete a node that is not dead!");
1152 assert(!(V->isVariadic() && isParameter));
1154 ByvalParmDbgValues.push_back(V);
1156 DbgValues.push_back(V);
1159 DbgValMap[
Node].push_back(V);
1163 DbgValMapType::iterator
I = DbgValMap.find(
Node);
1164 if (
I == DbgValMap.end())
1166 for (
auto &Val:
I->second)
1167 Val->setIsInvalidated();
1171void SelectionDAG::DeallocateNode(
SDNode *
N) {
1194void SelectionDAG::verifyNode(
SDNode *
N)
const {
1195 switch (
N->getOpcode()) {
1197 if (
N->isTargetOpcode())
1201 EVT VT =
N->getValueType(0);
1202 assert(
N->getNumValues() == 1 &&
"Too many results!");
1204 "Wrong return type!");
1205 assert(
N->getNumOperands() == 2 &&
"Wrong number of operands!");
1206 assert(
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1207 "Mismatched operand types!");
1209 "Wrong operand type!");
1211 "Wrong return type size");
1215 assert(
N->getNumValues() == 1 &&
"Too many results!");
1216 assert(
N->getValueType(0).isVector() &&
"Wrong return type!");
1217 assert(
N->getNumOperands() ==
N->getValueType(0).getVectorNumElements() &&
1218 "Wrong number of operands!");
1219 EVT EltVT =
N->getValueType(0).getVectorElementType();
1220 for (
const SDUse &
Op :
N->ops()) {
1221 assert((
Op.getValueType() == EltVT ||
1222 (EltVT.
isInteger() &&
Op.getValueType().isInteger() &&
1223 EltVT.
bitsLE(
Op.getValueType()))) &&
1224 "Wrong operand type!");
1225 assert(
Op.getValueType() ==
N->getOperand(0).getValueType() &&
1226 "Operands must all have the same type");
1234 assert(
N->getNumValues() == 2 &&
"Wrong number of results!");
1235 assert(
N->getVTList().NumVTs == 2 &&
N->getNumOperands() == 2 &&
1236 "Invalid add/sub overflow op!");
1237 assert(
N->getVTList().VTs[0].isInteger() &&
1238 N->getVTList().VTs[1].isInteger() &&
1239 N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1240 N->getOperand(0).getValueType() ==
N->getVTList().VTs[0] &&
1241 "Binary operator types must match!");
1251void SelectionDAG::InsertNode(SDNode *
N) {
1252 AllNodes.push_back(
N);
1254 N->PersistentId = NextPersistentId++;
1258 DUL->NodeInserted(
N);
1265bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *
N) {
1266 bool Erased =
false;
1267 switch (
N->getOpcode()) {
1271 "Cond code doesn't exist!");
1280 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1286 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1292 Erased = ExtendedValueTypeNodes.erase(VT);
1303 Erased = CSEMap.RemoveNode(
N);
1310 if (!Erased &&
N->getValueType(
N->getNumValues()-1) != MVT::Glue &&
1325SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *
N) {
1329 SDNode *Existing = CSEMap.GetOrInsertNode(
N);
1330 if (Existing !=
N) {
1341 DUL->NodeDeleted(
N, Existing);
1342 DeleteNodeNotInCSEMaps(
N);
1349 DUL->NodeUpdated(
N);
1356SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *
N,
SDValue Op,
1362 FoldingSetNodeID
ID;
1365 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1367 Node->intersectFlagsWith(
N->getFlags());
1375SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *
N,
1382 FoldingSetNodeID
ID;
1385 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1387 Node->intersectFlagsWith(
N->getFlags());
1400 FoldingSetNodeID
ID;
1403 SDNode *
Node = FindNodeOrInsertPos(
ID, SDLoc(
N), InsertPos);
1405 Node->intersectFlagsWith(
N->getFlags());
1418 : TM(tm), OptLevel(OL), EntryNode(
ISD::EntryToken, 0,
DebugLoc(),
1421 InsertNode(&EntryNode);
1433 SDAGISelPass = PassPtr;
1437 LibInfo = LibraryInfo;
1438 Libcalls = LibcallsInfo;
1439 Context = &MF->getFunction().getContext();
1444 FnVarLocs = VarLocs;
1448 assert(!UpdateListeners &&
"Dangling registered DAGUpdateListeners");
1450 OperandRecycler.clear(OperandAllocator);
1458void SelectionDAG::allnodes_clear() {
1459 assert(&*AllNodes.begin() == &EntryNode);
1460 AllNodes.remove(AllNodes.begin());
1461 while (!AllNodes.empty())
1462 DeallocateNode(&AllNodes.front());
1464 NextPersistentId = 0;
1470 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1472 switch (
N->getOpcode()) {
1477 "debug location. Use another overload.");
1484 const SDLoc &
DL,
void *&InsertPos) {
1485 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1487 switch (
N->getOpcode()) {
1493 if (
N->getDebugLoc() !=
DL.getDebugLoc())
1500 if (
DL.getIROrder() &&
DL.getIROrder() <
N->getIROrder())
1501 N->setDebugLoc(
DL.getDebugLoc());
1510 OperandRecycler.clear(OperandAllocator);
1511 OperandAllocator.Reset();
1514 ExtendedValueTypeNodes.clear();
1515 ExternalSymbols.clear();
1516 TargetExternalSymbols.clear();
1522 EntryNode.UseList =
nullptr;
1523 InsertNode(&EntryNode);
1529 return VT.
bitsGT(
Op.getValueType())
1535std::pair<SDValue, SDValue>
1539 "Strict no-op FP extend/round not allowed.");
1546 return std::pair<SDValue, SDValue>(Res,
SDValue(Res.
getNode(), 1));
1550 return VT.
bitsGT(
Op.getValueType()) ?
1556 return VT.
bitsGT(
Op.getValueType()) ?
1562 return VT.
bitsGT(
Op.getValueType()) ?
1570 auto Type =
Op.getValueType();
1574 auto Size =
Op.getValueSizeInBits();
1585 auto Type =
Op.getValueType();
1589 auto Size =
Op.getValueSizeInBits();
1600 auto Type =
Op.getValueType();
1604 auto Size =
Op.getValueSizeInBits();
1618 return getNode(TLI->getExtendForContent(BType), SL, VT,
Op);
1622 EVT OpVT =
Op.getValueType();
1624 "Cannot getZeroExtendInReg FP types");
1626 "getZeroExtendInReg type should be vector iff the operand "
1630 "Vector element counts must match in getZeroExtendInReg");
1648 EVT OpVT =
Op.getValueType();
1650 "Cannot getVPZeroExtendInReg FP types");
1652 "getVPZeroExtendInReg type and operand type should be vector!");
1654 "Vector element counts must match in getZeroExtendInReg");
1693 return getNode(ISD::VP_XOR,
DL, VT, Val, TrueValue, Mask, EVL);
1704 return getNode(ISD::VP_ZERO_EXTEND,
DL, VT,
Op, Mask, EVL);
1706 return getNode(ISD::VP_TRUNCATE,
DL, VT,
Op, Mask, EVL);
1715 switch (TLI->getBooleanContents(OpVT)) {
1726 bool isT,
bool isO) {
1732 bool isT,
bool isO) {
1733 return getConstant(*ConstantInt::get(*Context, Val),
DL, VT, isT, isO);
1737 EVT VT,
bool isT,
bool isO) {
1754 EltVT = TLI->getTypeToTransformTo(*
getContext(), EltVT);
1760 Elt = ConstantInt::get(*
getContext(), NewVal);
1772 EVT ViaEltVT = TLI->getTypeToTransformTo(*
getContext(), EltVT);
1779 "Can only handle an even split!");
1783 for (
unsigned i = 0; i != Parts; ++i)
1785 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1786 ViaEltVT, isT, isO));
1791 unsigned ViaVecNumElts = VT.
getSizeInBits() / ViaEltSizeInBits;
1802 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1803 ViaEltVT, isT, isO));
1808 std::reverse(EltParts.
begin(), EltParts.
end());
1827 "APInt size does not match type size!");
1836 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1841 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1843 N->setDebugLoc(
DL.getDebugLoc());
1844 CSEMap.InsertNode(
N, IP);
1856 bool isT,
bool isO) {
1864 IsTarget, IsOpaque);
1896 EVT VT,
bool isTarget) {
1917 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1922 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1923 CSEMap.InsertNode(
N, IP);
1937 if (EltVT == MVT::f32)
1939 if (EltVT == MVT::f64)
1941 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1942 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1953 EVT VT, int64_t
Offset,
bool isTargetGA,
1954 unsigned TargetFlags) {
1955 assert((TargetFlags == 0 || isTargetGA) &&
1956 "Cannot set target flags on target-independent globals");
1974 ID.AddInteger(TargetFlags);
1976 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
1979 auto *
N = newSDNode<GlobalAddressSDNode>(
1980 Opc,
DL.getIROrder(),
DL.getDebugLoc(), GV, VTs,
Offset, TargetFlags);
1981 CSEMap.InsertNode(
N, IP);
1995 auto *
N = newSDNode<DeactivationSymbolSDNode>(GV, VTs);
1996 CSEMap.InsertNode(
N, IP);
2008 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2011 auto *
N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
2012 CSEMap.InsertNode(
N, IP);
2018 unsigned TargetFlags) {
2019 assert((TargetFlags == 0 || isTarget) &&
2020 "Cannot set target flags on target-independent jump tables");
2026 ID.AddInteger(TargetFlags);
2028 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2031 auto *
N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
2032 CSEMap.InsertNode(
N, IP);
2046 bool isTarget,
unsigned TargetFlags) {
2047 assert((TargetFlags == 0 || isTarget) &&
2048 "Cannot set target flags on target-independent globals");
2057 ID.AddInteger(Alignment->value());
2060 ID.AddInteger(TargetFlags);
2062 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2065 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2067 CSEMap.InsertNode(
N, IP);
2076 bool isTarget,
unsigned TargetFlags) {
2077 assert((TargetFlags == 0 || isTarget) &&
2078 "Cannot set target flags on target-independent globals");
2085 ID.AddInteger(Alignment->value());
2087 C->addSelectionDAGCSEId(
ID);
2088 ID.AddInteger(TargetFlags);
2090 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2093 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VTs,
Offset, *Alignment,
2095 CSEMap.InsertNode(
N, IP);
2105 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2108 auto *
N = newSDNode<BasicBlockSDNode>(
MBB);
2109 CSEMap.InsertNode(
N, IP);
2116 ValueTypeNodes.size())
2123 N = newSDNode<VTSDNode>(VT);
2129 SDNode *&
N = ExternalSymbols[Sym];
2131 N = newSDNode<ExternalSymbolSDNode>(
false, Sym, 0,
getVTList(VT));
2145 N = newSDNode<MCSymbolSDNode>(Sym,
getVTList(VT));
2151 unsigned TargetFlags) {
2153 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
2155 N = newSDNode<ExternalSymbolSDNode>(
true, Sym, TargetFlags,
getVTList(VT));
2161 EVT VT,
unsigned TargetFlags) {
2167 if ((
unsigned)
Cond >= CondCodeNodes.size())
2168 CondCodeNodes.resize(
Cond+1);
2170 if (!CondCodeNodes[
Cond]) {
2171 auto *
N = newSDNode<CondCodeSDNode>(
Cond);
2172 CondCodeNodes[
Cond] =
N;
2181 "APInt size does not match type size!");
2199template <
typename Ty>
2201 EVT VT, Ty Quantity) {
2202 if (Quantity.isScalable())
2206 return DAG.
getConstant(Quantity.getKnownMinValue(),
DL, VT);
2232 const APInt &StepVal) {
2256 "Must have the same number of vector elements as mask elements!");
2258 "Invalid VECTOR_SHUFFLE");
2266 int NElts = Mask.size();
2268 [&](
int M) {
return M < (NElts * 2) && M >= -1; }) &&
2269 "Index out of range");
2277 for (
int i = 0; i != NElts; ++i)
2278 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2285 if (TLI->hasVectorBlend()) {
2294 for (
int i = 0; i < NElts; ++i) {
2295 if (MaskVec[i] <
Offset || MaskVec[i] >= (
Offset + NElts))
2299 if (UndefElements[MaskVec[i] -
Offset]) {
2305 if (!UndefElements[i])
2310 BlendSplat(N1BV, 0);
2312 BlendSplat(N2BV, NElts);
2317 bool AllLHS =
true, AllRHS =
true;
2319 for (
int i = 0; i != NElts; ++i) {
2320 if (MaskVec[i] >= NElts) {
2325 }
else if (MaskVec[i] >= 0) {
2329 if (AllLHS && AllRHS)
2331 if (AllLHS && !N2Undef)
2344 bool Identity =
true, AllSame =
true;
2345 for (
int i = 0; i != NElts; ++i) {
2346 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity =
false;
2347 if (MaskVec[i] != MaskVec[0]) AllSame =
false;
2349 if (Identity && NElts)
2382 if (AllSame && SameNumElts) {
2383 EVT BuildVT = BV->getValueType(0);
2400 for (
int i = 0; i != NElts; ++i)
2401 ID.AddInteger(MaskVec[i]);
2404 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2410 int *MaskAlloc = OperandAllocator.Allocate<
int>(NElts);
2413 auto *
N = newSDNode<ShuffleVectorSDNode>(VTs, dl.
getIROrder(),
2415 createOperands(
N,
Ops);
2417 CSEMap.InsertNode(
N, IP);
2438 ID.AddInteger(Reg.id());
2440 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2443 auto *
N = newSDNode<RegisterSDNode>(Reg, VTs);
2444 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(
N, FLI, UA);
2445 CSEMap.InsertNode(
N, IP);
2453 ID.AddPointer(RegMask);
2455 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2458 auto *
N = newSDNode<RegisterMaskSDNode>(RegMask);
2459 CSEMap.InsertNode(
N, IP);
2474 ID.AddPointer(Label);
2476 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2481 createOperands(
N,
Ops);
2483 CSEMap.InsertNode(
N, IP);
2489 int64_t
Offset,
bool isTarget,
2490 unsigned TargetFlags) {
2498 ID.AddInteger(TargetFlags);
2500 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2503 auto *
N = newSDNode<BlockAddressSDNode>(
Opc, VTs, BA,
Offset, TargetFlags);
2504 CSEMap.InsertNode(
N, IP);
2515 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2518 auto *
N = newSDNode<SrcValueSDNode>(V);
2519 CSEMap.InsertNode(
N, IP);
2530 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2533 auto *
N = newSDNode<MDNodeSDNode>(MD);
2534 CSEMap.InsertNode(
N, IP);
2540 if (VT == V.getValueType())
2547 unsigned SrcAS,
unsigned DestAS) {
2552 ID.AddInteger(SrcAS);
2553 ID.AddInteger(DestAS);
2556 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2560 VTs, SrcAS, DestAS);
2561 createOperands(
N,
Ops);
2563 CSEMap.InsertNode(
N, IP);
2582 EVT OpTy =
Op.getValueType();
2584 if (OpTy == ShTy || OpTy.
isVector())
return Op;
2593 EVT VT =
Node->getValueType(0);
2602 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2640 Align RedAlign = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2642 if (TLI->isTypeLegal(VT) || !VT.
isVector())
2650 if (RedAlign > StackAlign) {
2653 unsigned NumIntermediates;
2654 TLI->getVectorTypeBreakdown(*
getContext(), VT, IntermediateVT,
2655 NumIntermediates, RegisterVT);
2657 Align RedAlign2 = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2658 if (RedAlign2 < RedAlign)
2659 RedAlign = RedAlign2;
2664 RedAlign = std::min(RedAlign, StackAlign);
2679 false,
nullptr, StackID);
2694 "Don't know how to choose the maximum size when creating a stack "
2703 Align Align = std::max(
DL.getPrefTypeAlign(Ty1),
DL.getPrefTypeAlign(Ty2));
2712 auto GetUndefBooleanConstant = [&]() {
2714 TLI->getBooleanContents(OpVT) ==
2751 return GetUndefBooleanConstant();
2756 return GetUndefBooleanConstant();
2765 const APInt &C2 = N2C->getAPIntValue();
2767 const APInt &C1 = N1C->getAPIntValue();
2777 if (N1CFP && N2CFP) {
2782 return GetUndefBooleanConstant();
2787 return GetUndefBooleanConstant();
2793 return GetUndefBooleanConstant();
2798 return GetUndefBooleanConstant();
2803 return GetUndefBooleanConstant();
2809 return GetUndefBooleanConstant();
2836 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.
getSimpleVT()))
2838 return getSetCC(dl, VT, N2, N1, SwappedCond, {},
2840 }
else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2855 return GetUndefBooleanConstant();
2866 unsigned BitWidth =
Op.getScalarValueSizeInBits();
2875 unsigned Opc =
Op.getOpcode();
2884 return (NoFPClass & TestMask) == TestMask;
2891 return Op->getFlags().hasNoNaNs();
2917 unsigned Depth)
const {
2925 const APInt &DemandedElts,
2926 unsigned Depth)
const {
2933 unsigned Depth )
const {
2939 unsigned Depth)
const {
2944 const APInt &DemandedElts,
2945 unsigned Depth)
const {
2946 EVT VT =
Op.getValueType();
2953 for (
unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2954 if (!DemandedElts[EltIdx])
2958 KnownZeroElements.
setBit(EltIdx);
2960 return KnownZeroElements;
2970 unsigned Opcode = V.getOpcode();
2971 EVT VT = V.getValueType();
2974 "scalable demanded bits are ignored");
2986 UndefElts = V.getOperand(0).isUndef()
2995 APInt UndefLHS, UndefRHS;
3004 (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
3005 UndefElts = UndefLHS | UndefRHS;
3019 return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *
this,
3036 for (
unsigned i = 0; i != NumElts; ++i) {
3042 if (!DemandedElts[i])
3044 if (Scl && Scl !=
Op)
3055 for (
int i = 0; i != (int)NumElts; ++i) {
3061 if (!DemandedElts[i])
3063 if (M < (
int)NumElts)
3066 DemandedRHS.
setBit(M - NumElts);
3078 auto CheckSplatSrc = [&](
SDValue Src,
const APInt &SrcElts) {
3080 return (SrcElts.popcount() == 1) ||
3082 (SrcElts & SrcUndefs).
isZero());
3084 if (!DemandedLHS.
isZero())
3085 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
3086 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
3092 if (Src.getValueType().isScalableVector())
3094 uint64_t Idx = V.getConstantOperandVal(1);
3095 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3097 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
3099 UndefElts = UndefSrcElts.
extractBits(NumElts, Idx);
3110 if (Src.getValueType().isScalableVector())
3114 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3116 UndefElts = UndefSrcElts.
trunc(NumElts);
3123 EVT SrcVT = Src.getValueType();
3133 if ((
BitWidth % SrcBitWidth) == 0) {
3135 unsigned Scale =
BitWidth / SrcBitWidth;
3137 APInt ScaledDemandedElts =
3139 for (
unsigned I = 0;
I != Scale; ++
I) {
3143 SubDemandedElts &= ScaledDemandedElts;
3147 if (!SubUndefElts.
isZero())
3161 EVT VT = V.getValueType();
3171 (AllowUndefs || !UndefElts);
3177 EVT VT = V.getValueType();
3178 unsigned Opcode = V.getOpcode();
3199 SplatIdx = (UndefElts & DemandedElts).
countr_one();
3214 if (!SVN->isSplat())
3216 int Idx = SVN->getSplatIndex();
3217 int NumElts = V.getValueType().getVectorNumElements();
3218 SplatIdx = Idx % NumElts;
3219 return V.getOperand(Idx / NumElts);
3231 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
3234 LegalSVT = TLI->getTypeToTransformTo(*
getContext(), LegalSVT);
3235 if (LegalSVT.
bitsLT(SVT))
3243std::optional<ConstantRange>
3245 unsigned Depth)
const {
3248 "Unknown shift node");
3250 unsigned BitWidth = V.getScalarValueSizeInBits();
3253 const APInt &ShAmt = Cst->getAPIntValue();
3255 return std::nullopt;
3260 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
3261 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3262 if (!DemandedElts[i])
3266 MinAmt = MaxAmt =
nullptr;
3269 const APInt &ShAmt = SA->getAPIntValue();
3271 return std::nullopt;
3272 if (!MinAmt || MinAmt->
ugt(ShAmt))
3274 if (!MaxAmt || MaxAmt->ult(ShAmt))
3277 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3278 "Failed to find matching min/max shift amounts");
3279 if (MinAmt && MaxAmt)
3289 return std::nullopt;
3292std::optional<unsigned>
3294 unsigned Depth)
const {
3297 "Unknown shift node");
3298 if (std::optional<ConstantRange> AmtRange =
3300 if (
const APInt *ShAmt = AmtRange->getSingleElement())
3301 return ShAmt->getZExtValue();
3302 return std::nullopt;
3305std::optional<unsigned>
3311std::optional<unsigned>
3313 unsigned Depth)
const {
3316 "Unknown shift node");
3317 if (std::optional<ConstantRange> AmtRange =
3319 return AmtRange->getUnsignedMin().getZExtValue();
3320 return std::nullopt;
3323std::optional<unsigned>
3329std::optional<unsigned>
3331 unsigned Depth)
const {
3334 "Unknown shift node");
3335 if (std::optional<ConstantRange> AmtRange =
3337 return AmtRange->getUnsignedMax().getZExtValue();
3338 return std::nullopt;
3341std::optional<unsigned>
3359 unsigned Depth)
const {
3360 unsigned BitWidth =
Op.getScalarValueSizeInBits();
3364 if (
auto OptAPInt =
Op->bitcastToAPInt()) {
3374 assert((!
Op.getValueType().isScalableVector() || NumElts == 1) &&
3375 "DemandedElts for scalable vectors must be 1 to represent all lanes");
3376 assert((!
Op.getValueType().isFixedLengthVector() ||
3377 NumElts ==
Op.getValueType().getVectorNumElements()) &&
3378 "Unexpected vector size");
3383 unsigned Opcode =
Op.getOpcode();
3391 "Expected SPLAT_VECTOR implicit truncation");
3398 unsigned ScalarSize =
Op.getOperand(0).getScalarValueSizeInBits();
3400 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3407 const APInt &Step =
Op.getConstantOperandAPInt(0);
3416 const APInt MinNumElts =
3422 .
umul_ov(MinNumElts, Overflow);
3426 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
3434 assert(!
Op.getValueType().isScalableVector());
3437 for (
unsigned i = 0, e =
Op.getNumOperands(); i != e; ++i) {
3438 if (!DemandedElts[i])
3447 "Expected BUILD_VECTOR implicit truncation");
3471 assert(!
Op.getValueType().isScalableVector());
3474 APInt DemandedLHS, DemandedRHS;
3478 DemandedLHS, DemandedRHS))
3483 if (!!DemandedLHS) {
3491 if (!!DemandedRHS) {
3500 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
3505 if (
Op.getValueType().isScalableVector())
3509 EVT SubVectorVT =
Op.getOperand(0).getValueType();
3511 unsigned NumSubVectors =
Op.getNumOperands();
3512 for (
unsigned i = 0; i != NumSubVectors; ++i) {
3514 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
3515 if (!!DemandedSub) {
3527 if (
Op.getValueType().isScalableVector())
3534 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3536 APInt DemandedSrcElts = DemandedElts;
3537 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
3540 if (!!DemandedSubElts) {
3545 if (!!DemandedSrcElts) {
3555 APInt DemandedSrcElts;
3556 if (Src.getValueType().isScalableVector())
3557 DemandedSrcElts =
APInt(1, 1);
3560 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3561 DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
3567 if (
Op.getValueType().isScalableVector())
3571 if (DemandedElts != 1)
3582 if (
Op.getValueType().isScalableVector())
3602 if ((
BitWidth % SubBitWidth) == 0) {
3609 unsigned SubScale =
BitWidth / SubBitWidth;
3610 APInt SubDemandedElts(NumElts * SubScale, 0);
3611 for (
unsigned i = 0; i != NumElts; ++i)
3612 if (DemandedElts[i])
3613 SubDemandedElts.
setBit(i * SubScale);
3615 for (
unsigned i = 0; i != SubScale; ++i) {
3618 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3619 Known.
insertBits(Known2, SubBitWidth * Shifts);
3624 if ((SubBitWidth %
BitWidth) == 0) {
3625 assert(
Op.getValueType().isVector() &&
"Expected bitcast to vector");
3630 unsigned SubScale = SubBitWidth /
BitWidth;
3631 APInt SubDemandedElts =
3636 for (
unsigned i = 0; i != NumElts; ++i)
3637 if (DemandedElts[i]) {
3638 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3669 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3680 if (
Op->getFlags().hasNoSignedWrap() &&
3681 Op.getOperand(0) ==
Op.getOperand(1) &&
3708 unsigned SignBits1 =
3712 unsigned SignBits0 =
3718 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3721 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3722 if (
Op.getResNo() == 0)
3729 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3732 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3733 if (
Op.getResNo() == 0)
3786 if (
Op.getResNo() != 1)
3792 if (TLI->getBooleanContents(
Op.getValueType().isVector(),
false) ==
3801 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
3803 if (TLI->getBooleanContents(
Op.getOperand(OpNo).getValueType()) ==
3813 bool NUW =
Op->getFlags().hasNoUnsignedWrap();
3814 bool NSW =
Op->getFlags().hasNoSignedWrap();
3821 if (std::optional<unsigned> ShMinAmt =
3830 Op->getFlags().hasExact());
3833 if (std::optional<unsigned> ShMinAmt =
3841 Op->getFlags().hasExact());
3847 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3862 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3868 DemandedElts,
Depth + 1);
3884 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3887 unsigned LoBits =
Op.getOperand(0).getScalarValueSizeInBits();
3888 unsigned HiBits =
Op.getOperand(1).getScalarValueSizeInBits();
3891 Known = Known2.
concat(Known);
3905 if (
Op.getResNo() == 0)
3936 unsigned MinRedundantSignBits =
3940 Known =
Range.toKnownBits();
3976 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3981 !
Op.getValueType().isScalableVector()) {
3994 for (
unsigned i = 0; i != NumElts; ++i) {
3995 if (!DemandedElts[i])
4005 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4024 }
else if (
Op.getResNo() == 0) {
4025 unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
4026 KnownBits KnownScalarMemory(ScalarMemorySize);
4027 if (
const MDNode *MD = LD->getRanges())
4038 Known = KnownScalarMemory;
4045 if (
Op.getValueType().isScalableVector())
4047 EVT InVT =
Op.getOperand(0).getValueType();
4059 if (
Op.getValueType().isScalableVector())
4061 EVT InVT =
Op.getOperand(0).getValueType();
4077 if (
Op.getValueType().isScalableVector())
4079 EVT InVT =
Op.getOperand(0).getValueType();
4114 Known.
Zero |= (~InMask);
4115 Known.
One &= (~Known.Zero);
4135 if ((NoFPClass & NegativeTestMask) == NegativeTestMask) {
4141 if ((NoFPClass & PositiveTestMask) == PositiveTestMask) {
4161 bool SelfAdd =
Op.getOperand(0) ==
Op.getOperand(1) &&
4163 Op.getOperand(0), DemandedElts,
4166 Flags.hasNoUnsignedWrap(), SelfAdd);
4174 Flags.hasNoUnsignedWrap());
4181 if (
Op.getResNo() == 1) {
4183 if (TLI->getBooleanContents(
Op.getOperand(0).getValueType()) ==
4192 "We only compute knownbits for the difference here.");
4199 Borrow = Borrow.
trunc(1);
4213 if (
Op.getResNo() == 1) {
4215 if (TLI->getBooleanContents(
Op.getOperand(0).getValueType()) ==
4224 assert(
Op.getResNo() == 0 &&
"We only compute knownbits for the sum here.");
4234 Carry = Carry.
trunc(1);
4270 const unsigned Index =
Op.getConstantOperandVal(1);
4271 const unsigned EltBitWidth =
Op.getValueSizeInBits();
4278 Known = Known.
trunc(EltBitWidth);
4294 Known = Known.
trunc(EltBitWidth);
4300 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4310 if (
Op.getValueType().isScalableVector())
4319 bool DemandedVal =
true;
4320 APInt DemandedVecElts = DemandedElts;
4322 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4323 unsigned EltIdx = CEltNo->getZExtValue();
4324 DemandedVal = !!DemandedElts[EltIdx];
4332 if (!!DemandedVecElts) {
4351 Known = Known2.
abs();
4384 if (CstLow && CstHigh) {
4389 const APInt &ValueHigh = CstHigh->getAPIntValue();
4390 if (ValueLow.
sle(ValueHigh)) {
4393 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4416 if (IsMax && CstLow) {
4446 if (
Op.getResNo() == 0) {
4448 unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
4449 KnownBits KnownScalarMemory(ScalarMemorySize);
4450 if (
const MDNode *MD = AT->getRanges())
4453 switch (AT->getExtensionType()) {
4461 switch (TLI->getExtendForAtomicOps()) {
4474 Known = KnownScalarMemory;
4482 if (
Op.getResNo() == 1) {
4487 if (TLI->getBooleanContents(
Op.getValueType().isVector(),
false) ==
4508 if (
Op.getResNo() == 0) {
4510 unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();
4532 if (
Op.getValueType().isScalableVector())
4536 TLI->computeKnownBitsForTargetNode(
Op, Known, DemandedElts, *
this,
Depth);
4668 unsigned Depth)
const {
4674 const APInt &DemandedElts,
4676 unsigned Depth)
const {
4677 EVT VT =
Op.getValueType();
4681 return ConstantRange::getFull(
BitWidth);
4686 unsigned Opcode =
Op.getOpcode();
4690 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
4697 return ConstantRange::getFull(
BitWidth);
4702 unsigned Depth)
const {
4710 unsigned Depth)
const {
4720 unsigned Depth)
const {
4726 const APInt &DemandedElts,
4727 bool OrZero,
unsigned Depth)
const {
4733 [[maybe_unused]]
unsigned NumElts = DemandedElts.
getBitWidth();
4735 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4738 "Unexpected vector size");
4742 return (OrZero && V.isZero()) || V.isPowerOf2();
4753 auto *C = dyn_cast<ConstantSDNode>(P.value());
4754 return !DemandedElts[P.index()] || (C && IsPowerOfTwoOrZero(C));
4762 if (IsPowerOfTwoOrZero(
C))
4780 APInt DemandedSrcElts =
4781 ConstEltNo && ConstEltNo->getAPIntValue().
ult(NumSrcElts)
4806 if (
C &&
C->getAPIntValue() == 1)
4817 if (
C &&
C->getAPIntValue().isSignMask())
4867 APInt DemandedLHS, DemandedRHS;
4871 DemandedLHS, DemandedRHS))
4895 return C1->getValueAPF().getExactLog2Abs() >= 0;
4909 unsigned Depth)
const {
4910 EVT VT =
Op.getValueType();
4915 unsigned FirstAnswer = 1;
4918 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4921 const APInt &Val =
C->getAPIntValue();
4931 unsigned Opcode =
Op.getOpcode();
4936 return VTBits-Tmp+1;
4950 unsigned NumSrcBits =
Op.getOperand(0).getValueSizeInBits();
4952 if (NumSrcSignBits > (NumSrcBits - VTBits))
4953 return NumSrcSignBits - (NumSrcBits - VTBits);
4959 for (
unsigned i = 0, e =
Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
4960 if (!DemandedElts[i])
4967 APInt T =
C->getAPIntValue().trunc(VTBits);
4968 Tmp2 =
T.getNumSignBits();
4972 if (
SrcOp.getValueSizeInBits() != VTBits) {
4974 "Expected BUILD_VECTOR implicit truncation");
4975 unsigned ExtraBits =
SrcOp.getValueSizeInBits() - VTBits;
4976 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4979 Tmp = std::min(Tmp, Tmp2);
4990 Tmp = std::min(Tmp, Tmp2);
4997 APInt DemandedLHS, DemandedRHS;
5001 DemandedLHS, DemandedRHS))
5004 Tmp = std::numeric_limits<unsigned>::max();
5007 if (!!DemandedRHS) {
5009 Tmp = std::min(Tmp, Tmp2);
5014 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5030 if (VTBits == SrcBits)
5036 if ((SrcBits % VTBits) == 0) {
5039 unsigned Scale = SrcBits / VTBits;
5040 APInt SrcDemandedElts =
5050 for (
unsigned i = 0; i != NumElts; ++i)
5051 if (DemandedElts[i]) {
5052 unsigned SubOffset = i % Scale;
5053 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
5054 SubOffset = SubOffset * VTBits;
5055 if (Tmp <= SubOffset)
5057 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
5067 return VTBits - Tmp + 1;
5069 Tmp = VTBits -
Op.getOperand(0).getScalarValueSizeInBits();
5076 return std::max(Tmp, Tmp2);
5081 EVT SrcVT = Src.getValueType();
5089 if (std::optional<unsigned> ShAmt =
5091 Tmp = std::min(Tmp + *ShAmt, VTBits);
5094 if (std::optional<ConstantRange> ShAmtRange =
5096 unsigned MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
5097 unsigned MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
5108 unsigned SizeDifference =
5110 if (SizeDifference <= MinShAmt) {
5111 Tmp = SizeDifference +
5114 return Tmp - MaxShAmt;
5120 return Tmp - MaxShAmt;
5130 FirstAnswer = std::min(Tmp, Tmp2);
5140 if (Tmp == 1)
return 1;
5142 return std::min(Tmp, Tmp2);
5145 if (Tmp == 1)
return 1;
5147 return std::min(Tmp, Tmp2);
5159 if (CstLow && CstHigh) {
5164 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
5165 return std::min(Tmp, Tmp2);
5174 return std::min(Tmp, Tmp2);
5182 return std::min(Tmp, Tmp2);
5186 if (
Op.getResNo() == 0 &&
Op.getOperand(0) ==
Op.getOperand(1))
5197 if (
Op.getResNo() != 1)
5203 if (TLI->getBooleanContents(VT.
isVector(),
false) ==
5211 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
5213 if (TLI->getBooleanContents(
Op.getOperand(OpNo).getValueType()) ==
5228 unsigned RotAmt =
C->getAPIntValue().urem(VTBits);
5232 RotAmt = (VTBits - RotAmt) % VTBits;
5236 if (Tmp > (RotAmt + 1))
return (Tmp - RotAmt);
5243 if (Tmp == 1)
return 1;
5248 if (CRHS->isAllOnes()) {
5254 if ((Known.
Zero | 1).isAllOnes())
5264 if (Tmp2 == 1)
return 1;
5268 return std::min(Tmp, Tmp2) - 1;
5271 if (Tmp2 == 1)
return 1;
5276 if (CLHS->isZero()) {
5281 if ((Known.
Zero | 1).isAllOnes())
5295 if (Tmp == 1)
return 1;
5296 return std::min(Tmp, Tmp2) - 1;
5300 if (SignBitsOp0 == 1)
5303 if (SignBitsOp1 == 1)
5305 unsigned OutValidBits =
5306 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5307 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5315 return std::min(Tmp, Tmp2);
5324 unsigned NumSrcBits =
Op.getOperand(0).getScalarValueSizeInBits();
5326 if (NumSrcSignBits > (NumSrcBits - VTBits))
5327 return NumSrcSignBits - (NumSrcBits - VTBits);
5334 const int BitWidth =
Op.getValueSizeInBits();
5335 const int Items =
Op.getOperand(0).getValueSizeInBits() /
BitWidth;
5339 const int rIndex = Items - 1 -
Op.getConstantOperandVal(1);
5354 bool DemandedVal =
true;
5355 APInt DemandedVecElts = DemandedElts;
5357 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5358 unsigned EltIdx = CEltNo->getZExtValue();
5359 DemandedVal = !!DemandedElts[EltIdx];
5362 Tmp = std::numeric_limits<unsigned>::max();
5368 Tmp = std::min(Tmp, Tmp2);
5370 if (!!DemandedVecElts) {
5372 Tmp = std::min(Tmp, Tmp2);
5374 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5384 const unsigned BitWidth =
Op.getValueSizeInBits();
5385 const unsigned EltBitWidth =
Op.getOperand(0).getScalarValueSizeInBits();
5398 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5408 APInt DemandedSrcElts;
5409 if (Src.getValueType().isScalableVector())
5410 DemandedSrcElts =
APInt(1, 1);
5413 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5414 DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
5423 Tmp = std::numeric_limits<unsigned>::max();
5424 EVT SubVectorVT =
Op.getOperand(0).getValueType();
5426 unsigned NumSubVectors =
Op.getNumOperands();
5427 for (
unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5429 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
5433 Tmp = std::min(Tmp, Tmp2);
5435 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5446 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5448 APInt DemandedSrcElts = DemandedElts;
5449 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
5451 Tmp = std::numeric_limits<unsigned>::max();
5452 if (!!DemandedSubElts) {
5457 if (!!DemandedSrcElts) {
5459 Tmp = std::min(Tmp, Tmp2);
5461 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
5466 if (
Op.getResNo() != 0)
5470 if (
const MDNode *Ranges = LD->getRanges()) {
5471 if (DemandedElts != 1)
5476 switch (LD->getExtensionType()) {
5494 unsigned ExtType = LD->getExtensionType();
5499 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5500 return VTBits - Tmp + 1;
5502 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5503 return VTBits - Tmp;
5505 if (
const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
5508 Type *CstTy = Cst->getType();
5513 for (
unsigned i = 0; i != NumElts; ++i) {
5514 if (!DemandedElts[i])
5519 Tmp = std::min(Tmp,
Value.getNumSignBits());
5523 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5524 Tmp = std::min(Tmp,
Value.getNumSignBits());
5556 if (
Op.getResNo() == 0) {
5557 Tmp = AT->getMemoryVT().getScalarSizeInBits();
5563 switch (AT->getExtensionType()) {
5567 return VTBits - Tmp + 1;
5569 return VTBits - Tmp;
5574 return VTBits - Tmp + 1;
5576 return VTBits - Tmp;
5591 TLI->ComputeNumSignBitsForTargetNode(
Op, DemandedElts, *
this,
Depth);
5593 FirstAnswer = std::max(FirstAnswer, NumBits);
5604 unsigned Depth)
const {
5606 return Op.getScalarValueSizeInBits() - SignBits + 1;
5610 const APInt &DemandedElts,
5611 unsigned Depth)
const {
5613 return Op.getScalarValueSizeInBits() - SignBits + 1;
5618 unsigned Depth)
const {
5628 const APInt &DemandedElts,
5630 unsigned Depth)
const {
5631 unsigned Opcode =
Op.getOpcode();
5660 for (
unsigned i = 0, e =
Op.getNumOperands(); i < e; ++i) {
5661 if (!DemandedElts[i])
5670 if (Src.getValueType().isScalableVector())
5673 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5674 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
5680 if (
Op.getValueType().isScalableVector())
5685 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
5687 APInt DemandedSrcElts = DemandedElts;
5688 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
5691 Sub, DemandedSubElts, Kind,
Depth + 1))
5694 Src, DemandedSrcElts, Kind,
Depth + 1))
5702 EVT SrcVT = Src.getValueType();
5706 IndexC->getZExtValue());
5721 if (DemandedElts[IndexC->getZExtValue()] &&
5724 APInt InVecDemandedElts = DemandedElts;
5725 InVecDemandedElts.
clearBit(IndexC->getZExtValue());
5726 if (!!InVecDemandedElts &&
5729 InVecDemandedElts, Kind,
Depth + 1))
5741 if (DemandedElts[0] &&
5750 APInt DemandedLHS, DemandedRHS;
5753 DemandedElts, DemandedLHS, DemandedRHS,
5756 if (!DemandedLHS.
isZero() &&
5760 if (!DemandedRHS.
isZero() &&
5808 return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts, Kind,
5821 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
5822 Op, DemandedElts, *
this, Kind,
Depth);
5833 return isGuaranteedNotToBeUndefOrPoison(V, Kind, Depth + 1);
5839 unsigned Depth)
const {
5847 unsigned Depth)
const {
5848 if (ConsiderFlags &&
includesPoison(Kind) &&
Op->hasPoisonGeneratingFlags())
5851 unsigned Opcode =
Op.getOpcode();
5942 if (
Op.getOperand(0).getValueType().isInteger())
5949 unsigned CCOp = Opcode ==
ISD::SETCC ? 2 : 4;
5951 return (
unsigned)CCCode & 0x10U;
6011 EVT VecVT =
Op.getOperand(0).getValueType();
6022 for (
auto [Idx, Elt] :
enumerate(SVN->getMask()))
6023 if (Elt < 0 && DemandedElts[Idx])
6035 return TLI->canCreateUndefOrPoisonForTargetNode(
6036 Op, DemandedElts, *
this, Kind, ConsiderFlags,
Depth);
6045 unsigned Opcode =
Op.getOpcode();
6047 return Op->getFlags().hasDisjoint() ||
6061 unsigned Depth)
const {
6067 const APInt &DemandedElts,
6069 unsigned Depth)
const {
6081 EVT VT =
Op.getValueType();
6085 "Unexpected vector size");
6090 unsigned Opcode =
Op.getOpcode();
6099 InterestedClasses,
Depth + 1);
6106 for (
unsigned I = 0, E =
Op.getNumOperands();
I != E; ++
I) {
6107 if (!DemandedElts[
I])
6127 EVT SrcVT = Src.getValueType();
6153 EVT SrcVT =
Op.getOperand(0).getValueType();
6158 if (VTNumElts != SrcVTNumElts)
6167 InterestedClasses,
Depth + 1);
6173 InterestedClasses,
Depth + 1);
6175 InterestedClasses,
Depth + 1);
6181 InterestedClasses,
Depth + 1);
6189 EVT SrcVT = Src.getValueType();
6191 unsigned Idx =
Op.getConstantOperandVal(1);
6207 unsigned Idx =
Op.getConstantOperandVal(2);
6211 APInt DemandedMask =
6213 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
6216 if (!DemandedSrcElts.
isZero())
6218 InterestedClasses,
Depth + 1);
6219 if (!DemandedSubElts.
isZero()) {
6221 SubVector, DemandedSubElts, InterestedClasses,
Depth + 1);
6222 Known = DemandedSrcElts.
isZero() ? SubKnown : (Known | SubKnown);
6236 Op.getOperand(2), DemandedElts, InterestedClasses,
Depth + 1);
6240 Op.getOperand(1), DemandedElts, InterestedClasses,
Depth + 1);
6247 TLI->computeKnownFPClassForTargetNode(
Op, Known, DemandedElts, *
this,
6257 unsigned Depth)
const {
6263 bool SNaN,
unsigned Depth)
const {
6264 assert(!DemandedElts.
isZero() &&
"No demanded elements");
6267 if (
Op->getFlags().hasNoNaNs())
6273 unsigned Opcode =
Op.getOpcode();
6375 EVT SrcVT = Src.getValueType();
6379 Idx->getZExtValue());
6386 if (Src.getValueType().isFixedLengthVector()) {
6387 unsigned Idx =
Op.getConstantOperandVal(1);
6388 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
6389 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
6399 unsigned Idx =
Op.getConstantOperandVal(2);
6405 APInt DemandedMask =
6407 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
6410 bool NeverNaN =
true;
6411 if (!DemandedSrcElts.
isZero())
6414 if (NeverNaN && !DemandedSubElts.
isZero())
6423 unsigned NumElts =
Op.getNumOperands();
6424 for (
unsigned I = 0;
I != NumElts; ++
I)
6425 if (DemandedElts[
I] &&
6444 return TLI->isKnownNeverNaNForTargetNode(
Op, DemandedElts, *
this, SNaN,
6461 const APInt &DemandedElts,
6462 unsigned Depth)
const {
6463 assert(!DemandedElts.
isZero() &&
"No demanded elements");
6464 EVT VT =
Op.getValueType();
6476 unsigned Depth)
const {
6480 EVT OpVT =
Op.getValueType();
6483 assert(!
Op.getValueType().isFloatingPoint() &&
6484 "Floating point types unsupported - use isKnownNeverLogicalZero");
6497 switch (
Op.getOpcode()) {
6504 auto *C = dyn_cast<ConstantSDNode>(P.value());
6505 return !DemandedElts[P.index()] || (C && IsNeverZero(C));
6532 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
6549 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6554 if (ValKnown.
One[0])
6566 if (
Op.getValueType().isScalableVector())
6574 APInt DemandedLHS, DemandedRHS;
6576 assert(NumElts == SVN->getMask().size() &&
"Unexpected vector size");
6578 DemandedLHS, DemandedRHS))
6581 return (!DemandedLHS ||
6640 if (
Op->getFlags().hasExact())
6658 if (
Op->getFlags().hasExact())
6663 if (
Op->getFlags().hasNoUnsignedWrap())
6681 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
6692 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
6706 return !C1->isNegative();
6708 switch (
Op.getOpcode()) {
6722 assert(
Use.getValueType().isFloatingPoint());
6724 if (
User->getFlags().hasNoSignedZeros())
6729 switch (
User->getOpcode()) {
6737 return OperandNo == 0;
6755 if (
Op->getFlags().hasNoSignedZeros())
6760 if (
Op->use_size() > 2)
6763 [&](
const SDUse &
Use) { return canIgnoreSignBitOfZero(Use); });
6768 if (
A ==
B)
return true;
6773 if (CA->isZero() && CB->isZero())
return true;
6808 NotOperand = NotOperand->getOperand(0);
6810 if (
Other == NotOperand)
6813 return NotOperand ==
Other->getOperand(0) ||
6814 NotOperand ==
Other->getOperand(1);
6820 A =
A->getOperand(0);
6823 B =
B->getOperand(0);
6826 return MatchNoCommonBitsPattern(
A->getOperand(0),
A->getOperand(1),
B) ||
6827 MatchNoCommonBitsPattern(
A->getOperand(1),
A->getOperand(0),
B);
6833 assert(
A.getValueType() ==
B.getValueType() &&
6834 "Values must have the same type");
6856 "BUILD_VECTOR cannot be used with scalable types");
6858 "Incorrect element count in BUILD_VECTOR!");
6866 bool IsIdentity =
true;
6867 for (
int i = 0; i !=
NumOps; ++i) {
6870 (IdentitySrc &&
Ops[i].getOperand(0) != IdentitySrc) ||
6872 Ops[i].getConstantOperandAPInt(1) != i) {
6876 IdentitySrc =
Ops[i].getOperand(0);
6889 assert(!
Ops.empty() &&
"Can't concatenate an empty list of vectors!");
6892 return Ops[0].getValueType() ==
Op.getValueType();
6894 "Concatenation of vectors with inconsistent value types!");
6897 "Incorrect element count in vector concatenation!");
6899 if (
Ops.size() == 1)
6910 bool IsIdentity =
true;
6911 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
6913 unsigned IdentityIndex = i *
Op.getValueType().getVectorMinNumElements();
6915 Op.getOperand(0).getValueType() != VT ||
6916 (IdentitySrc &&
Op.getOperand(0) != IdentitySrc) ||
6917 Op.getConstantOperandVal(1) != IdentityIndex) {
6921 assert((!IdentitySrc || IdentitySrc ==
Op.getOperand(0)) &&
6922 "Unexpected identity source vector for concat of extracts");
6923 IdentitySrc =
Op.getOperand(0);
6926 assert(IdentitySrc &&
"Failed to set source vector of extracts");
6942 EVT OpVT =
Op.getValueType();
6958 SVT = (SVT.
bitsLT(
Op.getValueType()) ?
Op.getValueType() : SVT);
6982 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
6985 auto *
N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6986 CSEMap.InsertNode(
N, IP);
6998 Flags = Inserter->getFlags();
6999 return getNode(Opcode,
DL, VT, N1, Flags);
7052 "STEP_VECTOR can only be used with scalable types");
7055 "Unexpected step operand");
7076 "Invalid FP cast!");
7080 "Vector element count mismatch!");
7098 "Invalid SIGN_EXTEND!");
7100 "SIGN_EXTEND result type type should be vector iff the operand "
7105 "Vector element count mismatch!");
7128 unsigned NumSignExtBits =
7139 "Invalid ZERO_EXTEND!");
7141 "ZERO_EXTEND result type type should be vector iff the operand "
7146 "Vector element count mismatch!");
7184 "Invalid ANY_EXTEND!");
7186 "ANY_EXTEND result type type should be vector iff the operand "
7191 "Vector element count mismatch!");
7216 "Invalid TRUNCATE!");
7218 "TRUNCATE result type type should be vector iff the operand "
7223 "Vector element count mismatch!");
7250 assert(VT.
isVector() &&
"This DAG node is restricted to vector types.");
7252 "The input must be the same size or smaller than the result.");
7255 "The destination vector type must have fewer lanes than the input.");
7264 "Invalid ABS_MIN_POISON!");
7271 "BSWAP types must be a multiple of 16 bits!");
7285 "Cannot BITCAST between types of different sizes!");
7298 "Illegal SCALAR_TO_VECTOR node!");
7359 "Wrong operand type!");
7366 if (VT != MVT::Glue) {
7370 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7371 E->intersectFlagsWith(Flags);
7375 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7377 createOperands(
N,
Ops);
7378 CSEMap.InsertNode(
N, IP);
7380 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7381 createOperands(
N,
Ops);
7415 if (!C2.getBoolValue())
7419 if (!C2.getBoolValue())
7423 if (!C2.getBoolValue())
7427 if (!C2.getBoolValue())
7453 return std::nullopt;
7458 bool IsUndef1,
const APInt &C2,
7460 if (!(IsUndef1 || IsUndef2))
7468 return std::nullopt;
7476 if (!TLI->isOffsetFoldingLegal(GA))
7481 int64_t
Offset = C2->getSExtValue();
7501 assert(
Ops.size() == 2 &&
"Div/rem should have 2 operands");
7508 [](
SDValue V) { return V.isUndef() ||
7509 isNullConstant(V); });
7547 const APInt &Val =
C->getAPIntValue();
7551 C->isTargetOpcode(),
C->isOpaque());
7558 C->isTargetOpcode(),
C->isOpaque());
7563 C->isTargetOpcode(),
C->isOpaque());
7565 C->isTargetOpcode(),
C->isOpaque());
7594 C->isTargetOpcode(),
C->isOpaque());
7620 if (VT == MVT::f16 &&
C->getValueType(0) == MVT::i16)
7622 if (VT == MVT::f32 &&
C->getValueType(0) == MVT::i32)
7624 if (VT == MVT::f64 &&
C->getValueType(0) == MVT::i64)
7626 if (VT == MVT::f128 &&
C->getValueType(0) == MVT::i128)
7687 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7690 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::f16)
7693 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::bf16)
7696 if (VT == MVT::i32 &&
C->getValueType(0) == MVT::f32)
7699 if (VT == MVT::i64 &&
C->getValueType(0) == MVT::f64)
7700 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
7717 if (C1->isOpaque() || C2->isOpaque())
7720 std::optional<APInt> FoldAttempt =
7721 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
7727 "Can't fold vectors ops with scalar operands");
7735 if (TLI->isCommutativeBinOp(Opcode))
7751 const APInt &Val = C1->getAPIntValue();
7752 return SignExtendInReg(Val, VT);
7765 ScalarOps.
push_back(SignExtendInReg(Val, OpVT));
7773 SignExtendInReg(
Ops[0].getConstantOperandAPInt(0),
7784 if (C1 && C2 && C3) {
7785 if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
7787 const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
7788 &V3 = C3->getAPIntValue();
7804 if (C1 && C2 && C3) {
7825 Ops[0].getValueType() == VT &&
Ops[1].getValueType() == VT &&
7838 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
7839 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
7843 Opcode, RawBits1[
I], UndefElts1[
I], RawBits2[
I], UndefElts2[
I]);
7854 BVEltVT = BV1->getOperand(0).getValueType();
7857 BVEltVT = BV2->getOperand(0).getValueType();
7863 DstBits, RawBits, DstUndefs,
7866 for (
unsigned I = 0, E = DstBits.
size();
I != E; ++
I) {
7891 ?
Ops[0].getConstantOperandAPInt(0) * RHSVal
7892 :
Ops[0].getConstantOperandAPInt(0) << RHSVal;
7897 auto IsScalarOrSameVectorSize = [NumElts](
const SDValue &
Op) {
7898 return !
Op.getValueType().isVector() ||
7899 Op.getValueType().getVectorElementCount() == NumElts;
7902 auto IsBuildVectorSplatVectorOrUndef = [](
const SDValue &
Op) {
7928 LegalSVT = TLI->getTypeToTransformTo(*
getContext(), LegalSVT);
7940 for (
unsigned I = 0;
I != NumVectorElts;
I++) {
7943 EVT InSVT =
Op.getValueType().getScalarType();
7986 if (LegalSVT != SVT)
7987 ScalarResult =
getNode(ExtendCode,
DL, LegalSVT, ScalarResult);
8001 if (
Ops.size() != 2)
8012 if (N1CFP && N2CFP) {
8063 if (N1C && N1C->getValueAPF().isNegZero() && N2.
isUndef())
8086 if (SrcEltVT == DstEltVT)
8094 if (SrcBitSize == DstBitSize) {
8099 if (
Op.getValueType() != SrcEltVT)
8142 for (
unsigned I = 0, E = RawBits.
size();
I != E; ++
I) {
8143 if (UndefElements[
I])
8164 ID.AddInteger(
A.value());
8167 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
8171 newSDNode<AssertAlignSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
A);
8172 createOperands(
N, {Val});
8174 CSEMap.InsertNode(
N, IP);
8186 Flags = Inserter->getFlags();
8187 return getNode(Opcode,
DL, VT, N1, N2, Flags);
8192 if (!TLI->isCommutativeBinOp(Opcode))
8201 if ((N1C && !N2C) || (N1CFP && !N2CFP))
8215 "Operand is DELETED_NODE!");
8231 N2.
getValueType() == MVT::Other &&
"Invalid token factor!");
8235 if (N1 == N2)
return N1;
8251 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8253 N1.
getValueType() == VT &&
"Binary operator types must match!");
8256 if (N2CV && N2CV->
isZero())
8266 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8268 N1.
getValueType() == VT &&
"Binary operator types must match!");
8278 if (N2CV && N2CV->
isZero())
8292 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8294 N1.
getValueType() == VT &&
"Binary operator types must match!");
8297 if (N2CV && N2CV->
isZero())
8301 const APInt &N2CImm = N2C->getAPIntValue();
8315 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8317 N1.
getValueType() == VT &&
"Binary operator types must match!");
8330 "Types of operands of UCMP/SCMP must match");
8332 "Operands and return type of must both be scalars or vectors");
8336 "Result and operands must have the same number of elements");
8342 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8344 N1.
getValueType() == VT &&
"Binary operator types must match!");
8348 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8350 N1.
getValueType() == VT &&
"Binary operator types must match!");
8356 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8358 N1.
getValueType() == VT &&
"Binary operator types must match!");
8364 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
8366 N1.
getValueType() == VT &&
"Binary operator types must match!");
8377 N1.
getValueType() == VT &&
"Binary operator types must match!");
8385 "Invalid FCOPYSIGN!");
8390 const APInt &ShiftImm = N2C->getAPIntValue();
8404 "Shift operators return type must be the same as their first arg");
8406 "Shifts only work on integers");
8408 "Vector shift amounts must be in the same as their first arg");
8415 "Invalid use of small shift amount with oversized value!");
8422 if (N2CV && N2CV->
isZero())
8428 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
8434 "IS_FPCLASS is used for a non-floating type");
8449 "AssertNoFPClass is used for a non-floating type");
8454 "FPClassTest value too large");
8463 "Cannot *_EXTEND_INREG FP types");
8465 "AssertSExt/AssertZExt type should be the vector element type "
8466 "rather than the vector type!");
8475 "Cannot *_EXTEND_INREG FP types");
8477 "SIGN_EXTEND_INREG type should be vector iff the operand "
8481 "Vector element counts must match in SIGN_EXTEND_INREG");
8483 if (
EVT == VT)
return N1;
8491 "FP_TO_*INT_SAT type should be vector iff the operand type is "
8495 "Vector element counts must match in FP_TO_*INT_SAT");
8497 "Type to saturate to must be a scalar.");
8504 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
8505 element type of the vector.");
8527 N2C->getZExtValue() % Factor);
8536 "BUILD_VECTOR used for scalable vectors");
8559 if (N1Op2C && N2C) {
8589 assert(N2C && (
unsigned)N2C->getZExtValue() < 2 &&
"Bad EXTRACT_ELEMENT!");
8593 "Wrong types for EXTRACT_ELEMENT!");
8604 unsigned Shift = ElementSize * N2C->getZExtValue();
8605 const APInt &Val = N1C->getAPIntValue();
8612 "Extract subvector VTs must be vectors!");
8614 "Extract subvector VTs must have the same element type!");
8616 "Cannot extract a scalable vector from a fixed length vector!");
8619 "Extract subvector must be from larger vector to smaller vector!");
8620 assert(N2C &&
"Extract subvector index must be a constant");
8624 "Extract subvector overflow!");
8625 assert(N2C->getAPIntValue().getBitWidth() ==
8627 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
8629 "Extract index is not a multiple of the output vector length");
8644 return N1.
getOperand(N2C->getZExtValue() / Factor);
8685 if (TLI->isCommutativeBinOp(Opcode)) {
8764 if (VT != MVT::Glue) {
8768 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
8769 E->intersectFlagsWith(Flags);
8773 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8775 createOperands(
N,
Ops);
8776 CSEMap.InsertNode(
N, IP);
8778 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
8779 createOperands(
N,
Ops);
8792 Flags = Inserter->getFlags();
8793 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
8802 "Operand is DELETED_NODE!");
8821 "SETCC operands must have the same type!");
8823 "SETCC type should be vector iff the operand type is vector!");
8826 "SETCC vector element counts must match!");
8850 "INSERT_VECTOR_ELT vector type mismatch");
8852 "INSERT_VECTOR_ELT scalar fp/int mismatch");
8855 "INSERT_VECTOR_ELT fp scalar type mismatch");
8858 "INSERT_VECTOR_ELT int scalar size mismatch");
8904 "Dest and insert subvector source types must match!");
8906 "Insert subvector VTs must be vectors!");
8908 "Insert subvector VTs must have the same element type!");
8910 "Cannot insert a scalable vector into a fixed length vector!");
8913 "Insert subvector must be from smaller vector to larger vector!");
8915 "Insert subvector index must be constant");
8919 "Insert subvector overflow!");
8922 "Constant index for INSERT_SUBVECTOR has an invalid size");
8966 case ISD::VP_TRUNCATE:
8967 case ISD::VP_SIGN_EXTEND:
8968 case ISD::VP_ZERO_EXTEND:
8977 assert(VT == VecVT &&
"Vector and result type don't match.");
8979 "All inputs must be vectors.");
8980 assert(VecVT == PassthruVT &&
"Vector and passthru types don't match.");
8982 "Vector and mask must have same number of elements.");
8997 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
8998 "node to have the same type!");
9000 "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
9001 "the same type as its result!");
9004 "Expected the element count of the second and third operands of the "
9005 "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
9006 "element count of the first operand and the result!");
9008 "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
9009 "node to have an element type which is the same as or smaller than "
9010 "the element type of the first operand and result!");
9032 if (VT != MVT::Glue) {
9036 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9037 E->intersectFlagsWith(Flags);
9041 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
9043 createOperands(
N,
Ops);
9044 CSEMap.InsertNode(
N, IP);
9046 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
9047 createOperands(
N,
Ops);
9067 Flags = Inserter->getFlags();
9068 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, Flags);
9083 Flags = Inserter->getFlags();
9084 return getNode(Opcode,
DL, VT, N1, N2, N3, N4, N5, Flags);
9101 if (FI->getIndex() < 0)
9116 assert(
C->getAPIntValue().getBitWidth() == 8);
9121 return DAG.
getConstant(Val, dl, VT,
false, IsOpaque);
9126 assert(
Value.getValueType() == MVT::i8 &&
"memset with non-byte fill value?");
9142 if (VT !=
Value.getValueType())
9155 if (Slice.Array ==
nullptr) {
9164 unsigned NumVTBytes = NumVTBits / 8;
9165 unsigned NumBytes = std::min(NumVTBytes,
unsigned(Slice.Length));
9167 APInt Val(NumVTBits, 0);
9169 for (
unsigned i = 0; i != NumBytes; ++i)
9172 for (
unsigned i = 0; i != NumBytes; ++i)
9173 Val |= (
uint64_t)(
unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
9196 if (TLI->shouldPreservePtrArith(this->getMachineFunction().getFunction(),
9211 else if (Src->isAnyAdd() &&
9215 SrcDelta = Src.getConstantOperandVal(1);
9221 SrcDelta +
G->getOffset());
9237 assert(OutLoadChains.
size() &&
"Missing loads in memcpy inlining");
9238 assert(OutStoreChains.
size() &&
"Missing stores in memcpy inlining");
9240 for (
unsigned i = From; i < To; ++i) {
9242 GluedLoadChains.
push_back(OutLoadChains[i]);
9249 for (
unsigned i = From; i < To; ++i) {
9252 ST->getBasePtr(), ST->getMemoryVT(),
9253 ST->getMemOperand());
9275 std::vector<EVT> MemOps;
9276 bool DstAlignCanChange =
false;
9282 DstAlignCanChange =
true;
9284 if (!SrcAlign || Alignment > *SrcAlign)
9285 SrcAlign = Alignment;
9286 assert(SrcAlign &&
"SrcAlign must be set");
9290 bool isZeroConstant = CopyFromConstant && Slice.Array ==
nullptr;
9292 const MemOp Op = isZeroConstant
9296 *SrcAlign, isVol, CopyFromConstant);
9302 if (DstAlignCanChange) {
9303 Type *Ty = MemOps[0].getTypeForEVT(
C);
9304 Align NewAlign =
DL.getABITypeAlign(Ty);
9310 if (!
TRI->hasStackRealignment(MF))
9312 NewAlign = std::min(NewAlign, *StackAlign);
9314 if (NewAlign > Alignment) {
9318 Alignment = NewAlign;
9328 BatchAA && SrcVal &&
9336 unsigned NumMemOps = MemOps.size();
9338 for (
unsigned i = 0; i != NumMemOps; ++i) {
9343 if (VTSize >
Size) {
9346 assert(i == NumMemOps-1 && i != 0);
9347 SrcOff -= VTSize -
Size;
9348 DstOff -= VTSize -
Size;
9351 if (CopyFromConstant &&
9359 if (SrcOff < Slice.Length) {
9361 SubSlice.
move(SrcOff);
9364 SubSlice.
Array =
nullptr;
9366 SubSlice.
Length = VTSize;
9369 if (
Value.getNode()) {
9373 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
9378 if (!Store.getNode()) {
9387 bool isDereferenceable =
9390 if (isDereferenceable)
9405 DstPtrInfo.
getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
9415 unsigned NumLdStInMemcpy = OutStoreChains.
size();
9417 if (NumLdStInMemcpy) {
9423 for (
unsigned i = 0; i < NumLdStInMemcpy; ++i) {
9429 if (NumLdStInMemcpy <= GluedLdStLimit) {
9431 NumLdStInMemcpy, OutLoadChains,
9434 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
9435 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
9436 unsigned GlueIter = 0;
9439 if (RemainingLdStInMemcpy) {
9441 DAG, dl, OutChains, NumLdStInMemcpy - RemainingLdStInMemcpy,
9442 NumLdStInMemcpy, OutLoadChains, OutStoreChains);
9445 for (
unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
9446 unsigned IndexFrom = NumLdStInMemcpy - RemainingLdStInMemcpy -
9447 GlueIter - GluedLdStLimit;
9448 unsigned IndexTo = NumLdStInMemcpy - RemainingLdStInMemcpy - GlueIter;
9450 OutLoadChains, OutStoreChains);
9451 GlueIter += GluedLdStLimit;
9462 bool isVol,
bool AlwaysInline,
9476 std::vector<EVT> MemOps;
9477 bool DstAlignCanChange =
false;
9483 DstAlignCanChange =
true;
9485 if (!SrcAlign || Alignment > *SrcAlign)
9486 SrcAlign = Alignment;
9487 assert(SrcAlign &&
"SrcAlign must be set");
9496 if (DstAlignCanChange) {
9497 Type *Ty = MemOps[0].getTypeForEVT(
C);
9498 Align NewAlign =
DL.getABITypeAlign(Ty);
9504 if (!
TRI->hasStackRealignment(MF))
9506 NewAlign = std::min(NewAlign, *StackAlign);
9508 if (NewAlign > Alignment) {
9512 Alignment = NewAlign;
9526 unsigned NumMemOps = MemOps.size();
9527 for (
unsigned i = 0; i < NumMemOps; i++) {
9531 bool IsOverlapping =
false;
9533 if (i == NumMemOps - 1 && i != 0 && VTSize >
Size - SrcOff) {
9536 SrcOff =
Size - VTSize;
9537 IsOverlapping =
true;
9544 if (IsOverlapping) {
9549 SrcAlignAtOffset, MMOFlags,
9558 bool isDereferenceable =
9561 if (isDereferenceable)
9567 SrcMMOFlags, NewAAInfo);
9575 for (
unsigned i = 0; i < NumMemOps; i++) {
9579 bool IsOverlapping =
false;
9581 if (i == NumMemOps - 1 && i != 0 && VTSize >
Size - DstOff) {
9584 DstOff =
Size - VTSize;
9585 IsOverlapping =
true;
9592 if (IsOverlapping) {
9597 DstAlignAtOffset, MMOFlags,
9606 Chain, dl, LoadValues[i],
9608 DstPtrInfo.
getWithOffset(DstOff), DstAlignAtOffset, MMOFlags,
9649 std::vector<EVT> MemOps;
9650 bool DstAlignCanChange =
false;
9657 DstAlignCanChange =
true;
9664 MemOp::Set(
Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
9669 if (DstAlignCanChange) {
9672 Align NewAlign =
DL.getABITypeAlign(Ty);
9678 if (!
TRI->hasStackRealignment(MF))
9680 NewAlign = std::min(NewAlign, *StackAlign);
9682 if (NewAlign > Alignment) {
9686 Alignment = NewAlign;
9692 unsigned NumMemOps = MemOps.size();
9697 LargestVT = MemOps[0];
9698 for (
unsigned i = 1; i < NumMemOps; i++)
9699 if (MemOps[i].bitsGT(LargestVT))
9700 LargestVT = MemOps[i];
9708 for (
unsigned i = 0; i < NumMemOps; i++) {
9713 assert(
Size > 0 &&
"Target specified more stores than needed in "
9714 "findOptimalMemOpLowering");
9715 if (VTSize >
Size) {
9718 assert(i == NumMemOps-1 && i != 0);
9719 DstOff -= VTSize -
Size;
9726 if (VT.
bitsLT(LargestVT)) {
9746 assert(
Value.getValueType() == VT &&
"Value with wrong type.");
9757 if (VTSize >
Size) {
9766 assert(
Size == 0 &&
"Target's findOptimalMemOpLowering did not specify "
9767 "stores that exactly cover the memset size");
9784 bool AllowReturnsFirstArg) {
9790 AllowReturnsFirstArg &&
9794static std::pair<SDValue, SDValue>
9801 if (LCImpl == RTLIB::Unsupported)
9813 CI->
getType(), Callee, std::move(Args))
9826 RTLIB::STRCMP,
this, TLI);
9836 RTLIB::STRSTR,
this, TLI);
9852 RTLIB::MEMCCPY,
this, TLI);
9855std::pair<SDValue, SDValue>
9864 RTLIB::MEMCMP,
this, TLI);
9874 RTLIB::STRCPY,
this, TLI);
9885 RTLIB::STRLEN,
this, TLI);
9890 Align Alignment,
bool isVol,
bool AlwaysInline,
const CallInst *CI,
9899 if (ConstantSize->
isZero())
9903 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9904 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9905 if (Result.getNode())
9912 SDValue Result = TSI->EmitTargetCodeForMemcpy(
9913 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline,
9914 DstPtrInfo, SrcPtrInfo);
9915 if (Result.getNode())
9922 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
9924 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
9925 isVol,
true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9940 Args.emplace_back(Dst, PtrTy);
9941 Args.emplace_back(Src, PtrTy);
9945 bool IsTailCall =
false;
9946 RTLIB::LibcallImpl MemCpyImpl = TLI->getMemcpyImpl();
9948 if (OverrideTailCall.has_value()) {
9949 IsTailCall = *OverrideTailCall;
9951 bool LowersToMemcpy = MemCpyImpl == RTLIB::impl_memcpy;
9958 Libcalls->getLibcallImplCallingConv(MemCpyImpl),
9959 Dst.getValueType().getTypeForEVT(*
getContext()),
9965 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9966 return CallResult.second;
9971 Type *SizeTy,
unsigned ElemSz,
9978 Args.emplace_back(Dst, ArgTy);
9979 Args.emplace_back(Src, ArgTy);
9980 Args.emplace_back(
Size, SizeTy);
9982 RTLIB::Libcall LibraryCall =
9984 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
9985 if (LibcallImpl == RTLIB::Unsupported)
9992 Libcalls->getLibcallImplCallingConv(LibcallImpl),
9999 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10000 return CallResult.second;
10006 std::optional<bool> OverrideTailCall,
10014 if (ConstantSize) {
10016 if (ConstantSize->
isZero())
10020 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
10021 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo);
10022 if (Result.getNode())
10030 TSI->EmitTargetCodeForMemmove(*
this, dl, Chain, Dst, Src,
Size,
10031 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
10032 if (Result.getNode())
10045 Args.emplace_back(Dst, PtrTy);
10046 Args.emplace_back(Src, PtrTy);
10051 RTLIB::LibcallImpl MemmoveImpl = Libcalls->getLibcallImpl(RTLIB::MEMMOVE);
10053 bool IsTailCall =
false;
10054 if (OverrideTailCall.has_value()) {
10055 IsTailCall = *OverrideTailCall;
10057 bool LowersToMemmove = MemmoveImpl == RTLIB::impl_memmove;
10064 Libcalls->getLibcallImplCallingConv(MemmoveImpl),
10065 Dst.getValueType().getTypeForEVT(*
getContext()),
10071 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
10072 return CallResult.second;
10077 Type *SizeTy,
unsigned ElemSz,
10084 Args.emplace_back(Dst, IntPtrTy);
10085 Args.emplace_back(Src, IntPtrTy);
10086 Args.emplace_back(
Size, SizeTy);
10088 RTLIB::Libcall LibraryCall =
10090 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10091 if (LibcallImpl == RTLIB::Unsupported)
10098 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10105 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10106 return CallResult.second;
10111 bool isVol,
bool AlwaysInline,
10118 if (ConstantSize) {
10120 if (ConstantSize->
isZero())
10125 isVol,
false, DstPtrInfo, AAInfo);
10127 if (Result.getNode())
10134 SDValue Result = TSI->EmitTargetCodeForMemset(
10135 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
10136 if (Result.getNode())
10142 if (AlwaysInline) {
10143 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
10146 isVol,
true, DstPtrInfo, AAInfo);
10148 "getMemsetStores must return a valid sequence when AlwaysInline");
10162 RTLIB::LibcallImpl BzeroImpl = Libcalls->getLibcallImpl(RTLIB::BZERO);
10163 bool UseBZero = BzeroImpl != RTLIB::Unsupported &&
isNullConstant(Src);
10169 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
10171 Libcalls->getLibcallImplCallingConv(BzeroImpl),
Type::getVoidTy(Ctx),
10174 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10178 Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
10179 Args.emplace_back(
Size,
DL.getIntPtrType(Ctx));
10180 CLI.
setLibCallee(Libcalls->getLibcallImplCallingConv(MemsetImpl),
10181 Dst.getValueType().getTypeForEVT(Ctx),
10186 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10187 bool LowersToMemset = MemsetImpl == RTLIB::impl_memset;
10198 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10199 return CallResult.second;
10204 Type *SizeTy,
unsigned ElemSz,
10211 Args.emplace_back(
Size, SizeTy);
10213 RTLIB::Libcall LibraryCall =
10215 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10216 if (LibcallImpl == RTLIB::Unsupported)
10223 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10230 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10231 return CallResult.second;
10241 ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
10242 dl.
getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
10245 void* IP =
nullptr;
10247 E->refineAlignment(MMO);
10248 E->refineRanges(MMO);
10253 VTList, MemVT, MMO, ExtType);
10254 createOperands(
N,
Ops);
10256 CSEMap.InsertNode(
N, IP);
10293 "Invalid Atomic Op");
10313 if (
Ops.size() == 1)
10328 if (
Size.hasValue() && !
Size.getValue())
10333 MF.getMachineMemOperand(PtrInfo, Flags,
Size, Alignment, AAInfo);
10349 assert(!MMOs.
empty() &&
"Must have at least one MMO");
10353 (Opcode <= (
unsigned)std::numeric_limits<int>::max() &&
10355 "Opcode is not a memory-accessing opcode!");
10358 if (MMOs.
size() == 1) {
10364 void *Buffer = Allocator.Allocate(AllocSize,
alignof(
size_t));
10365 size_t *CountPtr =
static_cast<size_t *
>(Buffer);
10366 *CountPtr = MMOs.
size();
10375 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
10378 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
10379 Opcode, dl.
getIROrder(), VTList, MemVT, MemRefs));
10382 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10383 ID.AddInteger(MMO->getFlags());
10385 void *IP =
nullptr;
10386 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10392 VTList, MemVT, MemRefs);
10393 createOperands(
N,
Ops);
10394 CSEMap.InsertNode(
N, IP);
10397 VTList, MemVT, MemRefs);
10398 createOperands(
N,
Ops);
10407 SDValue Chain,
int FrameIndex) {
10409 const auto VTs =
getVTList(MVT::Other);
10418 ID.AddInteger(FrameIndex);
10419 void *IP =
nullptr;
10420 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10425 createOperands(
N,
Ops);
10426 CSEMap.InsertNode(
N, IP);
10437 const auto VTs =
getVTList(MVT::Other);
10442 ID.AddInteger(Index);
10443 void *IP =
nullptr;
10444 if (
SDNode *E = FindNodeOrInsertPos(
ID, Dl, IP))
10447 auto *
N = newSDNode<PseudoProbeSDNode>(
10449 createOperands(
N,
Ops);
10450 CSEMap.InsertNode(
N, IP);
10467 FI->getIndex(),
Offset);
10504 "Invalid chain type");
10516 Alignment, AAInfo, Ranges);
10517 return getLoad(AM, ExtType, VT, dl, Chain, Ptr,
Offset, MemVT, MMO);
10527 assert(VT == MemVT &&
"Non-extending load from different memory type!");
10531 "Should only be an extending load, not truncating!");
10533 "Cannot convert from FP to Int or Int -> FP!");
10535 "Cannot use an ext load to convert to or from a vector!");
10538 "Cannot use an ext load to change the number of vector elements!");
10545 "Range metadata and load type must match!");
10556 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
10557 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
10560 void *IP =
nullptr;
10562 E->refineAlignment(MMO);
10563 E->refineRanges(MMO);
10567 ExtType, MemVT, MMO);
10568 createOperands(
N,
Ops);
10570 CSEMap.InsertNode(
N, IP);
10584 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
10602 MemVT, Alignment, MMOFlags, AAInfo);
10617 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
10620 LD->getMemOperand()->getFlags() &
10623 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
10624 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
10643 MF.getMachineMemOperand(PtrInfo, MMOFlags,
Size, Alignment, AAInfo);
10644 return getStore(Chain, dl, Val, Ptr, MMO);
10657 bool IsTruncating) {
10661 IsTruncating =
false;
10662 }
else if (!IsTruncating) {
10663 assert(VT == SVT &&
"No-truncating store from different memory type!");
10666 "Should only be a truncating store, not extending!");
10669 "Cannot use trunc store to convert to or from a vector!");
10672 "Cannot use trunc store to change the number of vector elements!");
10683 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
10684 dl.
getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
10687 void *IP =
nullptr;
10688 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10693 IsTruncating, SVT, MMO);
10694 createOperands(
N,
Ops);
10696 CSEMap.InsertNode(
N, IP);
10709 "Invalid chain type");
10719 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
10734 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
10736 ST->getMemoryVT(), ST->getMemOperand(), AM,
10737 ST->isTruncatingStore());
10745 const MDNode *Ranges,
bool IsExpanding) {
10756 Alignment, AAInfo, Ranges);
10757 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr,
Offset, Mask, EVL, MemVT,
10766 bool IsExpanding) {
10768 assert(Mask.getValueType().getVectorElementCount() ==
10770 "Vector width mismatch between mask and data");
10781 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
10782 dl.
getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10785 void *IP =
nullptr;
10787 E->refineAlignment(MMO);
10788 E->refineRanges(MMO);
10792 ExtType, IsExpanding, MemVT, MMO);
10793 createOperands(
N,
Ops);
10795 CSEMap.InsertNode(
N, IP);
10808 bool IsExpanding) {
10811 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
10820 Mask, EVL, VT, MMO, IsExpanding);
10829 const AAMDNodes &AAInfo,
bool IsExpanding) {
10832 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo,
nullptr,
10842 EVL, MemVT, MMO, IsExpanding);
10849 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
10852 LD->getMemOperand()->getFlags() &
10855 LD->getChain(),
Base,
Offset, LD->getMask(),
10856 LD->getVectorLength(), LD->getPointerInfo(),
10857 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
10858 nullptr, LD->isExpandingLoad());
10865 bool IsCompressing) {
10867 assert(Mask.getValueType().getVectorElementCount() ==
10869 "Vector width mismatch between mask and data");
10879 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10880 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10883 void *IP =
nullptr;
10884 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10889 IsTruncating, IsCompressing, MemVT, MMO);
10890 createOperands(
N,
Ops);
10892 CSEMap.InsertNode(
N, IP);
10905 bool IsCompressing) {
10916 PtrInfo, MMOFlags, SVT.
getStoreSize(), Alignment, AAInfo);
10925 bool IsCompressing) {
10932 false, IsCompressing);
10935 "Should only be a truncating store, not extending!");
10938 "Cannot use trunc store to convert to or from a vector!");
10941 "Cannot use trunc store to change the number of vector elements!");
10949 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10953 void *IP =
nullptr;
10954 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
10961 createOperands(
N,
Ops);
10963 CSEMap.InsertNode(
N, IP);
10974 assert(ST->getOffset().isUndef() &&
"Store is already an indexed store!");
10977 Offset, ST->getMask(), ST->getVectorLength()};
10980 ID.AddInteger(ST->getMemoryVT().getRawBits());
10981 ID.AddInteger(ST->getRawSubclassData());
10982 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
10983 ID.AddInteger(ST->getMemOperand()->getFlags());
10984 void *IP =
nullptr;
10985 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
10988 auto *
N = newSDNode<VPStoreSDNode>(
10990 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
10991 createOperands(
N,
Ops);
10993 CSEMap.InsertNode(
N, IP);
11013 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
11014 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
11017 void *IP =
nullptr;
11018 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11024 newSDNode<VPStridedLoadSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs, AM,
11025 ExtType, IsExpanding, MemVT, MMO);
11026 createOperands(
N,
Ops);
11027 CSEMap.InsertNode(
N, IP);
11038 bool IsExpanding) {
11041 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
11050 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
11059 bool IsTruncating,
bool IsCompressing) {
11069 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
11070 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
11072 void *IP =
nullptr;
11073 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11077 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
11078 VTs, AM, IsTruncating,
11079 IsCompressing, MemVT, MMO);
11080 createOperands(
N,
Ops);
11082 CSEMap.InsertNode(
N, IP);
11094 bool IsCompressing) {
11101 false, IsCompressing);
11104 "Should only be a truncating store, not extending!");
11107 "Cannot use trunc store to convert to or from a vector!");
11110 "Cannot use trunc store to change the number of vector elements!");
11118 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
11121 void *IP =
nullptr;
11122 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11126 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
11128 IsCompressing, SVT, MMO);
11129 createOperands(
N,
Ops);
11131 CSEMap.InsertNode(
N, IP);
11141 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
11146 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
11150 void *IP =
nullptr;
11151 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11157 VT, MMO, IndexType);
11158 createOperands(
N,
Ops);
11160 assert(
N->getMask().getValueType().getVectorElementCount() ==
11161 N->getValueType(0).getVectorElementCount() &&
11162 "Vector width mismatch between mask and data");
11163 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11164 N->getValueType(0).getVectorElementCount().isScalable() &&
11165 "Scalable flags of index and data do not match");
11167 N->getIndex().getValueType().getVectorElementCount(),
11168 N->getValueType(0).getVectorElementCount()) &&
11169 "Vector width mismatch between index and data");
11171 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11172 "Scale should be a constant power of 2");
11174 CSEMap.InsertNode(
N, IP);
11185 assert(
Ops.size() == 7 &&
"Incompatible number of operands");
11190 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
11194 void *IP =
nullptr;
11195 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11200 VT, MMO, IndexType);
11201 createOperands(
N,
Ops);
11203 assert(
N->getMask().getValueType().getVectorElementCount() ==
11204 N->getValue().getValueType().getVectorElementCount() &&
11205 "Vector width mismatch between mask and data");
11207 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11208 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11209 "Scalable flags of index and data do not match");
11211 N->getIndex().getValueType().getVectorElementCount(),
11212 N->getValue().getValueType().getVectorElementCount()) &&
11213 "Vector width mismatch between index and data");
11215 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11216 "Scale should be a constant power of 2");
11218 CSEMap.InsertNode(
N, IP);
11233 "Unindexed masked load with an offset!");
11240 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
11241 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
11244 void *IP =
nullptr;
11245 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11250 AM, ExtTy, isExpanding, MemVT, MMO);
11251 createOperands(
N,
Ops);
11253 CSEMap.InsertNode(
N, IP);
11264 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
11266 Offset, LD->getMask(), LD->getPassThru(),
11267 LD->getMemoryVT(), LD->getMemOperand(), AM,
11268 LD->getExtensionType(), LD->isExpandingLoad());
11276 bool IsCompressing) {
11278 "Invalid chain type");
11281 "Unindexed masked store with an offset!");
11288 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
11289 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
11292 void *IP =
nullptr;
11293 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11299 IsTruncating, IsCompressing, MemVT, MMO);
11300 createOperands(
N,
Ops);
11302 CSEMap.InsertNode(
N, IP);
11313 assert(ST->getOffset().isUndef() &&
11314 "Masked store is already a indexed store!");
11316 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
11317 AM, ST->isTruncatingStore(), ST->isCompressingStore());
11325 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
11330 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
11331 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
11334 void *IP =
nullptr;
11335 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11341 VTs, MemVT, MMO, IndexType, ExtTy);
11342 createOperands(
N,
Ops);
11344 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
11345 "Incompatible type of the PassThru value in MaskedGatherSDNode");
11346 assert(
N->getMask().getValueType().getVectorElementCount() ==
11347 N->getValueType(0).getVectorElementCount() &&
11348 "Vector width mismatch between mask and data");
11349 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11350 N->getValueType(0).getVectorElementCount().isScalable() &&
11351 "Scalable flags of index and data do not match");
11353 N->getIndex().getValueType().getVectorElementCount(),
11354 N->getValueType(0).getVectorElementCount()) &&
11355 "Vector width mismatch between index and data");
11357 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11358 "Scale should be a constant power of 2");
11360 CSEMap.InsertNode(
N, IP);
11372 assert(
Ops.size() == 6 &&
"Incompatible number of operands");
11377 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
11378 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
11381 void *IP =
nullptr;
11382 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11388 VTs, MemVT, MMO, IndexType, IsTrunc);
11389 createOperands(
N,
Ops);
11391 assert(
N->getMask().getValueType().getVectorElementCount() ==
11392 N->getValue().getValueType().getVectorElementCount() &&
11393 "Vector width mismatch between mask and data");
11395 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11396 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11397 "Scalable flags of index and data do not match");
11399 N->getIndex().getValueType().getVectorElementCount(),
11400 N->getValue().getValueType().getVectorElementCount()) &&
11401 "Vector width mismatch between index and data");
11403 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11404 "Scale should be a constant power of 2");
11406 CSEMap.InsertNode(
N, IP);
11417 assert(
Ops.size() == 7 &&
"Incompatible number of operands");
11422 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
11423 dl.
getIROrder(), VTs, MemVT, MMO, IndexType));
11426 void *IP =
nullptr;
11427 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
11433 VTs, MemVT, MMO, IndexType);
11434 createOperands(
N,
Ops);
11436 assert(
N->getMask().getValueType().getVectorElementCount() ==
11437 N->getIndex().getValueType().getVectorElementCount() &&
11438 "Vector width mismatch between mask and data");
11440 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11441 "Scale should be a constant power of 2");
11442 assert(
N->getInc().getValueType().isInteger() &&
"Non integer update value");
11444 CSEMap.InsertNode(
N, IP);
11459 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(
DL.getIROrder(),
11463 void *IP =
nullptr;
11464 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11468 auto *
N = newSDNode<VPLoadFFSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs,
11470 createOperands(
N,
Ops);
11472 CSEMap.InsertNode(
N, IP);
11487 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11491 void *IP =
nullptr;
11492 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
11497 createOperands(
N,
Ops);
11499 CSEMap.InsertNode(
N, IP);
11514 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11518 void *IP =
nullptr;
11519 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
11524 createOperands(
N,
Ops);
11526 CSEMap.InsertNode(
N, IP);
11537 if (
Cond.isUndef())
11572 return !Val || Val->getAPIntValue().uge(
X.getScalarValueSizeInBits());
11578 if (
X.getValueType().getScalarType() == MVT::i1)
11591 bool HasNan = (XC && XC->
getValueAPF().isNaN()) ||
11593 bool HasInf = (XC && XC->
getValueAPF().isInfinity()) ||
11596 if (Flags.hasNoNaNs() && (HasNan ||
X.isUndef() ||
Y.isUndef()))
11599 if (Flags.hasNoInfs() && (HasInf ||
X.isUndef() ||
Y.isUndef()))
11622 if (Opcode ==
ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
11637 switch (
Ops.size()) {
11638 case 0:
return getNode(Opcode,
DL, VT);
11648 return getNode(Opcode,
DL, VT, NewOps);
11655 Flags = Inserter->getFlags();
11663 case 0:
return getNode(Opcode,
DL, VT);
11664 case 1:
return getNode(Opcode,
DL, VT,
Ops[0], Flags);
11671 for (
const auto &
Op :
Ops)
11673 "Operand is DELETED_NODE!");
11690 "LHS and RHS of condition must have same type!");
11692 "True and False arms of SelectCC must have same type!");
11694 "select_cc node must be of same type as true and false value!");
11698 "Expected select_cc with vector result to have the same sized "
11699 "comparison type!");
11704 "LHS/RHS of comparison should match types!");
11710 Opcode = ISD::VP_XOR;
11715 Opcode = ISD::VP_AND;
11717 case ISD::VP_REDUCE_MUL:
11720 Opcode = ISD::VP_REDUCE_AND;
11722 case ISD::VP_REDUCE_ADD:
11725 Opcode = ISD::VP_REDUCE_XOR;
11727 case ISD::VP_REDUCE_SMAX:
11728 case ISD::VP_REDUCE_UMIN:
11732 Opcode = ISD::VP_REDUCE_AND;
11734 case ISD::VP_REDUCE_SMIN:
11735 case ISD::VP_REDUCE_UMAX:
11739 Opcode = ISD::VP_REDUCE_OR;
11747 if (VT != MVT::Glue) {
11750 void *IP =
nullptr;
11752 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11753 E->intersectFlagsWith(Flags);
11757 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
11758 createOperands(
N,
Ops);
11760 CSEMap.InsertNode(
N, IP);
11762 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
11763 createOperands(
N,
Ops);
11766 N->setFlags(Flags);
11777 Flags = Inserter->getFlags();
11791 Flags = Inserter->getFlags();
11801 for (
const auto &
Op :
Ops)
11803 "Operand is DELETED_NODE!");
11812 "Invalid add/sub overflow op!");
11814 Ops[0].getValueType() ==
Ops[1].getValueType() &&
11815 Ops[0].getValueType() == VTList.
VTs[0] &&
11816 "Binary operator types must match!");
11823 if (N2CV && N2CV->
isZero()) {
11854 "Invalid add/sub overflow op!");
11856 Ops[0].getValueType() ==
Ops[1].getValueType() &&
11857 Ops[0].getValueType() == VTList.
VTs[0] &&
11858 Ops[2].getValueType() == VTList.
VTs[1] &&
11859 "Binary operator types must match!");
11863 assert(VTList.
NumVTs == 2 &&
Ops.size() == 2 &&
"Invalid mul lo/hi op!");
11865 VTList.
VTs[0] ==
Ops[0].getValueType() &&
11866 VTList.
VTs[0] ==
Ops[1].getValueType() &&
11867 "Binary operator types must match!");
11873 unsigned OutWidth = Width * 2;
11874 APInt Val = LHS->getAPIntValue();
11877 Val = Val.
sext(OutWidth);
11878 Mul =
Mul.sext(OutWidth);
11880 Val = Val.
zext(OutWidth);
11881 Mul =
Mul.zext(OutWidth);
11893 assert(VTList.
NumVTs == 2 &&
Ops.size() == 1 &&
"Invalid ffrexp op!");
11895 VTList.
VTs[0] ==
Ops[0].getValueType() &&
"frexp type mismatch");
11903 DL, VTList.
VTs[1]);
11911 "Invalid STRICT_FP_EXTEND!");
11913 Ops[1].getValueType().isFloatingPoint() &&
"Invalid FP cast!");
11915 "STRICT_FP_EXTEND result type should be vector iff the operand "
11916 "type is vector!");
11919 Ops[1].getValueType().getVectorElementCount()) &&
11920 "Vector element count mismatch!");
11922 "Invalid fpext node, dst <= src!");
11925 assert(VTList.
NumVTs == 2 &&
Ops.size() == 3 &&
"Invalid STRICT_FP_ROUND!");
11927 "STRICT_FP_ROUND result type should be vector iff the operand "
11928 "type is vector!");
11931 Ops[1].getValueType().getVectorElementCount()) &&
11932 "Vector element count mismatch!");
11934 Ops[1].getValueType().isFloatingPoint() &&
11937 (
Ops[2]->getAsZExtVal() == 0 ||
Ops[2]->getAsZExtVal() == 1) &&
11938 "Invalid STRICT_FP_ROUND!");
11944 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
11947 void *IP =
nullptr;
11948 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
11949 E->intersectFlagsWith(Flags);
11953 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11954 createOperands(
N,
Ops);
11955 CSEMap.InsertNode(
N, IP);
11957 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
11958 createOperands(
N,
Ops);
11961 N->setFlags(Flags);
12008 return makeVTList(&(*EVTs.insert(VT).first), 1);
12017 void *IP =
nullptr;
12020 EVT *Array = Allocator.Allocate<
EVT>(2);
12023 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 2);
12024 VTListMap.InsertNode(Result, IP);
12026 return Result->getSDVTList();
12036 void *IP =
nullptr;
12039 EVT *Array = Allocator.Allocate<
EVT>(3);
12043 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 3);
12044 VTListMap.InsertNode(Result, IP);
12046 return Result->getSDVTList();
12057 void *IP =
nullptr;
12060 EVT *Array = Allocator.Allocate<
EVT>(4);
12065 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 4);
12066 VTListMap.InsertNode(Result, IP);
12068 return Result->getSDVTList();
12072 unsigned NumVTs = VTs.
size();
12074 ID.AddInteger(NumVTs);
12075 for (
unsigned index = 0; index < NumVTs; index++) {
12076 ID.AddInteger(VTs[index].getRawBits());
12079 void *IP =
nullptr;
12082 EVT *Array = Allocator.Allocate<
EVT>(NumVTs);
12084 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, NumVTs);
12085 VTListMap.InsertNode(Result, IP);
12087 return Result->getSDVTList();
12098 assert(
N->getNumOperands() == 1 &&
"Update with wrong number of operands");
12101 if (
Op ==
N->getOperand(0))
return N;
12104 void *InsertPos =
nullptr;
12105 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Op, InsertPos))
12110 if (!RemoveNodeFromCSEMaps(
N))
12111 InsertPos =
nullptr;
12114 N->OperandList[0].set(
Op);
12118 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
12123 assert(
N->getNumOperands() == 2 &&
"Update with wrong number of operands");
12126 if (Op1 ==
N->getOperand(0) && Op2 ==
N->getOperand(1))
12130 void *InsertPos =
nullptr;
12131 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Op1, Op2, InsertPos))
12136 if (!RemoveNodeFromCSEMaps(
N))
12137 InsertPos =
nullptr;
12140 if (
N->OperandList[0] != Op1)
12141 N->OperandList[0].set(Op1);
12142 if (
N->OperandList[1] != Op2)
12143 N->OperandList[1].set(Op2);
12147 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
12167 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
12175 "Update with wrong number of operands");
12178 if (std::equal(
Ops.begin(),
Ops.end(),
N->op_begin()))
12182 void *InsertPos =
nullptr;
12183 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Ops, InsertPos))
12188 if (!RemoveNodeFromCSEMaps(
N))
12189 InsertPos =
nullptr;
12192 for (
unsigned i = 0; i !=
NumOps; ++i)
12193 if (
N->OperandList[i] !=
Ops[i])
12194 N->OperandList[i].set(
Ops[i]);
12198 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
12215 if (NewMemRefs.
empty()) {
12221 if (NewMemRefs.
size() == 1) {
12222 N->MemRefs = NewMemRefs[0];
12228 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.
size());
12230 N->MemRefs = MemRefsBuffer;
12231 N->NumMemRefs =
static_cast<int>(NewMemRefs.
size());
12303 New->setNodeId(-1);
12323 unsigned Order = std::min(
N->getIROrder(), OLoc.
getIROrder());
12324 N->setIROrder(Order);
12347 void *IP =
nullptr;
12348 if (VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue) {
12352 return UpdateSDLocOnMergeSDNode(ON,
SDLoc(
N));
12355 if (!RemoveNodeFromCSEMaps(
N))
12360 N->ValueList = VTs.
VTs;
12370 if (Used->use_empty())
12371 DeadNodeSet.
insert(Used);
12376 MN->clearMemRefs();
12380 createOperands(
N,
Ops);
12384 if (!DeadNodeSet.
empty()) {
12386 for (
SDNode *
N : DeadNodeSet)
12387 if (
N->use_empty())
12393 CSEMap.InsertNode(
N, IP);
12398 unsigned OrigOpc =
Node->getOpcode();
12403#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12404 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
12405#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12406 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
12407#include "llvm/IR/ConstrainedOps.def"
12410 assert(
Node->getNumValues() == 2 &&
"Unexpected number of results!");
12418 for (
unsigned i = 1, e =
Node->getNumOperands(); i != e; ++i)
12419 Ops.push_back(
Node->getOperand(i));
12536 bool DoCSE = VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue;
12538 void *IP =
nullptr;
12544 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
12550 N = newSDNode<MachineSDNode>(~Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
12551 createOperands(
N,
Ops);
12554 CSEMap.InsertNode(
N, IP);
12567 VT, Operand, SRIdxVal);
12577 VT, Operand, Subreg, SRIdxVal);
12585 bool AllowCommute) {
12588 Flags = Inserter->getFlags();
12595 bool AllowCommute) {
12596 if (VTList.
VTs[VTList.
NumVTs - 1] == MVT::Glue)
12602 void *IP =
nullptr;
12603 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP)) {
12604 E->intersectFlagsWith(Flags);
12613 if (AllowCommute && TLI->isCommutativeBinOp(Opcode))
12622 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
12625 void *IP =
nullptr;
12626 if (FindNodeOrInsertPos(
ID,
SDLoc(), IP))
12636 SDNode *
N,
unsigned R,
bool IsIndirect,
12639 "Expected inlined-at fields to agree");
12640 return new (DbgInfo->getAlloc())
12642 {}, IsIndirect,
DL, O,
12652 "Expected inlined-at fields to agree");
12653 return new (DbgInfo->getAlloc())
12666 "Expected inlined-at fields to agree");
12678 "Expected inlined-at fields to agree");
12679 return new (DbgInfo->getAlloc())
12681 Dependencies, IsIndirect,
DL, O,
12690 "Expected inlined-at fields to agree");
12691 return new (DbgInfo->getAlloc())
12693 {}, IsIndirect,
DL, O,
12701 unsigned O,
bool IsVariadic) {
12703 "Expected inlined-at fields to agree");
12704 return new (DbgInfo->getAlloc())
12705 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
12706 DL, O, IsVariadic);
12710 unsigned OffsetInBits,
unsigned SizeInBits,
12711 bool InvalidateDbg) {
12714 assert(FromNode && ToNode &&
"Can't modify dbg values");
12719 if (From == To || FromNode == ToNode)
12731 if (Dbg->isInvalidated())
12739 auto NewLocOps = Dbg->copyLocationOps();
12741 NewLocOps.begin(), NewLocOps.end(),
12743 bool Match = Op == FromLocOp;
12753 auto *Expr = Dbg->getExpression();
12759 if (
auto FI = Expr->getFragmentInfo())
12760 if (OffsetInBits + SizeInBits > FI->SizeInBits)
12769 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
12772 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
12773 Dbg->getDebugLoc(), std::max(ToNode->
getIROrder(), Dbg->getOrder()),
12774 Dbg->isVariadic());
12777 if (InvalidateDbg) {
12779 Dbg->setIsInvalidated();
12780 Dbg->setIsEmitted();
12786 "Transferred DbgValues should depend on the new SDNode");
12792 if (!
N.getHasDebugValue())
12795 auto GetLocationOperand = [](
SDNode *
Node,
unsigned ResNo) {
12803 if (DV->isInvalidated())
12805 switch (
N.getOpcode()) {
12815 Offset =
N.getConstantOperandVal(1);
12818 if (!RHSConstant && DV->isIndirect())
12825 auto *DIExpr = DV->getExpression();
12826 auto NewLocOps = DV->copyLocationOps();
12828 size_t OrigLocOpsSize = NewLocOps.size();
12829 for (
size_t i = 0; i < OrigLocOpsSize; ++i) {
12834 NewLocOps[i].getSDNode() != &
N)
12845 const auto *TmpDIExpr =
12853 NewLocOps.push_back(RHS);
12862 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
12864 auto AdditionalDependencies = DV->getAdditionalDependencies();
12866 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
12867 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
12869 DV->setIsInvalidated();
12870 DV->setIsEmitted();
12872 N0.
getNode()->dumprFull(
this);
12873 dbgs() <<
" into " << *DIExpr <<
'\n');
12880 TypeSize ToSize =
N.getValueSizeInBits(0);
12884 auto NewLocOps = DV->copyLocationOps();
12886 for (
size_t i = 0; i < NewLocOps.size(); ++i) {
12888 NewLocOps[i].getSDNode() != &
N)
12900 DV->getAdditionalDependencies(), DV->isIndirect(),
12901 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
12904 DV->setIsInvalidated();
12905 DV->setIsEmitted();
12907 dbgs() <<
" into " << *DbgExpression <<
'\n');
12914 assert((!Dbg->getSDNodes().empty() ||
12917 return Op.getKind() == SDDbgOperand::FRAMEIX;
12919 "Salvaged DbgValue should depend on a new SDNode");
12928 "Expected inlined-at fields to agree");
12929 return new (DbgInfo->getAlloc())
SDDbgLabel(Label,
DL, O);
12944 while (UI != UE &&
N == UI->
getUser())
12952 :
SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
12965 "Cannot replace with this method!");
12966 assert(From != To.
getNode() &&
"Cannot replace uses of with self");
12981 RAUWUpdateListener Listener(*
this, UI, UE);
12986 RemoveNodeFromCSEMaps(
User);
13001 AddModifiedNodeToCSEMaps(
User);
13017 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i)
13020 "Cannot use this version of ReplaceAllUsesWith!");
13028 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i)
13030 assert((i < To->getNumValues()) &&
"Invalid To location");
13039 RAUWUpdateListener Listener(*
this, UI, UE);
13044 RemoveNodeFromCSEMaps(
User);
13060 AddModifiedNodeToCSEMaps(
User);
13077 for (
unsigned i = 0, e = From->
getNumValues(); i != e; ++i) {
13087 RAUWUpdateListener Listener(*
this, UI, UE);
13092 RemoveNodeFromCSEMaps(
User);
13098 bool To_IsDivergent =
false;
13113 AddModifiedNodeToCSEMaps(
User);
13126 if (From == To)
return;
13142 RAUWUpdateListener Listener(*
this, UI, UE);
13145 bool UserRemovedFromCSEMaps =
false;
13162 if (!UserRemovedFromCSEMaps) {
13163 RemoveNodeFromCSEMaps(
User);
13164 UserRemovedFromCSEMaps =
true;
13174 if (!UserRemovedFromCSEMaps)
13179 AddModifiedNodeToCSEMaps(
User);
13198bool operator<(
const UseMemo &L,
const UseMemo &R) {
13199 return (intptr_t)L.User < (intptr_t)R.User;
13206 SmallVectorImpl<UseMemo> &
Uses;
13208 void NodeDeleted(SDNode *
N, SDNode *
E)
override {
13209 for (UseMemo &Memo :
Uses)
13210 if (Memo.User ==
N)
13211 Memo.User =
nullptr;
13215 RAUOVWUpdateListener(SelectionDAG &d, SmallVectorImpl<UseMemo> &uses)
13216 : SelectionDAG::DAGUpdateListener(d),
Uses(uses) {}
13223 switch (
Node->getOpcode()) {
13235 if (TLI->isSDNodeAlwaysUniform(
N)) {
13236 assert(!TLI->isSDNodeSourceOfDivergence(
N, FLI, UA) &&
13237 "Conflicting divergence information!");
13240 if (TLI->isSDNodeSourceOfDivergence(
N, FLI, UA))
13242 for (
const auto &
Op :
N->ops()) {
13243 EVT VT =
Op.getValueType();
13246 if (VT != MVT::Other &&
Op.getNode()->isDivergent() &&
13258 if (
N->SDNodeBits.IsDivergent != IsDivergent) {
13259 N->SDNodeBits.IsDivergent = IsDivergent;
13262 }
while (!Worklist.
empty());
13265void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
13267 Order.reserve(AllNodes.size());
13269 unsigned NOps =
N.getNumOperands();
13272 Order.push_back(&
N);
13274 for (
size_t I = 0;
I != Order.size(); ++
I) {
13276 for (
auto *U :
N->users()) {
13277 unsigned &UnsortedOps = Degree[U];
13278 if (0 == --UnsortedOps)
13279 Order.push_back(U);
13284#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
13285void SelectionDAG::VerifyDAGDivergence() {
13286 std::vector<SDNode *> TopoOrder;
13287 CreateTopologicalOrder(TopoOrder);
13288 for (
auto *
N : TopoOrder) {
13290 "Divergence bit inconsistency detected");
13313 for (
unsigned i = 0; i != Num; ++i) {
13314 unsigned FromResNo = From[i].
getResNo();
13317 if (
Use.getResNo() == FromResNo) {
13319 Uses.push_back(Memo);
13326 RAUOVWUpdateListener Listener(*
this,
Uses);
13328 for (
unsigned UseIndex = 0, UseIndexEnd =
Uses.size();
13329 UseIndex != UseIndexEnd; ) {
13335 if (
User ==
nullptr) {
13341 RemoveNodeFromCSEMaps(
User);
13348 unsigned i =
Uses[UseIndex].Index;
13353 }
while (UseIndex != UseIndexEnd &&
Uses[UseIndex].
User ==
User);
13357 AddModifiedNodeToCSEMaps(
User);
13365 unsigned DAGSize = 0;
13381 unsigned Degree =
N.getNumOperands();
13384 N.setNodeId(DAGSize++);
13386 if (Q != SortedPos)
13387 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
13388 assert(SortedPos != AllNodes.end() &&
"Overran node list");
13392 N.setNodeId(Degree);
13404 unsigned Degree =
P->getNodeId();
13405 assert(Degree != 0 &&
"Invalid node degree");
13409 P->setNodeId(DAGSize++);
13410 if (
P->getIterator() != SortedPos)
13411 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(
P));
13412 assert(SortedPos != AllNodes.end() &&
"Overran node list");
13416 P->setNodeId(Degree);
13419 if (
Node.getIterator() == SortedPos) {
13423 dbgs() <<
"Overran sorted position:\n";
13425 dbgs() <<
"Checking if this is due to cycles\n";
13432 assert(SortedPos == AllNodes.end() &&
13433 "Topological sort incomplete!");
13435 "First node in topological sort is not the entry token!");
13436 assert(AllNodes.front().getNodeId() == 0 &&
13437 "First node in topological sort has non-zero id!");
13438 assert(AllNodes.front().getNumOperands() == 0 &&
13439 "First node in topological sort has operands!");
13440 assert(AllNodes.back().getNodeId() == (
int)DAGSize-1 &&
13441 "Last node in topologic sort has unexpected id!");
13442 assert(AllNodes.back().use_empty() &&
13443 "Last node in topologic sort has users!");
13450 SortedNodes.
clear();
13457 unsigned NumOperands =
N.getNumOperands();
13458 if (NumOperands == 0)
13462 RemainingOperands[&
N] = NumOperands;
13467 for (
unsigned i = 0U; i < SortedNodes.
size(); ++i) {
13468 const SDNode *
N = SortedNodes[i];
13469 for (
const SDNode *U :
N->users()) {
13474 unsigned &NumRemOperands = RemainingOperands[U];
13475 assert(NumRemOperands &&
"Invalid number of remaining operands");
13477 if (!NumRemOperands)
13482 assert(SortedNodes.
size() == AllNodes.size() &&
"Node count mismatch");
13484 "First node in topological sort is not the entry token");
13485 assert(SortedNodes.
front()->getNumOperands() == 0 &&
13486 "First node in topological sort has operands");
13492 for (
SDNode *SD : DB->getSDNodes()) {
13495 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
13496 SD->setHasDebugValue(
true);
13498 DbgInfo->add(DB, isParameter);
13511 if (OldChain == NewMemOpChain || OldChain.
use_empty())
13512 return NewMemOpChain;
13515 OldChain, NewMemOpChain);
13518 return TokenFactor;
13537 if (OutFunction !=
nullptr)
13545 std::string ErrorStr;
13547 ErrorFormatter <<
"Undefined external symbol ";
13548 ErrorFormatter <<
'"' << Symbol <<
'"';
13558 return Const !=
nullptr && Const->isZero();
13567 return Const !=
nullptr && Const->isZero() && !Const->isNegative();
13572 return Const !=
nullptr && Const->isAllOnes();
13577 return Const !=
nullptr && Const->isOne();
13582 return Const !=
nullptr && Const->isMinSignedValue();
13586 SDValue V,
unsigned OperandNo,
13587 unsigned Depth)
const {
13594 unsigned OperandNo,
unsigned Depth)
const {
13597 if (V.getValueType().isInteger()) {
13606 return Const.isZero();
13608 return Const.isOne();
13611 return Const.isAllOnes();
13613 return Const.isMinSignedValue();
13615 return Const.isMaxSignedValue();
13620 return OperandNo == 1 && Const.isZero();
13623 return OperandNo == 1 && Const.isOne();
13629 return ConstFP->isZero() &&
13630 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
13632 return OperandNo == 1 && ConstFP->isZero() &&
13633 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
13635 return ConstFP->isExactlyValue(1.0);
13637 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
13641 EVT VT = V.getValueType();
13649 return ConstFP->isExactlyValue(NeutralAF);
13663 while (V.getOpcode() ==
ISD::BITCAST && V.getOperand(0).hasOneUse())
13682 !DemandedElts[IndexC->getZExtValue()]) {
13701 unsigned NumBits = V.getScalarValueSizeInBits();
13704 return C && (
C->getAPIntValue().
countr_one() >= NumBits);
13708 bool AllowTruncation) {
13715 bool AllowTruncation) {
13722 EVT VecEltVT =
N->getValueType(0).getVectorElementType();
13724 EVT CVT = CN->getValueType(0);
13725 assert(CVT.
bitsGE(VecEltVT) &&
"Illegal splat_vector element extension");
13726 if (AllowTruncation || CVT == VecEltVT)
13733 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
13738 if (CN && (UndefElements.
none() || AllowUndefs)) {
13740 EVT NSVT =
N.getValueType().getScalarType();
13741 assert(CVT.
bitsGE(NSVT) &&
"Illegal build vector element extension");
13742 if (AllowTruncation || (CVT == NSVT))
13756 const APInt &DemandedElts,
13757 bool AllowUndefs) {
13764 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
13766 if (CN && (UndefElements.
none() || AllowUndefs))
13781 return C &&
C->isZero();
13787 return C &&
C->isOne();
13792 return C &&
C->isExactlyValue(1.0);
13797 unsigned BitWidth =
N.getScalarValueSizeInBits();
13799 return C &&
C->isAllOnes() &&
C->getValueSizeInBits(0) ==
BitWidth;
13805 APInt(
C->getAPIntValue().getBitWidth(), 1));
13811 return C &&
C->isZero();
13816 return C &&
C->isZero();
13827 bool IsVolatile =
false;
13828 bool IsNonTemporal =
false;
13829 bool IsDereferenceable =
true;
13830 bool IsInvariant =
true;
13832 IsVolatile |= MMO->isVolatile();
13833 IsNonTemporal |= MMO->isNonTemporal();
13834 IsDereferenceable &= MMO->isDereferenceable();
13835 IsInvariant &= MMO->isInvariant();
13861 std::vector<EVT> VTs;
13874const EVT *SDNode::getValueTypeList(
MVT VT) {
13875 static EVTArray SimpleVTArray;
13878 return &SimpleVTArray.VTs[VT.
SimpleTy];
13887 if (U.getResNo() ==
Value)
13925 return any_of(
N->op_values(),
13926 [
this](
SDValue Op) { return this == Op.getNode(); });
13940 unsigned Depth)
const {
13941 if (*
this == Dest)
return true;
13945 if (
Depth == 0)
return false;
13965 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
13971 if (Ld->isUnordered())
13972 return Ld->getChain().reachesChainWithoutSideEffects(Dest,
Depth-1);
13985 this->Flags &= Flags;
13991 bool AllowPartials) {
14006 unsigned CandidateBinOp =
Op.getOpcode();
14007 if (
Op.getValueType().isFloatingPoint()) {
14009 switch (CandidateBinOp) {
14011 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
14021 auto PartialReduction = [&](
SDValue Op,
unsigned NumSubElts) {
14022 if (!AllowPartials || !
Op)
14024 EVT OpVT =
Op.getValueType();
14027 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
14046 unsigned Stages =
Log2_32(
Op.getValueType().getVectorNumElements());
14048 for (
unsigned i = 0; i < Stages; ++i) {
14049 unsigned MaskEnd = (1 << i);
14051 if (
Op.getOpcode() != CandidateBinOp)
14052 return PartialReduction(PrevOp, MaskEnd);
14068 return PartialReduction(PrevOp, MaskEnd);
14071 for (
int Index = 0; Index < (int)MaskEnd; ++Index)
14072 if (Shuffle->
getMaskElt(Index) != (
int)(MaskEnd + Index))
14073 return PartialReduction(PrevOp, MaskEnd);
14080 while (
Op.getOpcode() == CandidateBinOp) {
14081 unsigned NumElts =
Op.getValueType().getVectorNumElements();
14090 if (NumSrcElts != (2 * NumElts))
14105 EVT VT =
N->getValueType(0);
14114 else if (NE > ResNE)
14117 if (
N->getNumValues() == 2) {
14120 EVT VT1 =
N->getValueType(1);
14124 for (i = 0; i != NE; ++i) {
14125 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
14126 SDValue Operand =
N->getOperand(j);
14134 SDValue EltOp =
getNode(
N->getOpcode(), dl, {EltVT, EltVT1}, Operands);
14139 for (; i < ResNE; ++i) {
14151 assert(
N->getNumValues() == 1 &&
14152 "Can't unroll a vector with multiple results!");
14158 for (i= 0; i != NE; ++i) {
14159 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
14160 SDValue Operand =
N->getOperand(j);
14168 Operands[j] = Operand;
14172 switch (
N->getOpcode()) {
14200 ASC->getSrcAddressSpace(),
14201 ASC->getDestAddressSpace()));
14207 for (; i < ResNE; ++i)
14216 unsigned Opcode =
N->getOpcode();
14220 "Expected an overflow opcode");
14222 EVT ResVT =
N->getValueType(0);
14223 EVT OvVT =
N->getValueType(1);
14232 else if (NE > ResNE)
14244 for (
unsigned i = 0; i < NE; ++i) {
14245 SDValue Res =
getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
14268 if (LD->isVolatile() ||
Base->isVolatile())
14271 if (!LD->isSimple())
14273 if (LD->isIndexed() ||
Base->isIndexed())
14275 if (LD->getChain() !=
Base->getChain())
14277 EVT VT = LD->getMemoryVT();
14285 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
14286 return (Dist * (int64_t)Bytes ==
Offset);
14295 int64_t GVOffset = 0;
14296 if (TLI->isGAPlusOffset(Ptr.
getNode(), GV, GVOffset)) {
14307 int FrameIdx = INT_MIN;
14308 int64_t FrameOffset = 0;
14310 FrameIdx = FI->getIndex();
14318 if (FrameIdx != INT_MIN) {
14323 return std::nullopt;
14333 "Split node must be a scalar type");
14338 return std::make_pair(
Lo,
Hi);
14347 LoVT = HiVT = TLI->getTypeToTransformTo(*
getContext(), VT);
14351 return std::make_pair(LoVT, HiVT);
14359 bool *HiIsEmpty)
const {
14369 "Mixing fixed width and scalable vectors when enveloping a type");
14374 *HiIsEmpty =
false;
14382 return std::make_pair(LoVT, HiVT);
14387std::pair<SDValue, SDValue>
14392 "Splitting vector with an invalid mixture of fixed and scalable "
14395 N.getValueType().getVectorMinNumElements() &&
14396 "More vector elements requested than available!");
14405 return std::make_pair(
Lo,
Hi);
14412 EVT VT =
N.getValueType();
14414 "Expecting the mask to be an evenly-sized vector");
14419 return std::make_pair(
Lo,
Hi);
14424 EVT VT =
N.getValueType();
14432 unsigned Start,
unsigned Count,
14434 EVT VT =
Op.getValueType();
14437 if (EltVT ==
EVT())
14440 for (
unsigned i = Start, e = Start +
Count; i != e; ++i) {
14452 return Val.MachineCPVal->getType();
14453 return Val.ConstVal->getType();
14457 unsigned &SplatBitSize,
14458 bool &HasAnyUndefs,
14459 unsigned MinSplatBits,
14460 bool IsBigEndian)
const {
14464 if (MinSplatBits > VecWidth)
14469 SplatValue =
APInt(VecWidth, 0);
14470 SplatUndef =
APInt(VecWidth, 0);
14477 assert(
NumOps > 0 &&
"isConstantSplat has 0-size build vector");
14480 for (
unsigned j = 0; j <
NumOps; ++j) {
14481 unsigned i = IsBigEndian ?
NumOps - 1 - j : j;
14483 unsigned BitPos = j * EltWidth;
14486 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
14488 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
14490 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
14497 HasAnyUndefs = (SplatUndef != 0);
14500 while (VecWidth > 8) {
14505 unsigned HalfSize = VecWidth / 2;
14512 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
14513 MinSplatBits > HalfSize)
14516 SplatValue = HighValue | LowValue;
14517 SplatUndef = HighUndef & LowUndef;
14519 VecWidth = HalfSize;
14528 SplatBitSize = VecWidth;
14535 if (UndefElements) {
14536 UndefElements->
clear();
14543 for (
unsigned i = 0; i !=
NumOps; ++i) {
14544 if (!DemandedElts[i])
14547 if (
Op.isUndef()) {
14549 (*UndefElements)[i] =
true;
14550 }
else if (!Splatted) {
14552 }
else if (Splatted !=
Op) {
14558 unsigned FirstDemandedIdx = DemandedElts.
countr_zero();
14560 "Can only have a splat without a constant for all undefs.");
14577 if (UndefElements) {
14578 UndefElements->
clear();
14589 (*UndefElements)[
I] =
true;
14592 for (
unsigned SeqLen = 1; SeqLen <
NumOps; SeqLen *= 2) {
14593 Sequence.append(SeqLen,
SDValue());
14594 for (
unsigned I = 0;
I !=
NumOps; ++
I) {
14595 if (!DemandedElts[
I])
14597 SDValue &SeqOp = Sequence[
I % SeqLen];
14599 if (
Op.isUndef()) {
14604 if (SeqOp && !SeqOp.
isUndef() && SeqOp !=
Op) {
14610 if (!Sequence.empty())
14614 assert(Sequence.empty() &&
"Failed to empty non-repeating sequence pattern");
14655 const APFloat &APF = CN->getValueAPF();
14661 return IntVal.exactLogBase2();
14667 bool IsLittleEndian,
unsigned DstEltSizeInBits,
14675 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14676 "Invalid bitcast scale");
14681 BitVector SrcUndeElements(NumSrcOps,
false);
14683 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
14685 if (
Op.isUndef()) {
14686 SrcUndeElements.
set(
I);
14691 assert((CInt || CFP) &&
"Unknown constant");
14692 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
14693 : CFP->getValueAPF().bitcastToAPInt();
14697 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
14698 SrcBitElements, UndefElements, SrcUndeElements);
14703 unsigned DstEltSizeInBits,
14708 unsigned NumSrcOps = SrcBitElements.
size();
14709 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
14710 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14711 "Invalid bitcast scale");
14712 assert(NumSrcOps == SrcUndefElements.
size() &&
14713 "Vector size mismatch");
14715 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
14716 DstUndefElements.
clear();
14717 DstUndefElements.
resize(NumDstOps,
false);
14721 if (SrcEltSizeInBits <= DstEltSizeInBits) {
14722 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
14723 for (
unsigned I = 0;
I != NumDstOps; ++
I) {
14724 DstUndefElements.
set(
I);
14725 APInt &DstBits = DstBitElements[
I];
14726 for (
unsigned J = 0; J != Scale; ++J) {
14727 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14728 if (SrcUndefElements[Idx])
14730 DstUndefElements.
reset(
I);
14731 const APInt &SrcBits = SrcBitElements[Idx];
14733 "Illegal constant bitwidths");
14734 DstBits.
insertBits(SrcBits, J * SrcEltSizeInBits);
14741 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
14742 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
14743 if (SrcUndefElements[
I]) {
14744 DstUndefElements.
set(
I * Scale, (
I + 1) * Scale);
14747 const APInt &SrcBits = SrcBitElements[
I];
14748 for (
unsigned J = 0; J != Scale; ++J) {
14749 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14750 APInt &DstBits = DstBitElements[Idx];
14751 DstBits = SrcBits.
extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
14758 unsigned Opc =
Op.getOpcode();
14765std::optional<std::pair<APInt, APInt>>
14769 return std::nullopt;
14772 APInt Start, Stride;
14773 int FirstIdx = -1, SecondIdx = -1;
14777 for (
unsigned I = 0;
I <
NumOps; ++
I) {
14782 return std::nullopt;
14785 if (FirstIdx < 0) {
14788 }
else if (SecondIdx < 0) {
14794 unsigned IdxDiff =
I - FirstIdx;
14795 APInt ValDiff = Val - Start;
14800 return std::nullopt;
14801 IdxDiff >>= CommonPow2Bits;
14809 return std::nullopt;
14812 Start -= Stride * FirstIdx;
14815 if (Val != Start + Stride *
I)
14816 return std::nullopt;
14822 return std::nullopt;
14824 return std::make_pair(Start, Stride);
14830 for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
14840 for (
int Idx = Mask[i]; i != e; ++i)
14841 if (Mask[i] >= 0 && Mask[i] != Idx)
14849 SDValue N,
bool AllowOpaques)
const {
14853 return AllowOpaques || !
C->isOpaque();
14862 TLI->isOffsetFoldingLegal(GA))
14890 return std::nullopt;
14892 EVT VT =
N->getValueType(0);
14894 switch (TLI->getBooleanContents(
N.getValueType())) {
14900 return std::nullopt;
14906 return std::nullopt;
14914 assert(!
Node->OperandList &&
"Node already has operands");
14916 "too many operands to fit into SDNode");
14917 SDUse *
Ops = OperandRecycler.allocate(
14920 bool IsDivergent =
false;
14921 for (
unsigned I = 0;
I != Vals.
size(); ++
I) {
14923 Ops[
I].setInitial(Vals[
I]);
14924 EVT VT =
Ops[
I].getValueType();
14927 if (VT != MVT::Other &&
14930 IsDivergent =
true;
14935 if (!TLI->isSDNodeAlwaysUniform(Node)) {
14936 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
14937 Node->SDNodeBits.IsDivergent = IsDivergent;
14945 while (Vals.
size() > Limit) {
14946 unsigned SliceIdx = Vals.
size() - Limit;
15017 "Unexpected opcode");
15038 const SDLoc &DLoc) {
15042 RTLIB::LibcallImpl LibcallImpl =
15043 Libcalls->getLibcallImpl(
static_cast<RTLIB::Libcall
>(LibFunc));
15044 if (LibcallImpl == RTLIB::Unsupported)
15051 Libcalls->getLibcallImplCallingConv(LibcallImpl),
15053 return TLI->LowerCallTo(CLI).second;
15057 assert(From && To &&
"Invalid SDNode; empty source SDValue?");
15058 auto I = SDEI.find(From);
15059 if (
I == SDEI.end())
15064 NodeExtraInfo NEI =
I->second;
15073 SDEI[To] = std::move(NEI);
15090 auto VisitFrom = [&](
auto &&Self,
const SDNode *
N,
int MaxDepth) {
15091 if (MaxDepth == 0) {
15097 if (!FromReach.
insert(
N).second)
15100 Self(Self,
Op.getNode(), MaxDepth - 1);
15105 auto DeepCopyTo = [&](
auto &&Self,
const SDNode *
N) {
15108 if (!Visited.
insert(
N).second)
15113 if (
N == To &&
Op.getNode() == EntrySDN) {
15118 if (!Self(Self,
Op.getNode()))
15122 SDEI[
N] = std::move(NEI);
15132 for (
int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
15133 PrevDepth = MaxDepth, MaxDepth *= 2, Visited.
clear()) {
15138 for (
const SDNode *
N : StartFrom)
15139 VisitFrom(VisitFrom,
N, MaxDepth - PrevDepth);
15143 LLVM_DEBUG(
dbgs() << __func__ <<
": MaxDepth=" << MaxDepth <<
" too low\n");
15151 errs() <<
"warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
15152 assert(
false &&
"From subgraph too complex - increase max. MaxDepth?");
15154 SDEI[To] = std::move(NEI);
15168 if (!Visited.
insert(
N).second) {
15169 errs() <<
"Detected cycle in SelectionDAG\n";
15170 dbgs() <<
"Offending node:\n";
15171 N->dumprFull(DAG);
dbgs() <<
"\n";
15187 bool check = force;
15188#ifdef EXPENSIVE_CHECKS
15192 assert(
N &&
"Checking nonexistent SDNode");
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static constexpr Value * getValue(Ty &ValueOrUse)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getFixedOrScalableQuantity(SelectionDAG &DAG, const SDLoc &DL, EVT VT, Ty Quantity)
static std::pair< SDValue, SDValue > getRuntimeCallSDValueHelper(SDValue Chain, const SDLoc &dl, TargetLowering::ArgListTy &&Args, const CallInst *CI, RTLIB::Libcall Call, SelectionDAG *DAG, const TargetLowering *TLI)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static bool isInTailCallPositionWrapper(const CallInst *CI, const SelectionDAG *SelDAG, bool AllowReturnsFirstArg)
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
static APInt getDemandAllEltsMask(SDValue V)
Construct a DemandedElts mask which demands all elements of V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
This file describes how to lower LLVM code to machine code.
static void removeOperands(MachineInstr &MI, unsigned i)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static unsigned getSize(unsigned Kind)
static const fltSemantics & IEEEsingle()
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardZero
static const fltSemantics & BFloat()
static const fltSemantics & IEEEquad()
static const fltSemantics & IEEEdouble()
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardPositive
static const fltSemantics & IEEEhalf()
opStatus
IEEE-754R 7: Default exception handling.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt usub_sat(const APInt &RHS) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
LLVM_ABI APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
LLVM_ABI APInt byteSwap() const
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
bool empty() const
Check if the array is empty.
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
BitVector & reset()
Reset all bits in the bitvector.
void resize(unsigned N, bool t=false)
Grow or shrink the bitvector.
void clear()
Removes all bits from the bitvector.
BitVector & set()
Set all bits in the bitvector.
bool none() const
Returns true if none of the bits are set.
size_type size() const
Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ABI std::optional< std::pair< APInt, APInt > > isArithmeticSequence() const
If this BuildVector is constant and represents an arithmetic sequence "<a, a+n, a+2n,...
LLVM_ABI bool isConstant() const
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
const APFloat & getValue() const
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
LLVM_ABI Type * getType() const
unsigned getTargetFlags() const
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI ConstantRange multiply(const ConstantRange &Other, unsigned NoWrapKind=0) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
This class is used to gather all the unique data bits of a node.
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
This is an important class for using LLVM in a threaded context.
Tracks which library functions to use for a particular subtarget.
LLVM_ABI CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
const MDOperand & getOperand(unsigned I) const
static MVT getIntegerVT(unsigned BitWidth)
Abstract base class for all machine specific constantpool value subclasses.
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID)=0
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
size_t getNumMemOperands() const
Return the number of memory operands.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, PointerUnion< MachineMemOperand *, MachineMemOperand ** > memrefs)
Constructor that supports single or multiple MMOs.
PointerUnion< MachineMemOperand *, MachineMemOperand ** > MemRefs
Memory reference information.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
ArrayRef< MachineMemOperand * > memoperands() const
Return the memory operands for this node.
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Represent a mutable reference to an array (0 or more elements consecutively in memory),...
Pass interface - Implemented by all 'passes'.
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A discriminated union of two or more pointer types, with the discriminator in the low bits of the poi...
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Keeps track of dbg_value information through SDISel.
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
Create an SDNode.
Represents a use of a SDNode.
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC)
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI std::pair< SDValue, SDValue > getMemccpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI)
Lower a memccpy operation into a target library call and return the resulting chain and call result a...
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI std::pair< SDValue, SDValue > getStrlen(SDValue Chain, const SDLoc &dl, SDValue Src, const CallInst *CI)
Lower a strlen operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl, SDNodeFlags Flags={})
Constant fold a setcc to true or false.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value \p N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI ConstantRange computeConstantRange(SDValue Op, bool ForSigned, unsigned Depth=0) const
Determine the possible constant range of an integer or vector of integers.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags, bool AllowCommute=false)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, const LibcallLoweringInfo *LibcallsInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI std::pair< SDValue, SDValue > getStrcmp(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI bool canIgnoreSignBitOfZero(const SDUse &Use) const
Check if a use of a float value is insensitive to signed zeros.
LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero, for a floating-point value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth=0) const
Return true if this function can prove that Op is never poison.
LLVM_ABI SDValue getIdentityElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) identity element for the given opcode, if it exists.
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
LLVM_ABI std::pair< SDValue, SDValue > getStrcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, const CallInst *CI)
Lower a strcpy operation into a target library call and return the resulting chain and call result as...
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
SDValue getPartialReduceMLS(unsigned Opc, const SDLoc &DL, SDValue Acc, SDValue LHS, SDValue RHS)
Get an expression that implements a partial multiply-subtract reduction.
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(SDValue Op, bool ForSigned, unsigned Depth=0) const
Combine constant ranges from computeConstantRange() and computeKnownBits().
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getTypeSize(const SDLoc &DL, EVT VT, TypeSize TS)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
LLVM_ABI std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
Lower a memcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI void dump() const
Dump the textual format of this DAG.
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, UndefPoisonKind Kind=UndefPoisonKind::UndefOrPoison, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI KnownFPClass computeKnownFPClass(SDValue Op, FPClassTest InterestedClasses, unsigned Depth=0) const
Determine floating-point class information about Op.
LLVM_ABI bool isIdentityElement(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo, unsigned Depth=0) const
Returns true if V is an identity element of Opc with Flags.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, UndefPoisonKind Kind=UndefPoisonKind::UndefOrPoison, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, Kind can be used to track poison ...
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI std::optional< unsigned > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getMaskFromElementCount(const SDLoc &DL, EVT VT, ElementCount Len)
Return a vector with the first 'Len' lanes set to true and remaining lanes set to false.
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void getTopologicallyOrderedNodes(SmallVectorImpl< const SDNode * > &SortedNodes) const
Get all the nodes in their topological order without modifying any states.
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI std::pair< SDValue, SDValue > getStrstr(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strstr operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, bool OrZero=false, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV)
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
DenormalMode getDenormalMode(EVT VT) const
Return the current function's default denormal handling kind for the given floating point type.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
Represent a constant reference to a string, i.e.
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
std::vector< ArgListEntry > ArgListTy
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
LLVM_ABI void set(Value *Val)
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt clmulr(const APInt &LHS, const APInt &RHS)
Perform a reversed carry-less multiply.
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
LLVM_ABI APInt clmul(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, also known as XOR multiplication, and return low-bits.
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
LLVM_ABI APInt clmulh(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, and return high-bits.
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ PTRADD
PTRADD represents pointer arithmetic semantics, for targets that opt in using shouldPreservePtrArith(...
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ POISON
POISON - A poison node.
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ DEACTIVATION_SYMBOL
Untyped node storing deactivation symbol reference (DeactivationSymbolSDNode).
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ CLMUL
Carry-less multiplication operations.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ CTLS
Count leading redundant sign bits.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EXPERIMENTAL_VECTOR_HISTOGRAM
Experimental vector histogram intrinsic Operands: Input Chain, Inc, Mask, Base, Index,...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ TRUNCATE_SSAT_S
TRUNCATE_[SU]SAT_[SU] - Truncate for saturated operand [SU] located in middle, prefix for SAT means i...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ ABS_MIN_POISON
ABS with a poison result for INT_MIN.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI NodeType getOppositeSignednessMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns the corresponding opcode with the opposi...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI NodeType getUnmaskedBinOpOpcode(unsigned MaskedOpc)
Given a MaskedOpc of ISD::MASKED_(U|S)(DIV|REM), returns the unmasked ISD::(U|S)(DIV|REM).
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
@ Undef
Value of the register doesn't matter.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
auto cast_or_null(const Y &Val)
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts)
Recursively peek through INSERT_VECTOR_ELT nodes, returning the source vector operand of V,...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns underlying integer value of an enum.
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
bool includesPoison(UndefPoisonKind Kind)
Returns true if Kind includes the Poison bit.
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool includesUndef(UndefPoisonKind Kind)
Returns true if Kind includes the Undef bit.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant (+/-)0.0 floating-point value or a splatted vector thereof (wi...
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
static LLVM_ABI KnownBits fshl(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshl(LHS, RHS, Amt).
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false, bool SelfAdd=false)
Compute knownbits resulting from addition of LHS and RHS.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits fshr(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshr(LHS, RHS, Amt).
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
LLVM_ABI KnownBits truncSSat(unsigned BitWidth) const
Truncate with signed saturation (signed input -> signed output)
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
static LLVM_ABI KnownBits clmul(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for clmul(LHS, RHS).
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
LLVM_ABI KnownBits truncUSat(unsigned BitWidth) const
Truncate with unsigned saturation (unsigned input -> unsigned output)
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
LLVM_ABI KnownBits truncSSatU(unsigned BitWidth) const
Truncate with signed saturation to unsigned (signed input -> unsigned output)
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
const APInt & getConstant() const
Returns the value when all bits have a known value.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
void copysign(const KnownFPClass &Sign)
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
KnownFPClass intersectWith(const KnownFPClass &RHS) const
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
static LLVM_ABI KnownFPClass bitcast(const fltSemantics &FltSemantics, const KnownBits &Bits)
Report known values for a bitcast into a float with provided semantics.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)