#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
    "disable-fp-call-folding",
    cl::desc("Disable constant-folding of FP intrinsics and libcalls."),
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);
      Result |= ElementCI->getValue().zext(Result.getBitWidth());
         "Invalid constantexpr bitcast!");
    Type *SrcEltTy = VTy->getElementType();
    if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                              SrcEltTy, NumSrcElts, DL))
    return ConstantInt::get(DestTy, Result);
  if (NumDstElt == NumSrcElt)
  Type *DstEltTy = DestVTy->getElementType();
         "Constant folding cannot fail for plain fp->int bitcast!");
  bool isLittleEndian = DL.isLittleEndian();
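  // Narrowing bitcast (more source elements than destination elements): each
  // destination element is assembled by shifting Ratio source elements into
  // place and merging them; the shift direction follows the target's
  // endianness.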
  if (NumDstElt < NumSrcElt) {
    unsigned Ratio = NumSrcElt/NumDstElt;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        assert(Src && "Constant folding cannot fail on plain integers");
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
        assert(Src && "Constant folding cannot fail on plain integers");
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
      assert(Elt && "Constant folding cannot fail on plain integers");
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);
    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
  if (!CE)
    return false;
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::PtrToAddr ||
      CE->getOpcode() == Instruction::BitCast)
  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
  Type *SrcTy = C->getType();
  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
  if (SrcSize == DestSize &&
      DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
    Cast = Instruction::IntToPtr;
  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
    Cast = Instruction::PtrToInt;
  if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
  if (SrcTy->isStructTy()) {
      ElemC = C->getAggregateElement(Elem++);
    } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
    if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
    C = C->getAggregateElement(0u);
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");
  if (ByteOffset >= DL.getTypeStoreSize(C->getType()))
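    // Integers are copied out one byte at a time, mapping the logical byte
    // index through the target's endianness.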
    if ((CI->getBitWidth() & 7) != 0)
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
    if (CFP->getType()->isDoubleTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    if (CFP->getType()->isFloatTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    if (CFP->getType()->isHalfTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    ByteOffset -= CurEltOffset;
      uint64_t EltSize =
          DL.getTypeAllocSize(CS->getOperand(Index)->getType());
      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
      if (Index == CS->getType()->getNumElements())
      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      CurEltOffset = NextEltOffset;
    NumElts = AT->getNumElements();
    EltTy = AT->getElementType();
    EltSize = DL.getTypeAllocSize(EltTy);
    if (!DL.typeSizeEqualsStoreSize(EltTy))
      EltSize = DL.getTypeStoreSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
          DL.getTypeSizeInBits(LoadTy).getFixedValue());
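// FoldReinterpretLoadFromConst reads at most 32 bytes out of a constant
// initializer and reassembles them into an integer, honoring the target's
// endianness when ordering the bytes.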
  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal |= RawBytes[i];
  return ConstantInt::get(IntType->getContext(), ResultVal);
  if (NBytes > UINT16_MAX)
  unsigned char *CurPtr = RawBytes.data();
  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
  if (!Offset.isZero() || !Indices[0].isZero())
    if (Index.isNegative() || Index.getActiveBits() >= 32)
    C = C->getAggregateElement(Index.getZExtValue());
  if (Offset.getSignificantBits() <= 64)
        FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
  if (C->isNullValue() && !Ty->isX86_AMXTy())
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
  if (Opc == Instruction::And) {
    if ((Known1.One | Known0.Zero).isAllOnes()) {
    if ((Known0.One | Known1.Zero).isAllOnes()) {
  if (Opc == Instruction::Sub) {
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
                                  std::optional<ConstantRange> InRange,
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
            SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
  Type *SrcElemTy = GEP->getSourceElementType();
  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
      DL.getIndexedOffsetInType(
  std::optional<ConstantRange> InRange = GEP->getInRange();
  bool Overflow = false;
  NW &= GEP->getNoWrapFlags();
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
        AllConstantInt = false;
    if (auto GEPRange = GEP->getInRange()) {
      auto AdjustedGEPRange =
          GEPRange->sextOrTrunc(BitWidth).subtract(Offset);
          InRange ? InRange->intersectWith(AdjustedGEPRange) : AdjustedGEPRange;
    SrcElemTy = GEP->getSourceElementType();
    if (CE->getOpcode() == Instruction::IntToPtr) {
        BaseIntVal = Base->getValue().zextOrTrunc(BaseIntVal.getBitWidth());
      !DL.mustNotIntroduceIntToPtr(Ptr->getType())) {
    bool CanBeNull, CanBeFreed;
    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      ConstantInt::get(Ctx, Offset), NW,
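// ConstantFoldInstOperandsImpl is the central dispatch: given an opcode and
// already-folded operands, it routes to the matching fold routine (casts,
// compares, GEPs, calls, vector operations, loads) and returns null when no
// fold applies.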
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       bool AllowNonDeterministic) {
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
        AllowNonDeterministic);
    Type *SrcElemTy = GEP->getSourceElementType();
        GEP->getNoWrapFlags(),
    return CE->getWithOperands(Ops);
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
  case Instruction::Freeze:
  case Instruction::Call:
        AllowNonDeterministic);
  case Instruction::Select:
  case Instruction::ExtractElement:
  case Instruction::ExtractValue:
  case Instruction::InsertElement:
  case Instruction::InsertValue:
  case Instruction::ShuffleVector:
  case Instruction::Load: {
    if (LI->isVolatile())
  for (const Use &OldU : C->operands()) {
    auto It = FoldedOps.find(OldC);
    if (It == FoldedOps.end()) {
      NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
      FoldedOps.insert({OldC, NewC});
    Ops.push_back(NewC);
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, true))
    C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
    if (CommonValue && C != CommonValue)
  if (!all_of(I->operands(), [](const Use &U) { return isa<Constant>(U); }))
  for (const Use &OpU : I->operands()) {
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
                                        bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
    if (CE0->getOpcode() == Instruction::PtrToInt ||
        CE0->getOpcode() == Instruction::PtrToAddr) {
      Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
      if (CE0->getType() == AddrTy) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        if (CE0->getOpcode() == Instruction::PtrToInt ||
            CE0->getOpcode() == Instruction::PtrToAddr) {
          Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
          if (CE0->getType() == AddrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
          DL, Offset0, IsEqPred,
      APInt Offset1(IndexWidth, 0);
          DL, Offset1, IsEqPred,
      if (Stripped0 == Stripped1)
  if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
    return ConstantFP::get(Ty->getContext(), APF);
    return ConstantFP::get(
    return ConstantFP::get(Ty->getContext(),
                   IsOutput ? Mode.Output : Mode.Input);
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);
      NewElts.push_back(ConstantFP::get(Ty, Elt));
                               bool AllowNonDeterministic) {
      if (!AllowNonDeterministic)
        if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
            FP->hasAllowContract() || FP->hasAllowReciprocal())
    if (!AllowNonDeterministic && C->isNaN())
          C->getType(), DestTy, &DL))
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:
    if (CE->getOpcode() == Instruction::IntToPtr) {
      Type *MidTy = Opcode == Instruction::PtrToInt
                        ? DL.getAddressType(CE->getType())
                        : DL.getIntPtrType(CE->getType());
      unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
          DL, BaseOffset, true));
      if (Base->isNullValue()) {
        FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        if (GEP->getNumIndices() == 1 &&
            GEP->getSourceElementType()->isIntegerTy(8)) {
          if (Sub && Sub->getType() == IntIdxTy &&
              Sub->getOpcode() == Instruction::Sub &&
              Sub->getOperand(0)->isNullValue())
                Sub->getOperand(1));
  case Instruction::IntToPtr:
    if (CE->getOpcode() == Instruction::PtrToInt) {
      Constant *SrcPtr = CE->getOperand(0);
      unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
      unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
      if (MidIntSize >= SrcPtrSize) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
  if (Call->isNoBuiltin())
  if (Call->getFunctionType() != F->getFunctionType())
        return Arg.getType()->isFloatingPointTy();
  switch (F->getIntrinsicID()) {
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::scmp:
  case Intrinsic::ucmp:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  case Intrinsic::vector_extract:
  case Intrinsic::vector_insert:
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_interleave3:
  case Intrinsic::vector_interleave4:
  case Intrinsic::vector_interleave5:
  case Intrinsic::vector_interleave6:
  case Intrinsic::vector_interleave7:
  case Intrinsic::vector_interleave8:
  case Intrinsic::vector_deinterleave2:
  case Intrinsic::vector_deinterleave3:
  case Intrinsic::vector_deinterleave4:
  case Intrinsic::vector_deinterleave5:
  case Intrinsic::vector_deinterleave6:
  case Intrinsic::vector_deinterleave7:
  case Intrinsic::vector_deinterleave8:
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_wave_reduce_max:
  case Intrinsic::amdgcn_wave_reduce_min:
  case Intrinsic::amdgcn_wave_reduce_add:
  case Intrinsic::amdgcn_wave_reduce_sub:
  case Intrinsic::amdgcn_wave_reduce_and:
  case Intrinsic::amdgcn_wave_reduce_or:
  case Intrinsic::amdgcn_wave_reduce_xor:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  case Intrinsic::wasm_alltrue:
  case Intrinsic::wasm_anytrue:
  case Intrinsic::wasm_dot:
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::minimumnum:
  case Intrinsic::maximumnum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::exp10:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::sincos:
  case Intrinsic::sinh:
  case Intrinsic::cosh:
  case Intrinsic::atan:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::ldexp:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::frexp:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_sin:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::nvvm_fmax_d:
  case Intrinsic::nvvm_fmax_f:
  case Intrinsic::nvvm_fmax_ftz_f:
  case Intrinsic::nvvm_fmax_ftz_nan_f:
  case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
  case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
  case Intrinsic::nvvm_fmax_nan_f:
  case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
  case Intrinsic::nvvm_fmax_xorsign_abs_f:
  case Intrinsic::nvvm_fmin_d:
  case Intrinsic::nvvm_fmin_f:
  case Intrinsic::nvvm_fmin_ftz_f:
  case Intrinsic::nvvm_fmin_ftz_nan_f:
  case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
  case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
  case Intrinsic::nvvm_fmin_nan_f:
  case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
  case Intrinsic::nvvm_fmin_xorsign_abs_f:
  case Intrinsic::nvvm_f2i_rm:
  case Intrinsic::nvvm_f2i_rn:
  case Intrinsic::nvvm_f2i_rp:
  case Intrinsic::nvvm_f2i_rz:
  case Intrinsic::nvvm_f2i_rm_ftz:
  case Intrinsic::nvvm_f2i_rn_ftz:
  case Intrinsic::nvvm_f2i_rp_ftz:
  case Intrinsic::nvvm_f2i_rz_ftz:
  case Intrinsic::nvvm_f2ui_rm:
  case Intrinsic::nvvm_f2ui_rn:
  case Intrinsic::nvvm_f2ui_rp:
  case Intrinsic::nvvm_f2ui_rz:
  case Intrinsic::nvvm_f2ui_rm_ftz:
  case Intrinsic::nvvm_f2ui_rn_ftz:
  case Intrinsic::nvvm_f2ui_rp_ftz:
  case Intrinsic::nvvm_f2ui_rz_ftz:
  case Intrinsic::nvvm_d2i_rm:
  case Intrinsic::nvvm_d2i_rn:
  case Intrinsic::nvvm_d2i_rp:
  case Intrinsic::nvvm_d2i_rz:
  case Intrinsic::nvvm_d2ui_rm:
  case Intrinsic::nvvm_d2ui_rn:
  case Intrinsic::nvvm_d2ui_rp:
  case Intrinsic::nvvm_d2ui_rz:
  case Intrinsic::nvvm_f2ll_rm:
  case Intrinsic::nvvm_f2ll_rn:
  case Intrinsic::nvvm_f2ll_rp:
  case Intrinsic::nvvm_f2ll_rz:
  case Intrinsic::nvvm_f2ll_rm_ftz:
  case Intrinsic::nvvm_f2ll_rn_ftz:
  case Intrinsic::nvvm_f2ll_rp_ftz:
  case Intrinsic::nvvm_f2ll_rz_ftz:
  case Intrinsic::nvvm_f2ull_rm:
  case Intrinsic::nvvm_f2ull_rn:
  case Intrinsic::nvvm_f2ull_rp:
  case Intrinsic::nvvm_f2ull_rz:
  case Intrinsic::nvvm_f2ull_rm_ftz:
  case Intrinsic::nvvm_f2ull_rn_ftz:
  case Intrinsic::nvvm_f2ull_rp_ftz:
  case Intrinsic::nvvm_f2ull_rz_ftz:
  case Intrinsic::nvvm_d2ll_rm:
  case Intrinsic::nvvm_d2ll_rn:
  case Intrinsic::nvvm_d2ll_rp:
  case Intrinsic::nvvm_d2ll_rz:
  case Intrinsic::nvvm_d2ull_rm:
  case Intrinsic::nvvm_d2ull_rn:
  case Intrinsic::nvvm_d2ull_rp:
  case Intrinsic::nvvm_d2ull_rz:
  case Intrinsic::nvvm_ceil_d:
  case Intrinsic::nvvm_ceil_f:
  case Intrinsic::nvvm_ceil_ftz_f:
  case Intrinsic::nvvm_fabs:
  case Intrinsic::nvvm_fabs_ftz:
  case Intrinsic::nvvm_floor_d:
  case Intrinsic::nvvm_floor_f:
  case Intrinsic::nvvm_floor_ftz_f:
  case Intrinsic::nvvm_rcp_rm_d:
  case Intrinsic::nvvm_rcp_rm_f:
  case Intrinsic::nvvm_rcp_rm_ftz_f:
  case Intrinsic::nvvm_rcp_rn_d:
  case Intrinsic::nvvm_rcp_rn_f:
  case Intrinsic::nvvm_rcp_rn_ftz_f:
  case Intrinsic::nvvm_rcp_rp_d:
  case Intrinsic::nvvm_rcp_rp_f:
  case Intrinsic::nvvm_rcp_rp_ftz_f:
  case Intrinsic::nvvm_rcp_rz_d:
  case Intrinsic::nvvm_rcp_rz_f:
  case Intrinsic::nvvm_rcp_rz_ftz_f:
  case Intrinsic::nvvm_round_d:
  case Intrinsic::nvvm_round_f:
  case Intrinsic::nvvm_round_ftz_f:
  case Intrinsic::nvvm_saturate_d:
  case Intrinsic::nvvm_saturate_f:
  case Intrinsic::nvvm_saturate_ftz_f:
  case Intrinsic::nvvm_sqrt_f:
  case Intrinsic::nvvm_sqrt_rn_d:
  case Intrinsic::nvvm_sqrt_rn_f:
  case Intrinsic::nvvm_sqrt_rn_ftz_f:
    return !Call->isStrictFP();
  case Intrinsic::nvvm_add_rm_d:
  case Intrinsic::nvvm_add_rn_d:
  case Intrinsic::nvvm_add_rp_d:
  case Intrinsic::nvvm_add_rz_d:
  case Intrinsic::nvvm_add_rm_f:
  case Intrinsic::nvvm_add_rn_f:
  case Intrinsic::nvvm_add_rp_f:
  case Intrinsic::nvvm_add_rz_f:
  case Intrinsic::nvvm_add_rm_ftz_f:
  case Intrinsic::nvvm_add_rn_ftz_f:
  case Intrinsic::nvvm_add_rp_ftz_f:
  case Intrinsic::nvvm_add_rz_ftz_f:
  case Intrinsic::nvvm_div_rm_d:
  case Intrinsic::nvvm_div_rn_d:
  case Intrinsic::nvvm_div_rp_d:
  case Intrinsic::nvvm_div_rz_d:
  case Intrinsic::nvvm_div_rm_f:
  case Intrinsic::nvvm_div_rn_f:
  case Intrinsic::nvvm_div_rp_f:
  case Intrinsic::nvvm_div_rz_f:
  case Intrinsic::nvvm_div_rm_ftz_f:
  case Intrinsic::nvvm_div_rn_ftz_f:
  case Intrinsic::nvvm_div_rp_ftz_f:
  case Intrinsic::nvvm_div_rz_ftz_f:
  case Intrinsic::nvvm_mul_rm_d:
  case Intrinsic::nvvm_mul_rn_d:
  case Intrinsic::nvvm_mul_rp_d:
  case Intrinsic::nvvm_mul_rz_d:
  case Intrinsic::nvvm_mul_rm_f:
  case Intrinsic::nvvm_mul_rn_f:
  case Intrinsic::nvvm_mul_rp_f:
  case Intrinsic::nvvm_mul_rz_f:
  case Intrinsic::nvvm_mul_rm_ftz_f:
  case Intrinsic::nvvm_mul_rn_ftz_f:
  case Intrinsic::nvvm_mul_rp_ftz_f:
  case Intrinsic::nvvm_mul_rz_ftz_f:
  case Intrinsic::nvvm_fma_rm_d:
  case Intrinsic::nvvm_fma_rn_d:
  case Intrinsic::nvvm_fma_rp_d:
  case Intrinsic::nvvm_fma_rz_d:
  case Intrinsic::nvvm_fma_rm_f:
  case Intrinsic::nvvm_fma_rn_f:
  case Intrinsic::nvvm_fma_rp_f:
  case Intrinsic::nvvm_fma_rz_f:
  case Intrinsic::nvvm_fma_rm_ftz_f:
  case Intrinsic::nvvm_fma_rn_ftz_f:
  case Intrinsic::nvvm_fma_rp_ftz_f:
  case Intrinsic::nvvm_fma_rz_ftz_f:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
  if (!F->hasName() || Call->isStrictFP())
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
    return Name == "exp" || Name == "expf" || Name == "exp2" ||
           Name == "exp2f" || Name == "erf" || Name == "erff";
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
    return Name == "ilogb" || Name == "ilogbf";
    return Name == "log" || Name == "logf" || Name == "logl" ||
           Name == "log2" || Name == "log2f" || Name == "log10" ||
           Name == "log10f" || Name == "logb" || Name == "logbf" ||
           Name == "log1p" || Name == "log1pf";
    return Name == "nearbyint" || Name == "nearbyintf";
    return Name == "pow" || Name == "powf";
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf" ||
           Name == "roundeven" || Name == "roundevenf";
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  if (Name.size() < 12 || Name[1] != '_')
    return Name == "__acos_finite" || Name == "__acosf_finite" ||
           Name == "__asin_finite" || Name == "__asinf_finite" ||
           Name == "__atan2_finite" || Name == "__atan2f_finite";
    return Name == "__cosh_finite" || Name == "__coshf_finite";
    return Name == "__exp_finite" || Name == "__expf_finite" ||
           Name == "__exp2_finite" || Name == "__exp2f_finite";
    return Name == "__log_finite" || Name == "__logf_finite" ||
           Name == "__log10_finite" || Name == "__log10f_finite";
    return Name == "__pow_finite" || Name == "__powf_finite";
    return Name == "__sinh_finite" || Name == "__sinhf_finite";
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    return ConstantFP::get(Ty->getContext(), APF);
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
  if (Ty->isFP128Ty())
    return ConstantFP::get(Ty, V);
inline void llvm_fenv_clearexcept() {
#if HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
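// Returns true if the preceding host libm call signalled an error: either
// errno was set to ERANGE/EDOM, or an FP exception other than "inexact" was
// raised. Callers discard the folded result in that case.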
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
#if HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
  switch (DenormKind) {
    return FTZPreserveSign(V);
    return FlushToPositiveZero(V);
  if (!DenormMode.isValid() ||
  llvm_fenv_clearexcept();
  auto Input = FlushWithDenormKind(V, DenormMode.Input);
  double Result = NativeFP(Input.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  Constant *Output = GetConstantFoldFPValue(Result, Ty);
  const auto *CFP = static_cast<ConstantFP *>(Output);
  const auto Res = FlushWithDenormKind(CFP->getValueAPF(), DenormMode.Output);
  return ConstantFP::get(Ty->getContext(), Res);
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
  llvm_fenv_clearexcept();
  float128 Result = NativeFP(V.convertToQuad());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  return GetConstantFoldFPValue128(Result, Ty);
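// Evaluates a two-argument libm function on the host at double precision,
// rejecting the fold if the call raised errno or a (non-inexact) FP
// exception.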
Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  return GetConstantFoldFPValue(Result, Ty);
  if (Op->containsPoisonElement())
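  // A splat input lets most reductions fold without walking the lanes:
  // and/or/min/max of identical elements is the element itself, while add,
  // mul and xor only fold when the splat is an identity value (xor also
  // cancels to zero over an even number of lanes).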
  if (Constant *SplatVal = Op->getSplatValue()) {
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_add:
      if (SplatVal->isNullValue())
    case Intrinsic::vector_reduce_mul:
      if (SplatVal->isNullValue() || SplatVal->isOneValue())
    case Intrinsic::vector_reduce_xor:
      if (SplatVal->isNullValue())
      if (OpVT->getElementCount().isKnownMultipleOf(2))
  APInt Acc = EltC->getValue();
    const APInt &X = EltC->getValue();
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
  return ConstantInt::get(Op->getContext(), Acc);
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");
  bool isExact = false;
      IsSigned, mode, &isExact);
  return ConstantInt::get(Ty, UIntVal, IsSigned);
  Type *Ty = Op->getType();
  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() ||
      Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();
    C = &CI->getValue();
  return ConstantFP::get(
  if (!Ty->isIEEELikeFPTy())
  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);
  return ConstantFP::get(CI->getContext(), Src);
  assert(Operands.size() == 1 && "Wrong number of operands.");
  if (IntrinsicID == Intrinsic::is_constant) {
    if (Operands[0]->isManifestConstant())
  if (IntrinsicID == Intrinsic::cos ||
      IntrinsicID == Intrinsic::ctpop ||
      IntrinsicID == Intrinsic::fptoui_sat ||
      IntrinsicID == Intrinsic::fptosi_sat ||
      IntrinsicID == Intrinsic::canonicalize)
  if (IntrinsicID == Intrinsic::bswap ||
      IntrinsicID == Intrinsic::bitreverse ||
      IntrinsicID == Intrinsic::launder_invariant_group ||
      IntrinsicID == Intrinsic::strip_invariant_group)
  if (IntrinsicID == Intrinsic::launder_invariant_group ||
      IntrinsicID == Intrinsic::strip_invariant_group) {
        Call->getParent() ? Call->getCaller() : nullptr;
  if (IntrinsicID == Intrinsic::convert_to_fp16) {
  if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
      IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
    bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
    unsigned Width = Ty->getIntegerBitWidth();
    bool IsExact = false;
      return ConstantInt::get(Ty, Int);
  if (IntrinsicID == Intrinsic::fptoui_sat ||
      IntrinsicID == Intrinsic::fptosi_sat) {
        IntrinsicID == Intrinsic::fptoui_sat);
    return ConstantInt::get(Ty, Int);
  if (IntrinsicID == Intrinsic::canonicalize)
    return constantFoldCanonicalize(Ty, Call, U);
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
  if (Ty->isFP128Ty()) {
    if (IntrinsicID == Intrinsic::log) {
      float128 Result = logf128(Op->getValueAPF().convertToQuad());
      return GetConstantFoldFPValue128(Result, Ty);
    LibFunc Fp128Func = NotLibFunc;
    if (TLI && TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
        Fp128Func == LibFunc_logl)
      return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&
  if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint ||
      IntrinsicID == Intrinsic::roundeven) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::round) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::roundeven) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::ceil) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::floor) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::trunc) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::fabs) {
    return ConstantFP::get(Ty, U);
  if (IntrinsicID == Intrinsic::amdgcn_fract) {
    APFloat AlmostOne(U.getSemantics(), 1);
    AlmostOne.next(true);
    return ConstantFP::get(Ty, minimum(FractU, AlmostOne));
  std::optional<APFloat::roundingMode> RM;
  switch (IntrinsicID) {
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint: {
    RM = CI->getRoundingMode();
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_trunc:
    if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    } else if (U.isSignaling()) {
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    return ConstantFP::get(Ty, U);
  switch (IntrinsicID) {
  case Intrinsic::nvvm_f2i_rm:
  case Intrinsic::nvvm_f2i_rn:
  case Intrinsic::nvvm_f2i_rp:
  case Intrinsic::nvvm_f2i_rz:
  case Intrinsic::nvvm_f2i_rm_ftz:
  case Intrinsic::nvvm_f2i_rn_ftz:
  case Intrinsic::nvvm_f2i_rp_ftz:
  case Intrinsic::nvvm_f2i_rz_ftz:
  case Intrinsic::nvvm_f2ui_rm:
  case Intrinsic::nvvm_f2ui_rn:
  case Intrinsic::nvvm_f2ui_rp:
  case Intrinsic::nvvm_f2ui_rz:
  case Intrinsic::nvvm_f2ui_rm_ftz:
  case Intrinsic::nvvm_f2ui_rn_ftz:
  case Intrinsic::nvvm_f2ui_rp_ftz:
  case Intrinsic::nvvm_f2ui_rz_ftz:
  case Intrinsic::nvvm_d2i_rm:
  case Intrinsic::nvvm_d2i_rn:
  case Intrinsic::nvvm_d2i_rp:
  case Intrinsic::nvvm_d2i_rz:
  case Intrinsic::nvvm_d2ui_rm:
  case Intrinsic::nvvm_d2ui_rn:
  case Intrinsic::nvvm_d2ui_rp:
  case Intrinsic::nvvm_d2ui_rz:
  case Intrinsic::nvvm_f2ll_rm:
  case Intrinsic::nvvm_f2ll_rn:
  case Intrinsic::nvvm_f2ll_rp:
  case Intrinsic::nvvm_f2ll_rz:
  case Intrinsic::nvvm_f2ll_rm_ftz:
  case Intrinsic::nvvm_f2ll_rn_ftz:
  case Intrinsic::nvvm_f2ll_rp_ftz:
  case Intrinsic::nvvm_f2ll_rz_ftz:
  case Intrinsic::nvvm_f2ull_rm:
  case Intrinsic::nvvm_f2ull_rn:
  case Intrinsic::nvvm_f2ull_rp:
  case Intrinsic::nvvm_f2ull_rz:
  case Intrinsic::nvvm_f2ull_rm_ftz:
  case Intrinsic::nvvm_f2ull_rn_ftz:
  case Intrinsic::nvvm_f2ull_rp_ftz:
  case Intrinsic::nvvm_f2ull_rz_ftz:
  case Intrinsic::nvvm_d2ll_rm:
  case Intrinsic::nvvm_d2ll_rn:
  case Intrinsic::nvvm_d2ll_rp:
  case Intrinsic::nvvm_d2ll_rz:
  case Intrinsic::nvvm_d2ull_rm:
  case Intrinsic::nvvm_d2ull_rn:
  case Intrinsic::nvvm_d2ull_rp:
  case Intrinsic::nvvm_d2ull_rz: {
      return ConstantInt::get(Ty, 0);
    unsigned BitWidth = Ty->getIntegerBitWidth();
    APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned);
    auto FloatToRound = IsFTZ ? FTZPreserveSign(U) : U;
    bool IsExact = false;
    FloatToRound.convertToInteger(ResInt, RMode, &IsExact);
    return ConstantInt::get(Ty, ResInt);
  switch (IntrinsicID) {
  case Intrinsic::log:
    if (U.isExactlyValue(1.0))
    return ConstantFoldFP(log, APF, Ty);
  case Intrinsic::log2:
    if (U.isExactlyValue(1.0))
    return ConstantFoldFP(log2, APF, Ty);
  case Intrinsic::log10:
    if (U.isExactlyValue(1.0))
    return ConstantFoldFP(log10, APF, Ty);
  case Intrinsic::exp:
    return ConstantFoldFP(exp, APF, Ty);
  case Intrinsic::exp2:
    return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
  case Intrinsic::exp10:
    return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
  case Intrinsic::sin:
    return ConstantFoldFP(sin, APF, Ty);
  case Intrinsic::cos:
    return ConstantFoldFP(cos, APF, Ty);
  case Intrinsic::sinh:
    return ConstantFoldFP(sinh, APF, Ty);
  case Intrinsic::cosh:
    return ConstantFoldFP(cosh, APF, Ty);
  case Intrinsic::atan:
      return ConstantFP::get(Ty, U);
    return ConstantFoldFP(atan, APF, Ty);
  case Intrinsic::sqrt:
    return ConstantFoldFP(sqrt, APF, Ty);
  case Intrinsic::nvvm_ceil_ftz_f:
  case Intrinsic::nvvm_ceil_f:
  case Intrinsic::nvvm_ceil_d:
    return ConstantFoldFP(
  case Intrinsic::nvvm_fabs_ftz:
  case Intrinsic::nvvm_fabs:
    return ConstantFoldFP(
  case Intrinsic::nvvm_floor_ftz_f:
  case Intrinsic::nvvm_floor_f:
  case Intrinsic::nvvm_floor_d:
    return ConstantFoldFP(
  case Intrinsic::nvvm_rcp_rm_ftz_f:
  case Intrinsic::nvvm_rcp_rn_ftz_f:
  case Intrinsic::nvvm_rcp_rp_ftz_f:
  case Intrinsic::nvvm_rcp_rz_ftz_f:
  case Intrinsic::nvvm_rcp_rm_d:
  case Intrinsic::nvvm_rcp_rm_f:
  case Intrinsic::nvvm_rcp_rn_d:
  case Intrinsic::nvvm_rcp_rn_f:
  case Intrinsic::nvvm_rcp_rp_d:
  case Intrinsic::nvvm_rcp_rp_f:
  case Intrinsic::nvvm_rcp_rz_d:
  case Intrinsic::nvvm_rcp_rz_f: {
    auto Denominator = IsFTZ ? FTZPreserveSign(APF) : APF;
      Res = FTZPreserveSign(Res);
    return ConstantFP::get(Ty, Res);
  case Intrinsic::nvvm_round_ftz_f:
  case Intrinsic::nvvm_round_f:
  case Intrinsic::nvvm_round_d: {
    auto V = IsFTZ ? FTZPreserveSign(APF) : APF;
    return ConstantFP::get(Ty, V);
  case Intrinsic::nvvm_saturate_ftz_f:
  case Intrinsic::nvvm_saturate_d:
  case Intrinsic::nvvm_saturate_f: {
    auto V = IsFTZ ? FTZPreserveSign(APF) : APF;
    if (V.isNegative() || V.isZero() || V.isNaN())
      return ConstantFP::get(Ty, One);
    return ConstantFP::get(Ty, APF);
  case Intrinsic::nvvm_sqrt_rn_ftz_f:
  case Intrinsic::nvvm_sqrt_f:
  case Intrinsic::nvvm_sqrt_rn_d:
  case Intrinsic::nvvm_sqrt_rn_f:
    return ConstantFoldFP(
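  // The AMDGPU sin/cos intrinsics take their argument in turns (multiples of
  // 2*pi) rather than radians. Quarter-turn inputs are folded through an
  // exact table (cos is sin shifted by one quarter turn); inputs outside
  // [-256, 256] are left unfolded since hardware generations handle that
  // range differently.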
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_sin: {
    double V = getValueAsDouble(Op);
    if (V < -256.0 || V > 256.0)
    bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
    double V4 = V * 4.0;
    if (V4 == floor(V4)) {
      const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
      V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
    return GetConstantFoldFPValue(V, Ty);
  LibFunc Func = NotLibFunc;
  case LibFunc_acos_finite:
  case LibFunc_acosf_finite:
    return ConstantFoldFP(acos, APF, Ty);
  case LibFunc_asin_finite:
  case LibFunc_asinf_finite:
    return ConstantFoldFP(asin, APF, Ty);
      return ConstantFP::get(Ty, U);
    return ConstantFoldFP(atan, APF, Ty);
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
    return ConstantFoldFP(cos, APF, Ty);
  case LibFunc_cosh_finite:
  case LibFunc_coshf_finite:
    return ConstantFoldFP(cosh, APF, Ty);
  case LibFunc_exp_finite:
  case LibFunc_expf_finite:
    return ConstantFoldFP(exp, APF, Ty);
  case LibFunc_exp2_finite:
  case LibFunc_exp2f_finite:
    return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
  case LibFunc_floorf:
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
  case LibFunc_log_finite:
  case LibFunc_logf_finite:
    return ConstantFoldFP(log, APF, Ty);
  case LibFunc_log2_finite:
  case LibFunc_log2f_finite:
    return ConstantFoldFP(log2, APF, Ty);
  case LibFunc_log10f:
  case LibFunc_log10_finite:
  case LibFunc_log10f_finite:
    return ConstantFoldFP(log10, APF, Ty);
  case LibFunc_ilogbf:
    return ConstantInt::get(Ty, ilogb(APF), true);
    return ConstantFoldFP(logb, APF, Ty);
  case LibFunc_log1pf:
      return ConstantFP::get(Ty, U);
    return ConstantFoldFP(log1p, APF, Ty);
    return ConstantFoldFP(erf, APF, Ty);
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_roundeven:
  case LibFunc_roundevenf:
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
  case LibFunc_roundf:
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
    return ConstantFoldFP(sin, APF, Ty);
  case LibFunc_sinh_finite:
  case LibFunc_sinhf_finite:
    return ConstantFoldFP(sinh, APF, Ty);
    return ConstantFoldFP(sqrt, APF, Ty);
    return ConstantFoldFP(tan, APF, Ty);
    return ConstantFoldFP(tanh, APF, Ty);
  case LibFunc_truncf:
    if (TLI->has(Func)) {
      return ConstantFP::get(Ty, U);
  switch (IntrinsicID) {
  case Intrinsic::bswap:
    return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
  case Intrinsic::ctpop:
    return ConstantInt::get(Ty, Op->getValue().popcount());
  case Intrinsic::bitreverse:
    return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
  case Intrinsic::convert_from_fp16: {
           "Precision lost during fp16 constfolding");
    return ConstantFP::get(Ty, Val);
  case Intrinsic::amdgcn_s_wqm: {
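    // Whole quad mode: every 4-bit group of the result becomes all ones as
    // soon as any bit in the corresponding input group is set. The two steps
    // below first smear bits within pairs, then across pairs within each
    // nibble.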
    Val |= (Val & 0x5555555555555555ULL) << 1 |
           ((Val >> 1) & 0x5555555555555555ULL);
    Val |= (Val & 0x3333333333333333ULL) << 2 |
           ((Val >> 2) & 0x3333333333333333ULL);
    return ConstantInt::get(Ty, Val);
  case Intrinsic::amdgcn_s_quadmask: {
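    // Quad mask: collapse each group of four input bits into a single output
    // bit that is set iff any bit in the group was set.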
    for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
        QuadMask |= (1ULL << I);
    return ConstantInt::get(Ty, QuadMask);
  case Intrinsic::amdgcn_s_bitreplicate: {
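    // Bit replicate: spread the 32 input bits so bit i lands at bit 2*i of a
    // 64-bit value; the final "Val | Val << 1" then duplicates each bit into
    // the adjacent position.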
    Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
    Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
    Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
    Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
    Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
    Val = Val | Val << 1;
    return ConstantInt::get(Ty, Val);
  if (Operands[0]->getType()->isVectorTy()) {
    switch (IntrinsicID) {
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
      if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::wasm_anytrue:
      return Op->isZeroValue() ? ConstantInt::get(Ty, 0)
    case Intrinsic::wasm_alltrue:
      for (unsigned I = 0; I != E; ++I) {
          return ConstantInt::get(Ty, 0);
      return ConstantInt::get(Ty, 1);
      if (FCmp->isSignaling()) {
      return ConstantInt::get(Call->getType()->getScalarType(), Result);
  LibFunc Func = NotLibFunc;
    const APFloat &Op1V = Op1->getValueAPF();
    const APFloat &Op2V = Op2->getValueAPF();
    case LibFunc_pow_finite:
    case LibFunc_powf_finite:
      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      if (TLI->has(Func)) {
        APFloat V = Op1->getValueAPF();
        return ConstantFP::get(Ty, V);
    case LibFunc_remainder:
    case LibFunc_remainderf:
      if (TLI->has(Func)) {
        APFloat V = Op1->getValueAPF();
        return ConstantFP::get(Ty, V);
    case LibFunc_atan2f:
    case LibFunc_atan2_finite:
    case LibFunc_atan2f_finite:
      return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
  assert(Operands.size() == 2 && "Wrong number of operands.");
  if (Ty->isFloatingPointTy()) {
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum:
    case Intrinsic::nvvm_fmax_d:
    case Intrinsic::nvvm_fmin_d:
    case Intrinsic::nvvm_fmax_f:
    case Intrinsic::nvvm_fmax_ftz_f:
    case Intrinsic::nvvm_fmax_ftz_nan_f:
    case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
    case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
    case Intrinsic::nvvm_fmax_nan_f:
    case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
    case Intrinsic::nvvm_fmax_xorsign_abs_f:
    case Intrinsic::nvvm_fmin_f:
    case Intrinsic::nvvm_fmin_ftz_f:
    case Intrinsic::nvvm_fmin_ftz_nan_f:
    case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
    case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
    case Intrinsic::nvvm_fmin_nan_f:
    case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
    case Intrinsic::nvvm_fmin_xorsign_abs_f:
      if (!IsOp0Undef && !IsOp1Undef)
        APInt NVCanonicalNaN(32, 0x7fffffff);
        return ConstantFP::get(
            Ty, APFloat(Ty->getFltSemantics(), NVCanonicalNaN));
        return ConstantFP::get(Ty, FTZPreserveSign(Op->getValueAPF()));
    const APFloat &Op1V = Op1->getValueAPF();
      if (Op2->getType() != Op1->getType())
      const APFloat &Op2V = Op2->getValueAPF();
      if (const auto *ConstrIntr =
        switch (IntrinsicID) {
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
        case Intrinsic::experimental_constrained_fsub:
        case Intrinsic::experimental_constrained_fmul:
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
        case Intrinsic::experimental_constrained_frem:
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
          return ConstantFP::get(Ty, Res);
      switch (IntrinsicID) {
      case Intrinsic::copysign:
      case Intrinsic::minnum:
        return ConstantFP::get(Ty, minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty, maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty, minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty, maximum(Op1V, Op2V));
      case Intrinsic::minimumnum:
        return ConstantFP::get(Ty, minimumnum(Op1V, Op2V));
      case Intrinsic::maximumnum:
        return ConstantFP::get(Ty, maximumnum(Op1V, Op2V));
      case Intrinsic::nvvm_fmax_d:
      case Intrinsic::nvvm_fmax_f:
      case Intrinsic::nvvm_fmax_ftz_f:
      case Intrinsic::nvvm_fmax_ftz_nan_f:
      case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
      case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
      case Intrinsic::nvvm_fmax_nan_f:
      case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
      case Intrinsic::nvvm_fmax_xorsign_abs_f:
      case Intrinsic::nvvm_fmin_d:
      case Intrinsic::nvvm_fmin_f:
      case Intrinsic::nvvm_fmin_ftz_f:
      case Intrinsic::nvvm_fmin_ftz_nan_f:
      case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
      case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
      case Intrinsic::nvvm_fmin_nan_f:
      case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
      case Intrinsic::nvvm_fmin_xorsign_abs_f: {
        bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d ||
                                        IntrinsicID == Intrinsic::nvvm_fmin_d);
        APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
        APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
        bool XorSign = false;
          XorSign = A.isNegative() ^ B.isNegative();
        bool IsFMax = false;
        switch (IntrinsicID) {
        case Intrinsic::nvvm_fmax_d:
        case Intrinsic::nvvm_fmax_f:
        case Intrinsic::nvvm_fmax_ftz_f:
        case Intrinsic::nvvm_fmax_ftz_nan_f:
        case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
        case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
        case Intrinsic::nvvm_fmax_nan_f:
        case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
        case Intrinsic::nvvm_fmax_xorsign_abs_f:
        if (ShouldCanonicalizeNaNs) {
          if (A.isNaN() && B.isNaN())
            return ConstantFP::get(Ty, NVCanonicalNaN);
          else if (IsNaNPropagating && (A.isNaN() || B.isNaN()))
            return ConstantFP::get(Ty, NVCanonicalNaN);
          if (A.isNaN() && B.isNaN())
        return ConstantFP::get(Ty, Res);
      case Intrinsic::nvvm_add_rm_f:
      case Intrinsic::nvvm_add_rn_f:
      case Intrinsic::nvvm_add_rp_f:
      case Intrinsic::nvvm_add_rz_f:
      case Intrinsic::nvvm_add_rm_d:
      case Intrinsic::nvvm_add_rn_d:
      case Intrinsic::nvvm_add_rp_d:
      case Intrinsic::nvvm_add_rz_d:
      case Intrinsic::nvvm_add_rm_ftz_f:
      case Intrinsic::nvvm_add_rn_ftz_f:
      case Intrinsic::nvvm_add_rp_ftz_f:
      case Intrinsic::nvvm_add_rz_ftz_f: {
        APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
        APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
        Res = IsFTZ ? FTZPreserveSign(Res) : Res;
        return ConstantFP::get(Ty, Res);
      case Intrinsic::nvvm_mul_rm_f:
      case Intrinsic::nvvm_mul_rn_f:
      case Intrinsic::nvvm_mul_rp_f:
      case Intrinsic::nvvm_mul_rz_f:
      case Intrinsic::nvvm_mul_rm_d:
      case Intrinsic::nvvm_mul_rn_d:
      case Intrinsic::nvvm_mul_rp_d:
      case Intrinsic::nvvm_mul_rz_d:
      case Intrinsic::nvvm_mul_rm_ftz_f:
      case Intrinsic::nvvm_mul_rn_ftz_f:
      case Intrinsic::nvvm_mul_rp_ftz_f:
      case Intrinsic::nvvm_mul_rz_ftz_f: {
        APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
        APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
        Res = IsFTZ ? FTZPreserveSign(Res) : Res;
        return ConstantFP::get(Ty, Res);
      case Intrinsic::nvvm_div_rm_f:
      case Intrinsic::nvvm_div_rn_f:
      case Intrinsic::nvvm_div_rp_f:
      case Intrinsic::nvvm_div_rz_f:
      case Intrinsic::nvvm_div_rm_d:
      case Intrinsic::nvvm_div_rn_d:
      case Intrinsic::nvvm_div_rp_d:
      case Intrinsic::nvvm_div_rz_d:
      case Intrinsic::nvvm_div_rm_ftz_f:
      case Intrinsic::nvvm_div_rn_ftz_f:
      case Intrinsic::nvvm_div_rp_ftz_f:
      case Intrinsic::nvvm_div_rz_ftz_f: {
        APFloat A = IsFTZ ? FTZPreserveSign(Op1V) : Op1V;
        APFloat B = IsFTZ ? FTZPreserveSign(Op2V) : Op2V;
        Res = IsFTZ ? FTZPreserveSign(Res) : Res;
        return ConstantFP::get(Ty, Res);
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      switch (IntrinsicID) {
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        return ConstantFP::get(Ty, Op1V * Op2V);
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
      case Intrinsic::is_fpclass: {
        return ConstantInt::get(Ty, Result);
      case Intrinsic::powi: {
        int Exp = static_cast<int>(Op2C->getSExtValue());
        switch (Ty->getTypeID()) {
          if (Ty->isHalfTy()) {
            return ConstantFP::get(Ty, Res);
  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
    switch (IntrinsicID) {
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
        return ConstantInt::get(
    case Intrinsic::scmp:
    case Intrinsic::ucmp:
        return ConstantInt::get(Ty, 0);
      if (IntrinsicID == Intrinsic::scmp)
        Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
      else
        Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
      return ConstantInt::get(Ty, Res, true);
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      switch (IntrinsicID) {
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
          ConstantInt::get(Ty->getContext(), Res),
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");
      if (IntrinsicID == Intrinsic::cttz)
    case Intrinsic::abs:
      assert(C1 && "Must be constant int");
      return ConstantInt::get(Ty, C0->abs());
    case Intrinsic::amdgcn_wave_reduce_umin:
    case Intrinsic::amdgcn_wave_reduce_umax:
    case Intrinsic::amdgcn_wave_reduce_max:
    case Intrinsic::amdgcn_wave_reduce_min:
    case Intrinsic::amdgcn_wave_reduce_add:
    case Intrinsic::amdgcn_wave_reduce_sub:
    case Intrinsic::amdgcn_wave_reduce_and:
    case Intrinsic::amdgcn_wave_reduce_or:
    case Intrinsic::amdgcn_wave_reduce_xor:
  switch (IntrinsicID) {
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
  APFloat MA(Sem), SC(Sem), TC(Sem);
  if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    Val.insertBits(B, I, 8);
  if (NumUndefBytes == 4)
  return ConstantInt::get(Ty, Val);
assert(Operands.size() == 3 && "Wrong number of operands.");
// ...
const APFloat &C1 = Op1->getValueAPF();
const APFloat &C2 = Op2->getValueAPF();
const APFloat &C3 = Op3->getValueAPF();
switch (IntrinsicID) {
case Intrinsic::experimental_constrained_fma:
case Intrinsic::experimental_constrained_fmuladd: {
  // ...compute the fused multiply-add under the intrinsic's rounding mode,
  // then fold only if the recorded opStatus is permitted...
  if (mayFoldConstrained(/* ...intrinsic and opStatus, elided... */))
    return ConstantFP::get(Ty, Res);
  return nullptr;
}
switch (IntrinsicID) {
default:
  break;
case Intrinsic::amdgcn_fma_legacy: {
  // The legacy behaviour is that multiplying +/-0.0 by anything, even NaN
  // or infinity, gives +0.0.
  if (C1.isZero() || C2.isZero())
    // Returning C3 directly would be wrong if C3 were -0.0; adding +0.0
    // canonicalizes the sign.
    return ConstantFP::get(Ty, APFloat(0.0f) + C3);
  [[fallthrough]];
}
case Intrinsic::fma:
case Intrinsic::fmuladd: {
  APFloat V = C1;
  V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
  return ConstantFP::get(Ty, V);
}
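// Illustrative example (not in the original source):
//   %r = call float @llvm.fma.f32(float 2.0, float 3.0, float 4.0)
// folds to float 10.0. Because fusedMultiplyAdd rounds only once, the
// folded result can legitimately differ from a separate fmul + fadd when
// the intermediate product is not exactly representable.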
case Intrinsic::nvvm_fma_rm_f:
case Intrinsic::nvvm_fma_rn_f:
case Intrinsic::nvvm_fma_rp_f:
case Intrinsic::nvvm_fma_rz_f:
case Intrinsic::nvvm_fma_rm_d:
case Intrinsic::nvvm_fma_rn_d:
case Intrinsic::nvvm_fma_rp_d:
case Intrinsic::nvvm_fma_rz_d:
case Intrinsic::nvvm_fma_rm_ftz_f:
case Intrinsic::nvvm_fma_rn_ftz_f:
case Intrinsic::nvvm_fma_rp_ftz_f:
case Intrinsic::nvvm_fma_rz_ftz_f: {
  bool IsFTZ = nvvm::FMAShouldFTZ(IntrinsicID);
  APFloat A = IsFTZ ? FTZPreserveSign(C1) : C1;
  APFloat B = IsFTZ ? FTZPreserveSign(C2) : C2;
  APFloat C = IsFTZ ? FTZPreserveSign(C3) : C3;
  APFloat Res = A;
  Res.fusedMultiplyAdd(B, C, nvvm::GetFMARoundingMode(IntrinsicID));
  Res = IsFTZ ? FTZPreserveSign(Res) : Res;
  return ConstantFP::get(Ty, Res);
}
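// Note (illustrative, not from the original source): the _ftz_ variants
// flush subnormal inputs and the subnormal result to zero while keeping the
// sign (FTZPreserveSign), and the rm/rn/rp/rz infix selects the rounding
// mode (toward negative, to nearest even, toward positive, toward zero)
// that GetFMARoundingMode returns.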
case Intrinsic::amdgcn_cubeid:
case Intrinsic::amdgcn_cubema:
case Intrinsic::amdgcn_cubesc:
case Intrinsic::amdgcn_cubetc: {
  APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
  return ConstantFP::get(Ty, V);
}
if (IntrinsicID == Intrinsic::smul_fix ||
    IntrinsicID == Intrinsic::smul_fix_sat) {
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))
    return nullptr;
  // ...
  assert(Scale < Width && "Illegal scale.");
  unsigned ExtendedWidth = Width * 2;
  APInt Product =
      (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
  if (IntrinsicID == Intrinsic::smul_fix_sat) {
    APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
    APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
    Product = APIntOps::smin(Product, Max);
    Product = APIntOps::smax(Product, Min);
  }
  return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
}
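// Worked example (illustrative, not from the original source): in Q16.16,
//   %r = call i32 @llvm.smul.fix.i32(i32 196608, i32 327680, i32 16)
// multiplies 3.0 (3 << 16) by 5.0 (5 << 16): the sign-extended 64-bit
// product is 15 << 32, and ashr by the scale of 16 yields 15 << 16,
// i.e. 15.0 in Q16.16, which is i32 983040.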
if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  bool IsRight = IntrinsicID == Intrinsic::fshr;
  if (!C2)
    return Operands[IsRight ? 1 : 0];
  // The shift amount is interpreted modulo the bit width; a zero shift
  // returns the corresponding operand unchanged.
  unsigned BitWidth = C2->getBitWidth();
  unsigned ShAmt = C2->urem(BitWidth).getZExtValue();
  if (!ShAmt)
    return Operands[IsRight ? 1 : 0];

  unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
  unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
  if (!C0)
    return ConstantInt::get(Ty, C1->lshr(LshrAmt));
  if (!C1)
    return ConstantInt::get(Ty, C0->shl(ShlAmt));
  return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
}
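// Worked example (illustrative, not from the original source): for
//   %r = call i8 @llvm.fshl.i8(i8 0xAB, i8 0xCD, i8 4)
// ShAmt = 4, so the fold computes (0xAB << 4) | (0xCD >> 4)
// = 0xB0 | 0x0C = i8 0xBC: the first operand shifts left and the second
// supplies the bits that rotate in.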
if (IntrinsicID == Intrinsic::amdgcn_perm)
  return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
// ...

if (Operands.size() == 1)
  return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

if (Operands.size() == 2) {
  if (Constant *FoldedLibCall =
          ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
    return FoldedLibCall;
  }
  return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
}

if (Operands.size() == 3)
  return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  // ...
  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[1];
    auto *Passthru = Operands[2];
    // ...
    auto *MaskElt = Mask->getAggregateElement(I);
    // ...
    auto *PassthruElt = Passthru->getAggregateElement(I);
    // ...
    if (MaskElt->isNullValue()) {
      // ...disabled lane: take the passthru element...
    } else if (MaskElt->isOneValue()) {
      // ...enabled lane: take the loaded vector element...
    }
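// Note (illustrative, not from the original source): a masked load from a
// constant folds lane by lane: where the mask bit is 0 the result lane
// comes from the passthru vector, where it is 1 it comes from the loaded
// constant, and an unknown mask bit blocks the fold.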
case Intrinsic::arm_mve_vctp8:
case Intrinsic::arm_mve_vctp16:
case Intrinsic::arm_mve_vctp32:
case Intrinsic::arm_mve_vctp64: {
  // ...
  for (unsigned i = 0; i < Lanes; i++) {
    // ...lane i of the predicate is true iff i is below the constant
    // element count...
  }
  // ...
}
case Intrinsic::get_active_lane_mask: {
  // ...
  uint64_t Limit = Op1->getZExtValue();
  // ...
  for (unsigned i = 0; i < Lanes; i++) {
    if (Base + i < Limit)
      NCs.push_back(ConstantInt::getTrue(Ty));
    else
      NCs.push_back(ConstantInt::getFalse(Ty));
  }
  // ...
}
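// Illustrative example (not in the original source): with Base = 6 and
// Limit = 8 on a 4-lane mask,
//   %m = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 6, i32 8)
// folds to <i1 true, i1 true, i1 false, i1 false>, since only lanes 6 and
// 7 are below the limit.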
case Intrinsic::vector_extract: {
  // ...
  unsigned VecNumElements =
      cast<FixedVectorType>(Vec->getType())->getNumElements();
  unsigned StartingIndex = Idx->getZExtValue();
  // Extracting the entire vector is a no-op.
  if (NumElements == VecNumElements && StartingIndex == 0)
    return Vec;
  for (unsigned I = StartingIndex, E = StartingIndex + NumElements; I < E;
       ++I) {
    Constant *Elt = Vec->getAggregateElement(I);
    // ...
    Result[I - StartingIndex] = Elt;
  }
  // ...
}
case Intrinsic::vector_insert: {
  // ...
  unsigned SubVecNumElements =
      cast<FixedVectorType>(SubVec->getType())->getNumElements();
  unsigned VecNumElements =
      cast<FixedVectorType>(Vec->getType())->getNumElements();
  unsigned IdxN = Idx->getZExtValue();
  // Inserting a subvector that covers the whole vector is a no-op.
  if (SubVecNumElements == VecNumElements && IdxN == 0)
    return SubVec;
  for (unsigned I = 0; I < VecNumElements; ++I) {
    // Lanes in [IdxN, IdxN + SubVecNumElements) come from the subvector;
    // the remaining lanes come from the original vector.
    if (I < IdxN + SubVecNumElements) {
      // ...
    }
  }
  // ...
}
case Intrinsic::vector_interleave2:
case Intrinsic::vector_interleave3:
case Intrinsic::vector_interleave4:
case Intrinsic::vector_interleave5:
case Intrinsic::vector_interleave6:
case Intrinsic::vector_interleave7:
case Intrinsic::vector_interleave8: {
  unsigned NumElements =
      cast<FixedVectorType>(Operands[0]->getType())->getNumElements();
  unsigned NumOperands = Operands.size();
  for (unsigned I = 0; I < NumElements; ++I) {
    for (unsigned J = 0; J < NumOperands; ++J) {
      Constant *Elt = Operands[J]->getAggregateElement(I);
      // ...
      Result[NumOperands * I + J] = Elt;
    }
  }
  // ...
}
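// Illustrative example (not in the original source): interleave2 on
//   <2 x i32> <i32 0, i32 1> and <2 x i32> <i32 10, i32 11>
// folds to <4 x i32> <i32 0, i32 10, i32 1, i32 11>: element I of operand
// J lands at result index NumOperands * I + J.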
case Intrinsic::wasm_dot: {
  unsigned NumElements =
      cast<FixedVectorType>(Operands[0]->getType())->getNumElements();
  assert(NumElements == 8 && Result.size() == 4 &&
         "wasm dot takes i16x8 and produces i32x4");
  assert(Ty->isIntegerTy());
  int32_t MulVector[8];
  for (unsigned I = 0; I < NumElements; ++I) {
    // ...multiply the I-th i16 lanes of the two operands with sign
    // extension...
  }
  for (unsigned I = 0; I < Result.size(); I++) {
    int64_t IAdd = (int64_t)MulVector[I * 2] + (int64_t)MulVector[I * 2 + 1];
    Result[I] = ConstantInt::get(Ty, IAdd);
  }
  // ...
}
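// Illustrative example (not in the original source): wasm.dot multiplies
// adjacent i16 lane pairs and sums each pair into an i32 lane, so for
// <8 x i16> operands splat(2) and splat(3) every result lane folds to
// 2*3 + 2*3 = 12.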
for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
  // ...intrinsics with a scalar operand at position J use it unsplatted...
  Lane[J] = Operands[J];
  // ...otherwise take lane I of the vector operand...
  Constant *Agg = Operands[J]->getAggregateElement(I);
  // ...
}
// Use the regular scalar folding to simplify this lane.
Constant *Folded =
    ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    // ...
    if (!Src || !Src->isNullValue())
      break;
    return ConstantInt::getFalse(SVTy);
  }
  case Intrinsic::get_active_lane_mask: {
    // ...
    if (Op0 && Op1 && Op0->getValue().uge(Op1->getValue()))
      return ConstantInt::getFalse(SVTy);
    break;
  }
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_interleave3:
  case Intrinsic::vector_interleave4:
  case Intrinsic::vector_interleave5:
  case Intrinsic::vector_interleave6:
  case Intrinsic::vector_interleave7:
  case Intrinsic::vector_interleave8: {
    Constant *SplatVal = Operands[0]->getSplatValue();
Constant *Folded = ConstantFoldScalarCall(/* ...arguments elided... */);
static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  // ...
  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
  // ...
  return {Result0, Result1};
}
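// Illustrative example (not in the original source): frexp splits a float
// into mantissa and exponent with 0.5 <= |mantissa| < 1, so
//   %r = call { double, i32 } @llvm.frexp.f64.i32(double 8.0)
// folds to { double 0.5, i32 4 } because 8.0 == 0.5 * 2^4.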
switch (IntrinsicID) {
case Intrinsic::frexp: {
  // ...
  if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
    // Fold a vector frexp lane by lane.
    for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
      Constant *Lane = Operands[0]->getAggregateElement(I);
      std::tie(Results0[I], Results1[I]) =
          ConstantFoldScalarFrexpCall(Lane, Ty1);
    }
    // ...
  }
  auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
  // ...
}
case Intrinsic::sincos: {
  // ...
  auto ConstantFoldScalarSincosCall =
      [&](Constant *Op) -> std::pair<Constant *, Constant *> {
    Constant *SinResult =
        ConstantFoldScalarCall(Name, Intrinsic::sin, TyScalar, Op, TLI, Call);
    Constant *CosResult =
        ConstantFoldScalarCall(Name, Intrinsic::cos, TyScalar, Op, TLI, Call);
    return std::make_pair(SinResult, CosResult);
  };
  // ...fold a vector sincos lane by lane...
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      Constant *Lane = Operands[0]->getAggregateElement(I);
      std::tie(SinResults[I], CosResults[I]) =
          ConstantFoldScalarSincosCall(Lane);
      if (!SinResults[I] || !CosResults[I])
        return nullptr;
    }
    // ...
  }
  auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
  if (!SinResult || !CosResult)
    return nullptr;
  // ...
}
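// Note (illustrative, not from the original source): sincos has no
// dedicated folder; each lane is folded by reusing the scalar folders for
// llvm.sin and llvm.cos and pairing the results into the returned struct.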
case Intrinsic::vector_deinterleave2:
case Intrinsic::vector_deinterleave3:
case Intrinsic::vector_deinterleave4:
case Intrinsic::vector_deinterleave5:
case Intrinsic::vector_deinterleave6:
case Intrinsic::vector_deinterleave7:
case Intrinsic::vector_deinterleave8: {
  auto *Vec = Operands[0];
  // ...
  for (unsigned I = 0; I != NumResults; ++I) {
    for (unsigned J = 0; J != NumElements; ++J) {
      // ...result vector I gathers every NumResults-th element of Vec,
      // starting at offset I...
    }
  }
  // ...
}
// ...
return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS}, Call);
Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI,
                                 bool AllowNonDeterministic) {
  if (Call->isNoBuiltin())
    return nullptr;
  // ...
  Type *Ty = F->getReturnType();
  if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
    return nullptr;
  // ...
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(Name, IID, FVTy, Operands,
                                       F->getDataLayout(), TLI, Call);
  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(Name, IID, SVTy, Operands,
                                          F->getDataLayout(), TLI, Call);
  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getDataLayout(), TLI, Call);
  // ...
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
}
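// Usage sketch (illustrative, not from the original source): given a
// CallBase *CB whose callee F and arguments have been resolved to
// constants, a pass can attempt the fold with
//   SmallVector<Constant *, 4> Ops = {/* constant args of CB */};
//   if (Constant *C = ConstantFoldCall(CB, F, Ops, &TLI))
//     CB->replaceAllUsesWith(C);
// where TLI is the TargetLibraryInfo for the enclosing function.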
bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  // ...
  if (Call->arg_size() == 1) {
    // ...
    switch (Func) {
    // ...
    case LibFunc_log10l:
    case LibFunc_log10:
    case LibFunc_log10f:
      // The log family is error-free only for NaN or positive non-zero
      // inputs.
      return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
    // ...
      return !Op.isNaN() && !Op.isZero() && !Op.isInfinity();
    // ...range checks with double and float bounds, bodies elided...
      if (OpC->getType()->isDoubleTy())
        // ...
      if (OpC->getType()->isFloatTy())
        // ...
    // ...
      if (OpC->getType()->isDoubleTy())
        // ...
      if (OpC->getType()->isFloatTy())
        // ...
    // ...
      return !Op.isInfinity();
    // ...
    case LibFunc_tanf: {
      // ...
      Type *Ty = OpC->getType();
      if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
        return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
      // ...
    }
    // ...
      if (OpC->getType()->isDoubleTy())
        // ...
      if (OpC->getType()->isFloatTy())
        // ...
    // ...
      return Op.isNaN() || Op.isZero() || !Op.isNegative();
  if (Call->arg_size() == 2) {
    // ...
    switch (Func) {
    // ...
    case LibFunc_powl:
    case LibFunc_pow:
    case LibFunc_powf: {
      // ...
      if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
        // ...
        return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
      }
      // ...
    }
    // ...
    case LibFunc_remainderl:
    case LibFunc_remainder:
    case LibFunc_remainderf:
    // ...
    case LibFunc_atan2f:
    case LibFunc_atan2l:
    // ...
case Instruction::BitCast:
  // ...
case Instruction::Trunc: {
  // ...the trunc preserves NSW only if the zext- and sext-based
  // reconstructions of C agree...
  Flags->NSW = ZExtC == SExtC;
  // ...
}
case Instruction::SExt:
case Instruction::ZExt: {
  // ...
  if (!CastInvC || CastInvC != C)
    return nullptr;
  if (Flags && CastOp == Instruction::ZExt) {
    // ...
    Flags->NNeg = CastInvC == SExtInvC;
  }
  // ...
}
// ...

void TargetFolder::anchor() {}