// Excerpted from llvm/lib/Target/AArch64/AArch64FrameLowering.cpp; elided
// regions are marked with "// ...". The option definitions are reconstructed
// from their registered names and descriptions.
#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> SplitSVEObjects("aarch64-split-sve-objects",
                                     cl::desc("Split allocation of ZPR & PPR objects"),
                                     cl::init(true), cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

static cl::opt<bool> DisableMultiVectorSpillFill(
    "aarch64-disable-multivector-spill-fill",
    cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false),
    cl::Hidden);
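// Editorial note: these are internal cl::opt flags, so in builds with option
// processing enabled they can be toggled on the llc command line using the
// names registered above, e.g. (a sketch, not a tested invocation):
//   llc -mtriple=aarch64 -aarch64-redzone -homogeneous-prolog-epilog foo.ll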
// In getArgumentStackToRestore():
  bool IsTailCallReturn = (MBB.end() != MBBI)
                              ? AArch64InstrInfo::isTailCallReturnInst(*MBBI)
                              : false;

  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // The stack adjustment is carried as an immediate operand on the
    // tail-call return instruction.
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    ArgumentPopSize = StackAdjust.getImm();
  }
  // ...
  return ArgumentPopSize;
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  // ...
  // Bail if a stack adjustment is needed on return.
  if (Exit && getArgumentStackToRestore(MF, *Exit))
    return false;

  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  unsigned NumGPRs = 0;
  for (unsigned I = 0; CSRegs[I]; ++I) {
    Register Reg = CSRegs[I];
    if (Reg == AArch64::LR) {
      assert(CSRegs[I + 1] == AArch64::FP);
      if (NumGPRs % 2 != 0)
        return false;
      break;
    }
    // ...
  }
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  // ...
}

// In estimateRSStackSizeLimit():
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;
// getFixedObjectSize() — tail of the signature, then selected body lines:
                                   bool IsWin64, bool IsFunclet) const {
  assert(AFI->getTailCallReservedStack() % 16 == 0 &&
         "Tail call reserved stack must be aligned to 16 bytes");
  if (!IsWin64 || IsFunclet) {
    // ...
  }
  // ...
  if (MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftAsync))
    // ...

  // Fixed-area space reserved for Win64 EH catch objects:
  int FrameIndex = H.CatchObj.FrameIndex;
  if ((FrameIndex != INT_MAX) && CatchObjFrameIndices.insert(FrameIndex)) {
    FixedObjectSize =
        alignTo(FixedObjectSize, MFI.getObjectAlign(FrameIndex)) +
        MFI.getObjectSize(FrameIndex);
  }
  // ...
  FixedObjectSize += 8;
  // ...
  return alignTo(FixedObjectSize, 16);
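// Editorial sketch (not part of the original file): the alignment arithmetic
// used by getFixedObjectSize(), with plain integers standing in for LLVM's
// llvm::alignTo. All names here are hypothetical.
#include <cassert>
#include <cstdint>

static uint64_t alignToPow2(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

int main() {
  uint64_t FixedObjectSize = 24; // hypothetical running total of fixed objects
  FixedObjectSize += 8;          // the extra 8-byte slot appended above
  assert(alignToPow2(FixedObjectSize, 16) == 32); // rounded up to 16 bytes
  return 0;
}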
// In canUseRedZone():
  const unsigned RedZoneSize =
      Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
  // ...
  bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
                                 !Subtarget.isNeonAvailable();
  // ...
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           // ...
           LowerQRegCopyThroughMem);

// In hasFPImpl():
  // ...
      RegInfo->hasStackRealignment(MF))
    return true;
// In isFPReserved():
  const Triple &TT = TM.getTargetTriple();
  // ...
  if (TT.isOSDarwin() || TT.isOSWindows())
    return true;
  // ...
  if (TM.Options.FramePointerIsReserved(MF))
    return true;
// In eliminateCallFramePseudoInstr():
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  // ...
  int64_t Amount = I->getOperand(0).getImm();
  // ...
  if (CalleePopAmount == 0) {
    // ...
    assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
    // ...
    assert(MFI.hasVarSizedObjects() &&
           "non-reserved call frame without var sized objects?");
    // ...
  } else if (CalleePopAmount != 0) {
    // ...
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    // ...
  }
// In resetCFIToInitialState():
  const auto &TRI = *Subtarget.getRegisterInfo();
  // ...
  CFIBuilder.buildDefCFA(AArch64::SP, 0);

  // Flip the RA sign state:
  if (MFI.shouldSignReturnAddress(MF))
    MFI.branchProtectionPAuthLR() ? CFIBuilder.buildNegateRAStateWithPC()
                                  : CFIBuilder.buildNegateRAState();

  // Shadow call stack uses X18; reset it:
  if (MFI.needsShadowCallStackPrologueEpilogue(MF))
    CFIBuilder.buildSameValue(AArch64::X18);

  // Every callee-saved register still holds its initial value:
  const std::vector<CalleeSavedInfo> &CSI =
      MF.getFrameInfo().getCalleeSavedInfo();
  for (const auto &Info : CSI) {
    MCRegister Reg = Info.getReg();
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    CFIBuilder.buildSameValue(Reg);
  }
// In getRegisterOrZero() — the CASE token-pasting macro for GPRs...
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n;
// ...and its counterpart for FP/SIMD registers:
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
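// Editorial sketch (hypothetical miniature, not LLVM code): how the CASE
// token-pasting macros above expand. CASE(0) pastes "0" onto each register
// prefix, producing the case labels for W0/X0 and returning the X form.
#define CASE(n)                                                                \
  case W##n:                                                                   \
  case X##n:                                                                   \
    return X##n

enum Reg { W0, X0, W1, X1 };

static Reg canonicalGPR(Reg R) {
  switch (R) {
    CASE(0);
    CASE(1);
  }
  return R;
}

int main() { return canonicalGPR(W1) == X1 ? 0 : 1; }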
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // ...
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  // ...
  BitVector GPRsToZero(TRI.getNumRegs());
  BitVector FPRsToZero(TRI.getNumRegs());
  // ...
  if (TRI.isGeneralPurposeRegister(MF, Reg)) {
    // ...
      GPRsToZero.set(XReg);
  } else {
    // ...
      FPRsToZero.set(XReg);
  }
  // ...
  // Zero out the GPRs:
  for (MCRegister Reg : GPRsToZero.set_bits())
    // ...
  // Zero out the FP/SIMD registers:
  for (MCRegister Reg : FPRsToZero.set_bits())
    // ...
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
      // ...
  }
bool AArch64FrameLowering::windowsRequiresStackProbe(
    const MachineFunction &MF, uint64_t StackSizeInBytes) const {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64FunctionInfo &MFI = *MF.getInfo<AArch64FunctionInfo>();
  // ...
  return /* ... && */
         StackSizeInBytes >= uint64_t(MFI.getStackProbeSize());
}

// In getLiveRegsForEntryMBB():
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

MCRegister AArch64FrameLowering::findScratchNonCalleeSaveRegister(
    MachineBasicBlock *MBB, bool HasCall) const {
  // ...
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  // ...
  LivePhysRegs LiveRegs(TRI);
  // ...
  if (HasCall) {
    // A call may clobber the intra-procedure-call registers (X16/X17) and
    // the platform register (X18).
    LiveRegs.addReg(AArch64::X16);
    LiveRegs.addReg(AArch64::X17);
    LiveRegs.addReg(AArch64::X18);
  }
  // Prefer X9 as the prologue scratch register when it is free.
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}
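// Editorial sketch (self-contained, hypothetical names): the scratch-register
// search order above — try X9 first, then scan the whole class — expressed
// over plain integers.
#include <optional>
#include <vector>

static std::optional<int> findScratch(const std::vector<bool> &Live) {
  const int X9 = 9;
  if (!Live[X9])
    return X9;
  for (int Reg = 0; Reg < (int)Live.size(); ++Reg)
    if (!Live[Reg])
      return Reg;
  return std::nullopt; // the AArch64::NoRegister analogue
}

int main() {
  std::vector<bool> Live(32, false);
  Live[9] = true; // X9 unavailable
  return findScratch(Live).value() == 0 ? 0 : 1; // falls back to first free
}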
// In canUseAsPrologue():
  if (/* ... */ MBB.isLiveIn(AArch64::NZCV))
    return false;
  // ...
  if (findScratchNonCalleeSaveRegister(TmpMBB) == AArch64::NoRegister)
    return false;
  // May also need a scratch register for a call if stack probing is needed.
  if (/* ... */
      windowsRequiresStackProbe(*MF, std::numeric_limits<uint64_t>::max()))
    if (findScratchNonCalleeSaveRegister(TmpMBB, /*HasCall=*/true) ==
        AArch64::NoRegister)
      return false;

// In needsWinCFI():
  return /* ... && */ F.needsUnwindTableEntry();

bool AArch64FrameLowering::shouldSignReturnAddressEverywhere(
    const MachineFunction &MF) const {
  // ...
  return SignReturnAddressAll;
}
// In the SEH-emission helper that pairs each prologue/epilogue instruction
// with its Windows unwind pseudo:
  unsigned Opc = MBBI->getOpcode();
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  // ...
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    /* ... */
  }
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    /* ... */
  }
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              /* ... */;
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                /* ... */;
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                /* ... */;
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              /* ... */;
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    /* ... */
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    /* ... */
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    // ...
    int SEHReg0 = RegInfo->getSEHRegNum(Reg0);
    int SEHReg1 = RegInfo->getSEHRegNum(Reg1);

    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = /* ... frame record form ... */;
    else if (SEHReg0 >= 19 && SEHReg1 >= 19)
      MIB = /* ... ordinary callee-saved pair form ... */;
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegIP))
                /* ... */;
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegI))
              /* ... */;
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    /* ... */
  }
  case AArch64::STPQi:
  case AArch64::LDPQi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP))
              /* ... */;
    break;
  }
  case AArch64::LDPQpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPQpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX))
              /* ... */;
    break;
  }
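// Editorial note: each case above pairs a spill/restore instruction with a
// Windows ARM64 unwind pseudo. In assembly these surface as .seh_* directives,
// e.g. (a sketch of the usual correspondence, not output of this code):
//   stp x19, x20, [sp, #-32]!   ->   .seh_save_regp_x x19, 32
//   stp fp, lr, [sp, #-16]!     ->   .seh_save_fplr_x 16
// The *_X forms describe the pre/post-indexed (SP-adjusting) variants, which
// is why the post-increment cases negate Imm and fall through.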
  // ...
  if (ST.isTargetDarwin())
    // ...

// In emitPacRetPlusLeafHardening() — sign the return address at entry points
// and authenticate it in return blocks (pac-ret for the whole function):
  DL = MBBI->getDebugLoc();
  // ...
  EmitSignRA(MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      // ... (sign)
    if (MBB.isReturnBlock())
      // ... (authenticate)
  }
// In getFrameIndexReferenceFromSP():
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  // ...
  if (MFI.isVariableSizedObjectIndex(FI)) {
    // ...
  }
  // ...
  bool FPAfterSVECalleeSaves = /* ... */;
  if (MFI.hasScalableStackID(FI)) {
    if (FPAfterSVECalleeSaves &&
        /* ... */)
      report_fatal_error(
          "split-sve-objects not supported with FPAfterSVECalleeSaves");
    // ...
      AccessOffset = -PPRStackSize;
    return AccessOffset +
           /* ... */;
  }

  bool IsFixed = MFI.isFixedObjectIndex(FI);
  // ...
  if (!IsFixed && !IsCSR) {
    ScalableOffset = -SVEStackSize;
  } else if (FPAfterSVECalleeSaves && IsCSR) {
    // ...
  }

StackOffset AArch64FrameLowering::getFPOffset(const MachineFunction &MF,
                                              int64_t ObjectOffset) const {
  // ...
  bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  // ...
}

StackOffset AArch64FrameLowering::getStackOffset(const MachineFunction &MF,
                                                 int64_t ObjectOffset) const {
  // ...
}

// In getSEHFrameIndexOffset():
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
StackOffset AArch64FrameLowering::resolveFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  // ...
  int64_t ObjectOffset = MFI.getObjectOffset(FI);
  bool isFixed = MFI.isFixedObjectIndex(FI);
  // ...
  return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, StackID,
                                     FrameReg, PreferFP, ForSimm);
}

StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed,
    TargetStackID::Value StackID, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  // ...
  int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed();
  int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
  // ...
  bool isSVE = MFI.isScalableStackID(StackID);
  // ...
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  // ...
    PreferFP &= !SVEStackSize;
  // ...
  } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
    // ...
    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
    // ...
  } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
    // ...
    bool FPOffsetFits = !ForSimm || FPOffset >= -256;
    PreferFP |= Offset > -FPOffset && !SVEStackSize;

    if (FPOffset >= 0) {
      // ...
    } else if (MFI.hasVarSizedObjects()) {
      // ...
      bool CanUseBP = RegInfo->hasBasePointer(MF);
      if (FPOffsetFits && CanUseBP)
        // ...
    } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
      // ...
      assert(IsWin64 /* hypothetical name for the elided check */ &&
             "Funclets should only be present on Win64");
      // ...
    } else {
      if (FPOffsetFits && PreferFP)
        // ...
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame "
         "pointer");

  bool FPAfterSVECalleeSaves = /* ... */;
  // ...
      FPOffset -= PPRStackSize;
  // ...
      SPOffset -= PPRStackSize;
  // ...
  if (isSVE) {
    if (FPAfterSVECalleeSaves) {
      // ...
    }
    // ...
        RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      // ...
    }
    FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                           : AArch64::SP;
    // ...
  }

  StackOffset SVEAreaOffset = {};
  if (FPAfterSVECalleeSaves) {
    // ...
      SVEAreaOffset = SVECalleeSavedStack;
    // ...
      SVEAreaOffset = SVECalleeSavedStack - SVEStackSize;
    // ...
      SVEAreaOffset = SVEStackSize;
    // ...
      SVEAreaOffset = SVEStackSize - SVECalleeSavedStack;
  } else {
    if (UseFP && !(isFixed || isCSR))
      SVEAreaOffset = -SVEStackSize;
    if (!UseFP && (isFixed || isCSR))
      SVEAreaOffset = SVEStackSize;
  }

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    // ...
  }
  // ...
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
  }
// In produceCompactUnwindFrame():
  return /* ... && */
         !(Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         /* ... */;

static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  // ...
  if (Reg2 == AArch64::FP)
    return true;
  // ...
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // ...
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  // ...
}

/// Returns true if Reg1 and Reg2 cannot be paired using an ldp/stp
/// instruction.
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  // ...
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;
  // ...
}
struct RegPairInfo {
  // ...
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type;
  const TargetRegisterClass *RC;

  RegPairInfo() = default;
  bool isPaired() const { return Reg2.isValid(); }
  bool isScalable() const { return Type == PPR || Type == ZPR; }
};

MCRegister findFreePredicateReg(BitVector &SavedRegs) {
  for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
    if (SavedRegs.test(PReg)) {
      unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;
      return PNReg;
    }
  }
  // ...
}

bool enableMultiVectorSpillFill(const AArch64Subtarget &Subtarget,
                                MachineFunction &MF) {
  // ...
  bool IsLocallyStreaming = /* ... */;
  // The multi-vector LD/ST pair forms need SVE2.1, or SME2 while streaming
  // (but not in locally-streaming functions).
  return Subtarget.hasSVE2p1() ||
         (Subtarget.hasSME2() &&
          (!IsLocallyStreaming && Subtarget.isStreaming()));
}
// computeCalleeSaveRegisterPairs() — signature per the declaration, then
// selected body lines:
void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL,
                                    MachineFunction &MF,
                                    ArrayRef<CalleeSavedInfo> CSI,
                                    const TargetRegisterInfo *TRI,
                                    SmallVectorImpl<RegPairInfo> &RegPairs,
                                    bool NeedsFrameRecord) {
  // ...
  assert((/* ... */
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  // ...
  int StackFillDir = -1;
  // ...
  unsigned FirstReg = 0;
  // ...
    // When filling the stack in the opposite direction, walk the callee
    // saves in reverse order.
    FirstReg = Count - 1;
  // ...
  int ZPRByteOffset = 0;
  int PPRByteOffset = 0;
  // ...
  } else if (!FPAfterSVECalleeSaves) {
    // ...
  }

  // When iterating backwards, the loop condition relies on unsigned wraparound.
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::GPR;
      RPI.RC = &AArch64::GPR64RegClass;
    } else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR64;
      RPI.RC = &AArch64::FPR64RegClass;
    } else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR128;
      RPI.RC = &AArch64::FPR128RegClass;
    } else if (AArch64::ZPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::ZPR;
      RPI.RC = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::PPR;
      RPI.RC = &AArch64::PPRRegClass;
    } else if (RPI.Reg1 == AArch64::VG) {
      RPI.Type = RegPairInfo::VG;
      RPI.RC = &AArch64::FIXED_REGSRegClass;
    }
    // ...
    int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs
                                  ? PPRByteOffset
                                  : ZPRByteOffset;
    // ...
    // Insert hazard padding as we transition between GPR and FPR saves.
    if (HasCSHazardPadding &&
        /* ... */)
      ByteOffset += StackFillDir * StackHazardSize;
    // ...
    int Scale = TRI->getSpillSize(*RPI.RC);

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) {
      MCRegister NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            /* ... */)
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
        break;
      case RegPairInfo::ZPR:
        if (/* ... */
            ((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1)) {
          // Calculate the offset the pair would get, to see whether a pair
          // instruction can be used.
          int Offset = (ScalableByteOffset + StackFillDir * 2 * Scale) / Scale;
          // ...
        }
        break;
      case RegPairInfo::VG:
        break;
      }
    }

    // ...
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // ...
    assert((/* ... */
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2)) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    if (NeedsWinCFI && RPI.isPaired())
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
    // ...
    if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
      ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
    }

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context sits directly before FP, so allocate an extra
    // 8 bytes for it.
    if (NeedsFrameRecord && /* ... */
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;

    // Pad an unpaired, non-scalable save so the callee-save area stays
    // 16-byte aligned.
    if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      // ...
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // Filling top-down wants the offset after incrementing; filling
    // bottom-up (WinCFI) wants the original offset.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;

    if (NeedsFrameRecord && /* ... */
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert((!RPI.isPaired() ||
            (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");

    auto isFrameRecord = [&] {
      if (RPI.isPaired())
        return IsWindows ? RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR
                         : RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP;
      // ...
      return i > 0 && RPI.Reg1 == AArch64::FP &&
             CSI[i - 1].getReg() == AArch64::LR;
    };

    if (NeedsFrameRecord && isFrameRecord())
      // ...
  }
  // ...
    std::reverse(RegPairs.begin(), RegPairs.end());
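// Editorial sketch (self-contained): the LDP/STP immediate ranges enforced by
// the assert above. A paired load/store encodes a signed 7-bit offset scaled
// by the access size, hence -64..63 once scaled (-256..255 for the scalable
// register forms).
#include <cassert>

static bool fitsLdpStpImm7(int ScaledOffset) {
  return ScaledOffset >= -64 && ScaledOffset <= 63;
}

int main() {
  assert(fitsLdpStpImm7(-64) && fitsLdpStpImm7(63));
  assert(!fitsLdpStpImm7(64));
  return 0;
}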
// In spillCalleeSavedRegisters():
  MRI.freezeReservedRegs();

  if (homogeneousPrologEpilog(MF)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    // ...
  }

  bool PTrueCreated = false;
  // ...
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      break;
    case RegPairInfo::ZPR:
      StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      break;
    case RegPairInfo::VG:
      StrOpc = AArch64::STRXui;
      break;
    }
    // ...
    if (X0Scratch != AArch64::NoRegister)
      // ...

    if (Reg1 == AArch64::VG) {
      // Find a free register to hold the value of VG.
      Reg1 = findScratchNonCalleeSaveRegister(&MBB, true);
      assert(Reg1 != AArch64::NoRegister);
      // ...
        // ... (is X0 live into this block?)
        return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
            AArch64::X0, LiveIn.PhysReg);
      // ...
      RTLIB::Libcall LC = RTLIB::SMEABI_GET_CURRENT_VG;
      // ...
          TRI->getCallPreservedMask(MF, TLI.getLibcallCallingConv(LC));
      // ...
    }

    LLVM_DEBUG(dbgs() << /* ... */ ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               /* ... */);

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired; swap here so the code below saves (x, x+1) and not (x+1, x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
      assert(/* ... */
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ...
      }
      // ...
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(/*PairRegs*/ AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
      // ...
    } else {
      // ...
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
        // ...
      }
      // ...
    }
    // ...
    if (RPI.Type == RegPairInfo::ZPR) {
      // ...
    } else if (RPI.Type == RegPairInfo::PPR) {
      // ...
    }
// In restoreCalleeSavedRegisters():
  DL = MBBI->getDebugLoc();
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
    }
    // ...
  }

  // Restore the SVE registers in increasing order.
  auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
  auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
  auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
  std::reverse(PPRBegin, PPREnd);
  auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
  auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
  auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
  std::reverse(ZPRBegin, ZPREnd);

  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : RegPairs) {
    // ...
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      break;
    case RegPairInfo::ZPR:
      LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      break;
    case RegPairInfo::VG:
      continue;
    }
    LLVM_DEBUG(dbgs() << /* ... */ ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               /* ... */);
    // ...
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      // ... (swap, as on the spill path)
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
      assert(/* ... */
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ...
      }
      // ...
      MIB.addReg(/*PairRegs*/ AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
                 getDefRegState(true));
      // ...
    } else {
      // ...
      if (RPI.isPaired()) {
        // ...
      }
      // ...
    }
  }

// In getMMOFrameID():
      return std::optional<int>(PSV->getFrameIndex());
  // ...
  return std::nullopt;

static std::optional<int> getLdStFrameID(const MachineInstr &MI,
                                         const MachineFrameInfo &MFI) {
  if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
    return std::nullopt;
  return getMMOFrameID(*MI.memoperands_begin(), MFI);
}

static bool isPPRAccess(const MachineInstr &MI) {
  return AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
}
void AArch64FrameLowering::determineStackHazardSlot(
    MachineFunction &MF, BitVector &SavedRegs) const {
  // ...
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (StackHazardSize == 0 || StackHazardSize % 16 != 0 ||
      AFI->hasStackHazardSlotIndex())
    return;
  // ...
  bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
    return AArch64::FPR64RegClass.contains(Reg) ||
           AArch64::FPR128RegClass.contains(Reg) ||
           AArch64::ZPRRegClass.contains(Reg);
  });
  bool HasPPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
    return AArch64::PPRRegClass.contains(Reg);
  });
  // ...
  bool HasFPRStackObjects = false;
  bool HasPPRStackObjects = false;
  // ...
    enum SlotType : uint8_t {
      // ... (Unknown / ZPRorFPR / PPR / GPR flags)
    };
    // ...
    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        // ...
        if (!FI || FI < 0 || FI > int(SlotTypes.size()))
          continue;
        // ...
                ? SlotType::ZPRorFPR
                : /* ... */;
      }
    }
    // ...
    for (int FI = 0; FI < int(SlotTypes.size()); ++FI) {
      HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR;
      // ...
      if (SlotTypes[FI] == SlotType::PPR) {
        // ...
        HasPPRStackObjects = true;
      }
    }

  if (HasFPRCSRs || HasFPRStackObjects) {
    // ...
    LLVM_DEBUG(dbgs() << /* ... */ << StackHazardSize << "\n");
    // ...
  }

  // With SplitSVEObjects, decide whether splitting is worthwhile:
    if (!HasPPRCSRs && !HasPPRStackObjects) {
      LLVM_DEBUG(
          dbgs() << "Not using SplitSVEObjects as no PPRs are on the stack\n");
      return;
    }

    if (!HasFPRCSRs && !HasFPRStackObjects) {
      LLVM_DEBUG(
          dbgs()
          << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n");
      return;
    }
    // ...
    if (/* ... */
        MF.getFunction().getCallingConv())) {
      LLVM_DEBUG(
          dbgs() << "Calling convention is not supported with SplitSVEObjects");
      return;
    }

    [[maybe_unused]] const AArch64Subtarget &Subtarget =
        MF.getSubtarget<AArch64Subtarget>();
    assert(Subtarget.isSVEorStreamingSVEAvailable() &&
           "Expected SVE to be available for PPRs");

    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    // ...
    // Promote any FPR callee saves to their containing ZPRs so the hazard
    // padding separates them from the PPR area:
    BitVector FPRZRegs(SavedRegs.size());
    for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) {
      BitVector::reference RegBit = SavedRegs[Reg];
      if (!RegBit)
        continue;
      unsigned SubRegIdx = 0;
      if (AArch64::FPR64RegClass.contains(Reg))
        SubRegIdx = AArch64::dsub;
      else if (AArch64::FPR128RegClass.contains(Reg))
        SubRegIdx = AArch64::zsub;
      else
        continue;
      // ...
      MCRegister ZReg =
          TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass);
      FPRZRegs.set(ZReg);
      RegBit = false;
    }
    SavedRegs |= FPRZRegs;
// In determineCalleeSaves():
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
  // ...
  MCRegister BasePointerReg =
      RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() : MCRegister();
  // ...
  unsigned ExtraCSSpill = 0;
  bool HasUnpairedGPR64 = false;
  bool HasPairZReg = false;
  BitVector UserReservedRegs = RegInfo->getUserReservedRegs(MF);
  BitVector ReservedRegs = RegInfo->getReservedRegs(MF);

  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];
    // The base pointer must be preserved when in use.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);
    // ...
    // Don't save manually reserved registers.
    if (UserReservedRegs[Reg]) {
      SavedRegs.reset(Reg);
      continue;
    }

    bool RegUsed = SavedRegs.test(Reg);
    MCRegister PairedReg = AArch64::NoRegister;
    const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
    if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg)) {
      // Compute the potentially paired register.
      if (HasUnpairedGPR64)
        PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
      else
        PairedReg = CSRegs[i ^ 1];
      // ...
      // Once an unpaired GPR64 shows up, the pairing pattern shifts by one.
      if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) {
        PairedReg = AArch64::NoRegister;
        HasUnpairedGPR64 = true;
      }
      assert(PairedReg == AArch64::NoRegister ||
             AArch64::GPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR128RegClass.contains(Reg, PairedReg));
    }

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) && !ReservedRegs[Reg]) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // Keep pairs intact when producing paired registers.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !ReservedRegs[PairedReg])
        ExtraCSSpill = PairedReg;
    }
    // Note any pair of saved ZRegs, so a predicate can be picked for
    // multi-vector spill/fill.
    HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
                    SavedRegs.test(CSRegs[i ^ 1]));
  }

  // ...
    if (PnReg.isValid())
      // ...
    else {
      // ...
      SavedRegs.set(AArch64::P8);
      // ...
    }
    assert(/* ... */
           "Predicate cannot be a reserved register");
  // ...
    SavedRegs.set(AArch64::X18);

  // Determine whether a hazard slot is needed and where it should go.
  determineStackHazardSlot(MF, SavedRegs);

  unsigned CSStackSize = 0;
  unsigned ZPRCSStackSize = 0;
  unsigned PPRCSStackSize = 0;
  // ...
  for (unsigned Reg : SavedRegs.set_bits()) {
    auto *RC = TRI->getMinimalPhysRegClass(Reg);
    assert(RC && "expected register class!");
    auto SpillSize = TRI->getSpillSize(*RC);
    bool IsZPR = AArch64::ZPRRegClass.contains(Reg);
    bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg);
    if (IsZPR /* ... */)
      ZPRCSStackSize += SpillSize;
    else if (IsPPR /* ... */)
      PPRCSStackSize += SpillSize;
    else
      CSStackSize += SpillSize;
  }
  // ...
  unsigned NumSavedRegs = SavedRegs.count();
  // ...
    SavedRegs.set(AArch64::LR);
  // ...
  if (/* ... */
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG({
    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
      // ...
  });

  // ...
  auto [ZPRLocalStackSize, PPRLocalStackSize] =
      determineSVEStackSizes(MF, AssignObjectOffsets::No);
  uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize;
  uint64_t SVEStackSize =
      alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
  // ...
  int64_t CalleeStackUsed = 0;
  // ...
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  // ...
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);
  // ...
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      ExtraCSSpill = UnspilledCSGPR;

      if (producePairRegisters(MF)) {
        if (UnspilledCSGPRPaired == AArch64::NoRegister) {
          // No pair could be formed; revert the extra spill.
          SavedRegs.reset(UnspilledCSGPR);
          ExtraCSSpill = AArch64::NoRegister;
        } else {
          SavedRegs.set(UnspilledCSGPRPaired);
        }
      }
    }
    // ...
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      int FI = MFI.CreateSpillStackObject(Size, Alignment);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");

  // Adjust for any callee saves added since NumSavedRegs was snapshotted.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
  // ...
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");

  assert(/* ... */
         "Should not invalidate callee saved info");
bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  // ...
    std::reverse(CSI.begin(), CSI.end());
  // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
  // ...
  // Insert VG into the list of CSRs, immediately before LR if LR is saved:
    auto It =
        find_if(CSI, [](auto &Info) { return Info.getReg() == AArch64::LR; });
    if (It != CSI.end())
      CSI.insert(It, VGInfo);
    else
      CSI.push_back(VGInfo);
  // ...
  int HazardSlotIndex = std::numeric_limits<int>::max();
  for (auto &CS : CSI) {
    // ...
    // Create a hazard slot as we switch between GPR and FPR CSRs:
      assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
             "Unexpected register order for hazard slot");
      // ...
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                        << "\n");
      // ...
      if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
        MinCSFrameIndex = HazardSlotIndex;
      if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
        MaxCSFrameIndex = HazardSlotIndex;
    // ...
    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);
    // ...
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (/* ... */
        Reg == AArch64::FP) {
      // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }

  // Add a hazard slot even when no FPR CSRs are present:
  if (/* ... */
      HazardSlotIndex == std::numeric_limits<int>::max()) {
    // ...
    LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                      << "\n");
    // ...
    if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
      MinCSFrameIndex = HazardSlotIndex;
    if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
      MaxCSFrameIndex = HazardSlotIndex;
  }
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min,
                                      int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();
  // ...
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");
      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}
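// Editorial sketch (self-contained): the sentinel pattern used by
// getSVECalleeSaveSlotRange() — start Min/Max at their extreme values, shrink
// them over the items, and report "found" when Min has moved.
#include <algorithm>
#include <cassert>
#include <limits>
#include <vector>

int main() {
  int Min = std::numeric_limits<int>::max();
  int Max = std::numeric_limits<int>::min();
  for (int FrameIdx : std::vector<int>{3, 4, 5}) { // hypothetical SVE CS slots
    Min = std::min(Min, FrameIdx);
    Max = std::max(Max, FrameIdx);
  }
  assert(Min == 3 && Max == 5 && Min != std::numeric_limits<int>::max());
  return 0;
}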
// In determineSVEStackSizes():
  uint64_t &ZPRStackTop = SVEStack.ZPRStackSize;
  // ...
  assert(/* ... */
         "SVE vectors should never be passed on the stack by value, only by "
         "reference.");
  // ...
  auto AllocateObject = [&](int FI) {
    // ...
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");
    // ...
    StackTop = alignTo(StackTop, Alignment);
    assert(StackTop < (uint64_t)std::numeric_limits<int64_t>::max() &&
           "SVE StackTop far too large?!");
    int64_t Offset = -int64_t(StackTop);
    // ...
  };

  // Allocate the SVE callee-save slots first, then the remaining objects.
  int MinCSFrameIndex, MaxCSFrameIndex;
  if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex))
    for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI)
      AllocateObject(FI);
  // ...
  int StackProtectorFI = -1;
  // ...
    ObjectsToAllocate.push_back(StackProtectorFI);
  // ...
    if (MaxCSFrameIndex >= FI && FI >= MinCSFrameIndex)
      continue;
  // ...
  for (unsigned FI : ObjectsToAllocate)
    AllocateObject(FI);

// In processFunctionBeforeFrameFinalized():
  assert(/* ... */ "Upwards growing stack unsupported");
  // ...
  // Win64 EH: lay out the fixed area for catch objects and UnwindHelp.
  int64_t CurrentOffset = /* ... */;
  // ...
    int FrameIndex = H.CatchObj.FrameIndex;
    if ((FrameIndex != INT_MAX) && MFI.getObjectOffset(FrameIndex) == 0) {
      // ...
    }
  // ...
  int64_t UnwindHelpOffset = alignTo(CurrentOffset + 8, Align(16));
  assert(UnwindHelpOffset == getFixedObjectSize(MF, AFI, /*IsWin64=*/true,
                                                /*IsFunclet=*/false) &&
         "UnwindHelpOffset must be at the start of the fixed object area");
  // ...
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

// Scavenging a scratch register after frame setup:
  RS->enterBasicBlockEnd(MBB);
  // ...
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  // ...
};

class TagStoreEdit {
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  // ...
  StackOffset FrameRegOffset;
  // ...
  std::optional<int64_t> FrameRegUpdate;
  // ...
  unsigned FrameRegUpdateFlags;
  // ...
public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
    MF = MBB->getParent();
    MRI = &MF->getRegInfo();
  }
  // ...
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  void clear() { TagStores.clear(); }
  // ...
  void emitCode(MachineBasicBlock::iterator &InsertI,
                const AArch64FrameLowering *TFI, bool TryMergeSPUpdate);
};

// In TagStoreEdit::emitUnrolled():
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;
  // ...
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily 16-byte aligned:
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    // ...
    BaseRegOffsetBytes = 0;
  }
  // ...
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : /* ... */;
    assert(BaseRegOffsetBytes % 16 == 0);
    // ...
            .addImm(BaseRegOffsetBytes / 16)
    // ...
    if (BaseRegOffsetBytes == 0)
      // ...
    BaseRegOffsetBytes += InstrSize;
  // ...

// In TagStoreEdit::emitLoop():
  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  // ...
  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte chunk so
  // it can be handled by a post-indexed STG after the loop:
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            /* ... */;
  // ...
  LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  LLVM_DEBUG(dbgs() << "TagStoreEdit::emitLoop: LoopSize=" << LoopSize
                    << ", Size=" << Size
                    << ", ExtraBaseRegUpdate=" << ExtraBaseRegUpdate
                    << ", FrameRegUpdate=" << FrameRegUpdate
                    << ", FrameRegOffset.getFixed()="
                    << FrameRegOffset.getFixed() << "\n");
  if (LoopSize < Size) {
    // ...
    int64_t STGOffset = ExtraBaseRegUpdate + 16;
    assert(STGOffset % 16 == 0 && STGOffset >= -4096 && STGOffset <= 4080 &&
           "STG immediate out of range");
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        /* ... */;
  } else if (ExtraBaseRegUpdate) {
    // Update the base register.
    int64_t AddSubOffset = std::abs(ExtraBaseRegUpdate);
    assert(AddSubOffset <= 4095 && "ADD/SUB immediate out of range");
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        /* ... */;
  }

// Can *II be folded into the tag-store sequence as a trailing SP update?
static bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                              int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    // ...
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    // ...
    const int64_t kMaxOffset = 4080 - 16;
    // ...
    const int64_t kMinOffset = -4095;
    if (PostOffset <= kMaxOffset && PostOffset >= kMinOffset &&
        PostOffset % 16 == 0) {
      // ...
    }
  }
  // ...
}

// In mergeMemRefs():
  for (auto &TS : TSE) {
    // ...
    if (MI->memoperands_empty()) {
      // ...
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }

void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();
  // ...
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/,
      /* ... */);
  // ...
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {
      // ...
    }
  });
  // ...
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  // ...
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // ...
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        // ...
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    // ...

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}
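// Editorial sketch (self-contained): MTE tag stores work on 16-byte granules;
// STG covers one granule and ST2G two, which is the InstrSize = (Size > 16)
// ? 32 : 16 step seen in emitUnrolled() above.
#include <cassert>

int main() {
  int Covered = 0, Size = 48;
  while (Covered < Size) {
    int InstrSize = (Size - Covered > 16) ? 32 : 16; // ST2G vs. STG
    Covered += InstrSize;
  }
  assert(Covered == 48); // one ST2G (32 bytes) plus one STG (16 bytes)
  return 0;
}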
static bool isMergeableStackTaggingInstruction(MachineInstr &MI,
                                               int64_t &Offset, int64_t &Size,
                                               bool &ZeroData) {
  // ...
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    // ...
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = /* ... */ +
           16 * MI.getOperand(2).getImm();
  return true;
}

// In tryMergeAdjacentSTG():
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;
  // ...
  constexpr int kScanLimit = 10;
  for (auto NextI = /* ... */;
       NextI != E && Count < kScanLimit; ++NextI) {
    // ...
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      // ...
    }
    // ...
    if (!MI.isTransient())
      ++Count;
    // ...
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects() || MI.isCall())
      break;
  }
  // ...
  LiveRegs.addLiveOuts(*MBB);
  // ...
    LiveRegs.stepBackward(*I);
  // ...
  if (LiveRegs.contains(AArch64::NZCV))
    return InsertI;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    // ...
  }

  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap; flush the run collected so far.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      // ...
    }
    TSE.addInstruction(Instr);
    // ...
  }
  // ...

// In processFunctionBeforeFrameIndicesReplaced():
      II = tryMergeAdjacentSTG(II, this, RS);
// PAC-RET handling:
  if (/* ... */
      shouldSignReturnAddressEverywhere(MF))
    // ... (harden the whole function with pac-ret)

StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  // ...
  if (IgnoreSPUpdates) {
    // ...
    FrameReg = AArch64::SP;
    // ...
  }
  // ...
  FrameReg = AArch64::SP;
  // ...
}
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // The group this object belongs to should be placed first.
  bool GroupFirst = false;

  // Used to distinguish between FP and GPR accesses.
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };
  // ...
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // ...
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        // ...
      }
      // ...
    }
    CurrentMembers.clear();
  }
};
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // ...
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
}
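// Editorial sketch (self-contained): std::make_tuple comparison is
// lexicographic, so FrameObjectCompare sorts valid objects (where !IsValid is
// false) ahead of invalid ones, then orders by access class and group.
#include <cassert>
#include <tuple>

int main() {
  auto ValidGPRObj = std::make_tuple(false, /*Accesses=*/4, /*Index=*/1);
  auto InvalidObj = std::make_tuple(true, /*Accesses=*/0, /*Index=*/0);
  assert(ValidGPRObj < InvalidObj); // valid objects are allocated first
  return 0;
}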
// In orderFrameObjects():
  if (!OrderFrameObjects || ObjectsToAllocate.empty())
    return;
  // ...
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify tag-store groups and classify FPR vs. GPR accesses.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      // ...
      if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
        if (/* ... scalable or FP/NEON access ... */)
          FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
        else
          FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
      }
      // ...
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        // ...
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        // ...
      }
      // ...
      if (/* ... */
          FrameObjects[FI].IsValid)
        // ...
          GB.AddMember(TaggedFI);
        // ...
          GB.EndCurrentGroup();
      // ...
    }
    GB.EndCurrentGroup();
  }
  // ...
      FrameObject::AccessHazard;
    // If an object is unknown or both GPR and FPR, sort it into GPR.
    for (auto &Obj : FrameObjects)
      if (!Obj.Accesses ||
          Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
        Obj.Accesses = FrameObject::AccessGPR;
  // ...
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  // ...
  for (auto &Obj : FrameObjects) {
    // ...
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      // ...
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      // ...
        dbgs() << ", group-first";
      // ...
    }
  });
MachineBasicBlock::iterator
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
    /* ... */) const {
  // ...
  MF.insert(MBBInsertPoint, LoopMBB);
  // ...
  MF.insert(MBBInsertPoint, ExitMBB);
  // ...
  MBB.addSuccessor(LoopMBB);
  // ...
  return ExitMBB->begin();
}

void AArch64FrameLowering::inlineStackProbeFixed(
    MachineBasicBlock::iterator MBBI, Register ScratchReg, int64_t FrameSize,
    StackOffset CFAOffset) const {
  // ...
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  int64_t ProbeSize = MF.getInfo<AArch64FunctionInfo>()->getStackProbeSize();
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;

  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");

  // Either unroll the probe sequence, or emit a compact loop when the frame
  // is an exact multiple of the probe size:
  if (/* ... */) {
    for (int i = 0; i < NumBlocks; ++i) {
      // ...
      emitFrameOffset(/* ... */, EmitAsyncCFI && !HasFP, CFAOffset);
      // ...
    }
    // ...
  } else if (NumBlocks != 0) {
    // ...
    emitFrameOffset(/* ... */, EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);
    // ...
    if (EmitAsyncCFI && !HasFP) {
      // ...
          .buildDefCFARegister(AArch64::SP);
    }
  }

  if (ResidualSize != 0) {
    // ...
    emitFrameOffset(/* ... */, EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
  }
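// Editorial sketch (self-contained): the probe-size arithmetic above splits a
// frame into full probe blocks plus a residual chunk.
#include <cassert>

int main() {
  long FrameSize = 10000, ProbeSize = 4096; // hypothetical sizes
  long NumBlocks = FrameSize / ProbeSize;
  long ResidualSize = FrameSize % ProbeSize;
  assert(NumBlocks == 2 && ResidualSize == 1808);
  return 0;
}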
// In inlineStackProbe():
  SmallVector<MachineInstr *, 4> ToReplace;
  for (MachineInstr &MI : MBB)
    if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
        MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
      ToReplace.push_back(&MI);

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
      StackOffset CFAOffset = StackOffset::get(MI->getOperand(2).getImm(),
                                               MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
                            CFAOffset);
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      const AArch64InstrInfo *TII =
          MI->getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  }
// In struct StackAccess:
  bool operator<(const StackAccess &Rhs) const {
    return std::make_tuple(start(), Idx) <
           std::make_tuple(Rhs.start(), Rhs.Idx);
  }

// In the StackAccess printer:
  OS << (Offset.getFixed() < 0 ? "" : "+") << Offset.getFixed();
  if (Offset.getScalable())
    OS << (Offset.getScalable() < 0 ? "" : "+") << Offset.getScalable()
       /* ... */;
void AArch64FrameLowering::emitRemarks(
    const MachineFunction &MF, /* ... */) const {
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  const uint64_t HazardSize =
      (StackHazardSize) ? StackHazardSize : StackHazardRemarkSize;

  if (HazardSize == 0)
    return;
  // ...
  std::vector<StackAccess> StackAccesses(MFI.getNumObjects());

  size_t NumFPLdSt = 0;
  size_t NumNonFPLdSt = 0;

  // Collect all load/store accesses to frame objects, classified by type.
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
        continue;
      for (MachineMemOperand *MMO : MI.memoperands()) {
        // ...
        StackAccesses[ArrIdx].Idx = FrameIdx;
        StackAccesses[ArrIdx].Offset =
            getFrameIndexReferenceFromSP(MF, FrameIdx);
        // ...
        StackAccesses[ArrIdx].AccessTypes |= RegTy;
        // ...
      }
    }
  }

  if (NumFPLdSt == 0 || NumNonFPLdSt == 0)
    return;
  // ...
  if (StackAccesses.front().isMixed())
    MixedObjects.push_back(&StackAccesses.front());

  for (auto It = StackAccesses.begin(), End = std::prev(StackAccesses.end());
       It != End; ++It) {
    const auto &First = *It;
    const auto &Second = *(It + 1);

    if (Second.isMixed())
      MixedObjects.push_back(&Second);

    if ((First.isSME() && Second.isCPU()) ||
        (First.isCPU() && Second.isSME())) {
      uint64_t Distance = static_cast<uint64_t>(Second.start() - First.end());
      if (Distance < HazardSize)
        HazardPairs.emplace_back(&First, &Second);
    }
  }

  auto EmitRemark = [&](llvm::StringRef Str) {
    // ...
    auto R = MachineOptimizationRemarkAnalysis(
        "sme", "StackHazard", MF.getFunction().getSubprogram(), &MF.front());
    return R << formatv("stack hazard in '{0}': ", MF.getName()).str() << Str;
    // ...
  };

  for (const auto &P : HazardPairs)
    EmitRemark(formatv("{0} is too close to {1}", *P.first, *P.second).str());

  for (const auto *Obj : MixedObjects)
    EmitRemark(
        formatv("{0} accessed by both GP and FP instructions", *Obj).str());
}
unsigned const MachineRegisterInfo * MRI
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static bool produceCompactUnwindFrame(const AArch64FrameLowering &, MachineFunction &MF)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
bool enableMultiVectorSpillFill(const AArch64Subtarget &Subtarget, MachineFunction &MF)
static std::optional< int > getLdStFrameID(const MachineInstr &MI, const MachineFrameInfo &MFI)
static cl::opt< bool > SplitSVEObjects("aarch64-split-sve-objects", cl::desc("Split allocation of ZPR & PPR objects"), cl::init(true), cl::Hidden)
static cl::opt< bool > StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming", cl::init(false), cl::Hidden)
void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static cl::opt< bool > DisableMultiVectorSpillFill("aarch64-disable-multivector-spill-fill", cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, const MachineFunction &MF)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, AssignObjectOffsets AssignOffsets)
Process all the SVE stack objects and the SVE stack size and offsets for each object.
static bool isTargetWindows(const MachineFunction &MF)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static cl::opt< unsigned > StackHazardRemarkSize("aarch64-stack-hazard-remark-size", cl::init(0), cl::Hidden)
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static unsigned getStackHazardSize(const MachineFunction &MF)
MCRegister findFreePredicateReg(BitVector &SavedRegs)
static bool isPPRAccess(const MachineInstr &MI)
static std::optional< int > getMMOFrameID(MachineMemOperand *MMO, const MachineFrameInfo &MFI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file contains the declaration of the AArch64PrologueEmitter and AArch64EpilogueEmitter classes,...
static const int kSetTagLoopThreshold
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
static std::string getTypeString(Type *T)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
void emitEpilogue()
Emit the epilogue.
StackOffset getSVEStackSize(const MachineFunction &MF) const
Returns the size of the entire SVE stackframe (PPRs + ZPRs).
StackOffset getZPRStackSize(const MachineFunction &MF) const
Returns the size of the entire ZPR stackframe (calleesaves + spills).
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
bool enableFullCFIFixup(const MachineFunction &MF) const override
enableFullCFIFixup - Returns true if we may need to fix the unwind information such that it is accura...
StackOffset getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI) const override
getFrameIndexReferenceFromSP - This method returns the offset from the stack pointer to the slot of t...
bool enableCFIFixup(const MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
friend class AArch64PrologueEmitter
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
friend class AArch64EpilogueEmitter
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, TargetStackID::Value StackID, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
bool needsWinCFI(const MachineFunction &MF) const
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
StackOffset getPPRStackSize(const MachineFunction &MF) const
Returns the size of the entire PPR stackframe (calleesaves + spills + hazard padding).
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
bool requiresSaveVG(const MachineFunction &MF) const
void emitPacRetPlusLeafHardening(MachineFunction &MF) const
Harden the entire function with pac-ret.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getPPRCalleeSavedStackSize() const
void setHasStackFrame(bool s)
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
bool hasStackProbing() const
unsigned getArgumentStackToRestore() const
void setCalleeSaveStackHasFreeSpace(bool s)
int getCalleeSaveBaseToFrameRecordOffset() const
bool hasStreamingModeChanges() const
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setPredicateRegForFillSpill(unsigned Reg)
int getStackHazardSlotIndex() const
void setCalleeSavedStackSize(unsigned Size)
void setSplitSVEObjects(bool s)
bool hasStackFrame() const
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR)
std::optional< int > getTaggedBasePointerIndex() const
SMEAttrs getSMEFnAttrs() const
uint64_t getLocalStackSize() const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
uint64_t getStackSizePPR() const
bool hasSwiftAsyncContext() const
bool hasStackHazardSlotIndex() const
void setStackHazardSlotIndex(int Index)
unsigned getZPRCalleeSavedStackSize() const
void setStackHazardCSRSlotIndex(int Index)
unsigned getPredicateRegForFillSpill() const
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR)
bool hasCalculatedStackSizeSVE() const
uint64_t getStackSizeZPR() const
bool hasSVEStackSize() const
bool isStackHazardIncludedInCalleeSaveArea() const
unsigned getSVECalleeSavedStackSize() const
bool hasSplitSVEObjects() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
void emitPrologue()
Emit the prologue.
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
bool isSVEorStreamingSVEAvailable() const
Returns true if the target has access to either the full range of SVE instructions,...
bool isStreaming() const
Returns true if the function has a streaming body.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
unsigned getRedZoneSize(const Function &F) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
size_type size() const
size - Returns the number of bits in this bitvector.
Helper class for creating CFI instructions and inserting them into MIR.
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool usesWindowsCFI() const
Wrapper class representing physical registers. Should be passed by value.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
const AllocaInst * getObjectAllocation(int ObjectIdx) const
Return the underlying Alloca of the specified stack object if it exists.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
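A hedged sketch of creating the two kinds of frame objects listed above; createExampleObjects and its sizes are invented, and an existing MachineFunction is assumed:
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Invented example helper: one fixed object (known offset from the
// incoming SP, e.g. a stack-passed argument) and one ordinary object
// whose final offset is assigned later by prolog/epilog insertion.
static void createExampleObjects(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FixedFI = MFI.CreateFixedObject(/*Size=*/8, /*SPOffset=*/16,
                                      /*IsImmutable=*/true);
  int FI = MFI.CreateStackObject(/*Size=*/16, Align(16),
                                 /*isSpillSlot=*/false);
  MFI.setObjectAlignment(FI, Align(32));
  (void)FixedFI;
}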
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasScalableStackID(int ObjectIdx) const
int getStackProtectorIndex() const
Return the index for the stack protector object.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
unsigned getNumObjects() const
Return the number of objects.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
bool hasStackObjects() const
Return true if there are any stack objects in this function.
uint8_t getStackID(int ObjectIdx) const
unsigned getNumFixedObjects() const
Return the number of fixed objects.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
void setFlags(unsigned flags)
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const Value * getValue() const
Return the base address of the memory access.
MachineOperand class - Representation of each machine instruction operand.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI bool isLiveIn(Register Reg) const
LLVM_ABI const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
LLVM_ABI bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasNonStreamingInterfaceAndBody() const
bool hasStreamingBody() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
A SetVector that performs no allocations if smaller than a certain size.
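The bool-returning insert makes de-duplication explicit; a minimal standalone sketch:
#include "llvm/ADT/SetVector.h"
#include <cassert>

int main() {
  // Set semantics plus stable insertion order; inline storage for up
  // to 8 elements before any heap allocation.
  llvm::SmallSetVector<int, 8> Seen;
  assert(Seen.insert(42));  // newly inserted -> true
  assert(!Seen.insert(42)); // duplicate -> false
  return 0;
}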
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
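StackOffset keeps the fixed and scalable byte components separate so SVE-sized objects compose with ordinary ones; a standalone sketch (values arbitrary):
#include "llvm/Support/TypeSize.h"
#include <cassert>

int main() {
  using llvm::StackOffset;
  // 16 fixed bytes plus a scalable component of -32 bytes per granule.
  StackOffset Off = StackOffset::get(/*Fixed=*/16, /*Scalable=*/-32);
  Off += StackOffset::getFixed(8); // components accumulate independently
  assert(Off.getFixed() == 24 && Off.getScalable() == -32);
  return 0;
}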
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
virtual bool enableCFIFixup(const MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Triple - Helper class for working with autoconf configuration names.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ C
The default llvm calling convention, compatible with C.
@ Define
Register definition.
@ ScalablePredicateVector
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
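The builder chains operand-appending calls onto the created instruction; a hedged sketch (the opcode, registers, and emitExample itself are invented placeholders):
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Invented example: create Opc with a destination register, a killed
// source register, and an immediate, then mark it as prologue code.
static void emitExample(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                        const TargetInstrInfo *TII, unsigned Opc,
                        Register Dst, Register Src) {
  BuildMI(MBB, MBBI, DL, TII->get(Opc), Dst)
      .addReg(Src, getKillRegState(true))
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}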
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
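make_scope_exit runs its callable when the guard is destroyed, which keeps cleanup next to the code that needs it; standalone sketch:
#include "llvm/ADT/ScopeExit.h"
#include <cstdio>

int main() {
  {
    auto Guard = llvm::make_scope_exit([] { std::puts("cleanup ran"); });
    std::puts("doing work");
  } // Guard destroyed here; "cleanup ran" prints second
  return 0;
}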
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
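The range wrappers drop the begin()/end() boilerplate of the std algorithms; a standalone sketch covering any_of (and is_contained, listed further below):
#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <vector>

int main() {
  std::vector<int> FrameIndices = {1, 3, 5};
  assert(llvm::any_of(FrameIndices, [](int FI) { return FI > 4; }));
  assert(llvm::is_contained(FrameIndices, 3));
  return 0;
}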
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
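A hedged sketch of the common SP-adjustment pattern through this helper (the wrapper allocateStack is invented, and AArch64InstrInfo.h is a target-internal header):
#include "AArch64InstrInfo.h"

using namespace llvm;

// Invented wrapper: SP -= NumBytes, tagged FrameSetup so the unwind
// info emitted for the prologue accounts for it.
static void allocateStack(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          const DebugLoc &DL, const TargetInstrInfo *TII,
                          int64_t NumBytes) {
  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(-NumBytes), TII,
                  MachineInstr::FrameSetup);
}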
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ LLVM_MARK_AS_BITMASK_ENUM
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
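alignTo rounds up to the next multiple of the alignment; standalone sketch:
#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  // 40 bytes rounded up to a 16-byte boundary is 48.
  assert(llvm::alignTo(40, llvm::Align(16)) == 48);
  // Already-aligned sizes are returned unchanged.
  assert(llvm::alignTo(48, llvm::Align(16)) == 48);
  return 0;
}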
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool operator<(const StackAccess &Rhs) const
void print(raw_ostream &OS) const
std::string getTypeString() const
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
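getFixedStack pairs naturally with getMachineMemOperand when describing spill-slot accesses; a hedged sketch (spillMMO is an invented helper, assuming an existing MachineFunction and frame index):
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Invented helper: a store memory operand sized and aligned from the
// frame object itself, so later passes can reason about the access.
static MachineMemOperand *spillMMO(MachineFunction &MF, int FI) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                 MachineMemOperand::MOStore,
                                 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
}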
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray