LLVM 23.0.0git
RISCVFrameLowering.cpp
Go to the documentation of this file.
1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
16#include "RISCVSubtarget.h"
26#include "llvm/MC/MCDwarf.h"
27#include "llvm/Support/LEB128.h"
28
29#include <algorithm>
30
31#define DEBUG_TYPE "riscv-frame"
32
33using namespace llvm;
34
  // NOTE(review): the signature line of this helper is truncated in this
  // source view; the body maps the target ABI to its required stack alignment.
  // The embedded (ILP32E/LP64E) ABIs only guarantee 4-/8-byte alignment; all
  // other RISC-V ABIs mandate 16 bytes.
  if (ABI == RISCVABI::ABI_ILP32E)
    return Align(4);
  if (ABI == RISCVABI::ABI_LP64E)
    return Align(8);
  return Align(16);
}
42
          // NOTE(review): the start of this constructor (the
          // TargetFrameLowering base-class argument list) is truncated in this
          // source view. The transient stack alignment is derived from the
          // target ABI via getABIStackAlignment above.
          /*LocalAreaOffset=*/0,
          /*TransientStackAlignment=*/getABIStackAlignment(STI.getTargetABI())),
      STI(STI) {}
49
50// The register used to hold the frame pointer.
51static constexpr MCPhysReg FPReg = RISCV::X8;
52
53// The register used to hold the stack pointer.
54static constexpr MCPhysReg SPReg = RISCV::X2;
55
56// The register used to hold the return address.
57static constexpr MCPhysReg RAReg = RISCV::X1;
58
// List of CSRs that are given a fixed location by save/restore libcalls or
// Zcmp/Xqccmp Push/Pop. The order in this table indicates the order the
// registers are saved on the stack. Zcmp and Xqccmp use the reverse of this
// order on the stack, but this is handled when offsets are calculated.
// The index of a register in this table is also the value that helpers such as
// getNumPushPopRegs/getLibCallID compute from it.
static const MCPhysReg FixedCSRFIMap[] = {
    /*ra*/ RAReg,      /*s0*/ FPReg,      /*s1*/ RISCV::X9,
    /*s2*/ RISCV::X18, /*s3*/ RISCV::X19, /*s4*/ RISCV::X20,
    /*s5*/ RISCV::X21, /*s6*/ RISCV::X22, /*s7*/ RISCV::X23,
    /*s8*/ RISCV::X24, /*s9*/ RISCV::X25, /*s10*/ RISCV::X26,
    /*s11*/ RISCV::X27};
69
// The number of stack bytes allocated by `QC.C.MIENTER(.NEST)` and popped by
// `QC.C.MILEAVERET`.
static constexpr uint64_t QCIInterruptPushAmount = 96;

// Registers given a fixed slot (negative index) in the frame record written by
// `QC.C.MIENTER(.NEST)`; gaps/reserved slots are documented inline.
// NOTE(review): the unit of the int8_t index (presumably XLEN-sized slots) is
// established where the offsets are consumed — confirm against full source.
static const std::pair<MCPhysReg, int8_t> FixedCSRFIQCIInterruptMap[] = {
    /* -1 is a gap for mepc/mnepc */
    {/*fp*/ FPReg, -2},
    /* -3 is a gap for qc.mcause */
    {/*ra*/ RAReg, -4},
    /* -5 is reserved */
    {/*t0*/ RISCV::X5, -6},
    {/*t1*/ RISCV::X6, -7},
    {/*t2*/ RISCV::X7, -8},
    {/*a0*/ RISCV::X10, -9},
    {/*a1*/ RISCV::X11, -10},
    {/*a2*/ RISCV::X12, -11},
    {/*a3*/ RISCV::X13, -12},
    {/*a4*/ RISCV::X14, -13},
    {/*a5*/ RISCV::X15, -14},
    {/*a6*/ RISCV::X16, -15},
    {/*a7*/ RISCV::X17, -16},
    {/*t3*/ RISCV::X28, -17},
    {/*t4*/ RISCV::X29, -18},
    {/*t5*/ RISCV::X30, -19},
    {/*t6*/ RISCV::X31, -20},
    /* -21, -22, -23, -24 are reserved */
};
97
98/// Returns true if DWARF CFI instructions ("frame moves") should be emitted.
99static bool needsDwarfCFI(const MachineFunction &MF) {
100 return MF.needsFrameMoves();
101}
102
// For now we use x3, a.k.a gp, as pointer to shadow call stack.
// User should not use x3 in their asm.
//
// Shadow-call-stack prologue: pushes the return address onto either the
// hardware shadow stack (Zimop sspush / Zcmop c.sspush) or the gp-based
// software shadow stack, then emits CFI describing the SCS pointer bump.
// NOTE(review): the first signature lines are truncated in this source view;
// only the trailing parameter is visible below.
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  // We check Zimop instead of (Zimop || Zcmop) to determine whether HW shadow
  // stack is available despite the fact that sspush/sspopchk both have a
  // compressed form, because if only Zcmop is available, we would need to
  // reserve X5 due to c.sspopchk only takes X5 and we currently do not support
  // using X5 as the return address register.
  // However, we can still aggressively use c.sspush x1 if zcmop is available.
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();

  // Do not save RA to the SCS if it's not saved to the regular stack,
  // i.e. RA is not at risk of being overwritten.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    if (STI.hasStdExtZcmop()) {
      static_assert(RAReg == RISCV::X1, "C.SSPUSH only accepts X1");
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_C_SSPUSH));
    } else {
      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPUSH)).addReg(RAReg);
    }
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Store return address to shadow call stack
  // addi    gp, gp, [4|8]
  // s[w|d]  ra, -[4|8](gp)
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(SlotSize)
  // NOTE(review): a trailing chained call (likely
  // .setMIFlag(MachineInstr::FrameSetup);) is truncated here.
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
      .addReg(RAReg)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
  // NOTE(review): trailing .setMIFlag(...) line truncated here as well.

  if (!needsDwarfCFI(MF))
    return;

  // Emit a CFI instruction that causes SlotSize to be subtracted from the value
  // of the shadow stack pointer when unwinding past this frame.
  char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");

  char Offset = static_cast<char>(-SlotSize) & 0x7f;
  const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      DwarfSCSReg, // register
      2,           // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
      Offset, // addend (sleb128)
  };

  // NOTE(review): the CFIInstBuilder construction line is truncated in this
  // source view; only the .buildEscape(...) continuation is visible.
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
180
// Shadow-call-stack epilogue: validates/pops the return address via sspopchk
// (HW) or reloads it from the gp-based software shadow stack. Mirrors
// emitSCSPrologue().
// NOTE(review): the first signature lines are truncated in this source view.
                            const DebugLoc &DL) {
  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
                          STI.hasStdExtZimop();
  bool HasSWShadowStack =
      MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (!HasHWShadowStack && !HasSWShadowStack)
    return;

  // See emitSCSPrologue() above.
  std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
  if (llvm::none_of(
          CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
    return;

  const RISCVInstrInfo *TII = STI.getInstrInfo();
  if (HasHWShadowStack) {
    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPOPCHK)).addReg(RAReg);
    return;
  }

  Register SCSPReg = RISCVABI::getSCSPReg();

  bool IsRV64 = STI.is64Bit();
  int64_t SlotSize = STI.getXLen() / 8;
  // Load return address from shadow call stack
  // l[w|d]  ra, -[4|8](gp)
  // addi    gp, gp, -[4|8]
  BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
      // NOTE(review): the destination-register operand line is truncated here.
      .addReg(SCSPReg)
      .addImm(-SlotSize)
  // NOTE(review): trailing .setMIFlag(...) line truncated here.
  BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
      .addReg(SCSPReg, RegState::Define)
      .addReg(SCSPReg)
      .addImm(-SlotSize)
  // NOTE(review): trailing .setMIFlag(...) line truncated here.
  if (needsDwarfCFI(MF)) {
    // Restore the SCS pointer
    // NOTE(review): the CFI-restore call inside this block is truncated in
    // this source view.
  }
}
226
// Insert instruction to swap mscratchsw with sp
// NOTE(review): the first signature lines are truncated in this source view;
// the helper takes MF/MBB/MBBI/DL like the neighbouring emit* helpers.
                                     const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Only SiFive stack-swap interrupt handlers need the swap.
  if (!RVFI->isSiFiveStackSwapInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  assert(STI.hasVendorXSfmclic() && "Stack Swapping Requires XSfmclic");

  // CSRRW against the vendor sf.mscratchcsw CSR implements the SP swap
  // described above.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      // NOTE(review): the destination-register operand line is truncated here.
      .addImm(RISCVSysReg::sf_mscratchcsw)
      // NOTE(review): the source operand / MI-flag lines are truncated here.

  // FIXME: CFI Information for this swap.
}
249
static void
// NOTE(review): the function name and parameter lines are truncated in this
// source view; the body reads MF and an RVFI reference — confirm the exact
// signature against the full source.
  if (!RVFI.isSiFivePreemptibleInterrupt(MF))
    return;

  const TargetRegisterClass &RC = RISCV::GPRRegClass;
  const TargetRegisterInfo &TRI =
      *MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Create two frame objects for spilling X8 and X9, which will be done in
  // `emitSiFiveCLICPreemptibleSaves`. This is in addition to any other stack
  // objects we might have for X8 and X9, as they might be saved twice.
  for (int I = 0; I < 2; ++I) {
    int FI = MFI.CreateStackObject(TRI.getSpillSize(RC), TRI.getSpillAlign(RC),
                                   true);
    // NOTE(review): the line recording FI into RVFI (the interrupt CSR frame
    // index bookkeeping) is truncated in this source view.
  }
}
270
// Entry half of SiFive CLIC preemptible interrupt handling: spills X8/X9,
// copies mcause/mepc into them, then re-enables interrupts.
// NOTE(review): the first signature lines are truncated in this source view.
                                          const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // X8 and X9 might be stored into the stack twice, initially into the
  // `interruptCSRFrameIndex` here, and then maybe again into their CSI frame
  // index.
  //
  // This is done instead of telling the register allocator that we need two
  // VRegs to store the value of `mcause` and `mepc` through the instruction,
  // which affects other passes.
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(0),
                           &RISCV::GPRRegClass, Register(),
                           // NOTE(review): trailing arguments truncated here.
  TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
                           RVFI->getInterruptCSRFrameIndex(1),
                           &RISCV::GPRRegClass, Register(),
                           // NOTE(review): trailing arguments truncated here.

  // Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
  // used in the function, then they will appear in `getUnmanagedCSI` and will
  // be saved again.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X8, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X0)
      // NOTE(review): trailing MI-flag line truncated here.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
      .addReg(RISCV::X9, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X0)
      // NOTE(review): trailing MI-flag line truncated here.

  // Enable interrupts (CSRRSI sets bit 3, MIE, of mstatus).
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRSI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      // NOTE(review): trailing MI-flag line truncated here.
}
322
// Exit half of SiFive CLIC preemptible interrupt handling: disables
// interrupts, writes X9/X8 back into mepc/mcause, then restores X8/X9 from the
// slots filled in `emitSiFiveCLICPreemptibleSaves`.
// NOTE(review): the first signature lines are truncated in this source view.
                                          const DebugLoc &DL) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  if (!RVFI->isSiFivePreemptibleInterrupt(MF))
    return;

  const auto &STI = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = STI.getInstrInfo();

  // FIXME: CFI Information here is nonexistent/wrong.

  // Disable interrupts (CSRRCI clears bit 3, MIE, of mstatus).
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRCI))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mstatus)
      .addImm(8)
      // NOTE(review): trailing MI-flag line truncated here.

  // Restore `mepc` from x9 (s1), and `mcause` from x8 (s0). If either were used
  // in the function, they have already been restored once, so now have the
  // value stored in `emitSiFiveCLICPreemptibleSaves`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mepc)
      .addReg(RISCV::X9, RegState::Kill)
      // NOTE(review): trailing MI-flag line truncated here.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
      .addReg(RISCV::X0, RegState::Define)
      .addImm(RISCVSysReg::mcause)
      .addReg(RISCV::X8, RegState::Kill)
      // NOTE(review): trailing MI-flag line truncated here.

  // X8 and X9 need to be restored to their values on function entry, which we
  // saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
                            RVFI->getInterruptCSRFrameIndex(1),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
  TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
                            RVFI->getInterruptCSRFrameIndex(0),
                            &RISCV::GPRRegClass, Register(),
                            RISCV::NoSubRegister, MachineInstr::FrameSetup);
}
369
370// Get the ID of the libcall used for spilling and restoring callee saved
371// registers. The ID is representative of the number of registers saved or
372// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
373// single register.
374static int getLibCallID(const MachineFunction &MF,
375 const std::vector<CalleeSavedInfo> &CSI) {
376 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
377
378 if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
379 return -1;
380
381 MCRegister MaxReg;
382 for (auto &CS : CSI)
383 // assignCalleeSavedSpillSlots assigns negative frame indexes to
384 // registers which can be saved by libcall.
385 if (CS.getFrameIdx() < 0)
386 MaxReg = std::max(MaxReg.id(), CS.getReg().id());
387
388 if (!MaxReg)
389 return -1;
390
391 switch (MaxReg.id()) {
392 default:
393 llvm_unreachable("Something has gone wrong!");
394 // clang-format off
395 case /*s11*/ RISCV::X27: return 12;
396 case /*s10*/ RISCV::X26: return 11;
397 case /*s9*/ RISCV::X25: return 10;
398 case /*s8*/ RISCV::X24: return 9;
399 case /*s7*/ RISCV::X23: return 8;
400 case /*s6*/ RISCV::X22: return 7;
401 case /*s5*/ RISCV::X21: return 6;
402 case /*s4*/ RISCV::X20: return 5;
403 case /*s3*/ RISCV::X19: return 4;
404 case /*s2*/ RISCV::X18: return 3;
405 case /*s1*/ RISCV::X9: return 2;
406 case /*s0*/ FPReg: return 1;
407 case /*ra*/ RAReg: return 0;
408 // clang-format on
409 }
410}
411
// Get the name of the libcall used for spilling callee saved registers.
// If this function will not use save/restore libcalls, then return a nullptr.
static const char *
// NOTE(review): the function-name/first-parameter line is truncated in this
// source view.
                    const std::vector<CalleeSavedInfo> &CSI) {
  // Table indexed by the zero-based libcall ID from getLibCallID.
  static const char *const SpillLibCalls[] = {
    "__riscv_save_0",
    "__riscv_save_1",
    "__riscv_save_2",
    "__riscv_save_3",
    "__riscv_save_4",
    "__riscv_save_5",
    "__riscv_save_6",
    "__riscv_save_7",
    "__riscv_save_8",
    "__riscv_save_9",
    "__riscv_save_10",
    "__riscv_save_11",
    "__riscv_save_12"
  };

  // getLibCallID returns -1 when save/restore libcalls are not used.
  int LibCallID = getLibCallID(MF, CSI);
  if (LibCallID == -1)
    return nullptr;
  return SpillLibCalls[LibCallID];
}
438
// Get the name of the libcall used for restoring callee saved registers.
// If this function will not use save/restore libcalls, then return a nullptr.
static const char *
// NOTE(review): the function-name/first-parameter line is truncated in this
// source view.
                      const std::vector<CalleeSavedInfo> &CSI) {
  // Table indexed by the zero-based libcall ID from getLibCallID.
  static const char *const RestoreLibCalls[] = {
    "__riscv_restore_0",
    "__riscv_restore_1",
    "__riscv_restore_2",
    "__riscv_restore_3",
    "__riscv_restore_4",
    "__riscv_restore_5",
    "__riscv_restore_6",
    "__riscv_restore_7",
    "__riscv_restore_8",
    "__riscv_restore_9",
    "__riscv_restore_10",
    "__riscv_restore_11",
    "__riscv_restore_12"
  };

  // getLibCallID returns -1 when save/restore libcalls are not used.
  int LibCallID = getLibCallID(MF, CSI);
  if (LibCallID == -1)
    return nullptr;
  return RestoreLibCalls[LibCallID];
}
465
466// Get the max reg of Push/Pop for restoring callee saved registers.
467static unsigned getNumPushPopRegs(const std::vector<CalleeSavedInfo> &CSI) {
468 unsigned NumPushPopRegs = 0;
469 for (auto &CS : CSI) {
470 auto *FII = llvm::find_if(FixedCSRFIMap,
471 [&](MCPhysReg P) { return P == CS.getReg(); });
472 if (FII != std::end(FixedCSRFIMap)) {
473 unsigned RegNum = std::distance(std::begin(FixedCSRFIMap), FII);
474 NumPushPopRegs = std::max(NumPushPopRegs, RegNum + 1);
475 }
476 }
477 assert(NumPushPopRegs != 12 && "x26 requires x27 to also be pushed");
478 return NumPushPopRegs;
479}
480
// Return true if the specified function should have a dedicated frame
// pointer register. This is true if frame pointer elimination is
// disabled, if it needs dynamic stack realignment, if the function has
// variable sized allocas, or if the frame address is taken.
// NOTE(review): the signature line is truncated in this source view.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): the first condition line of this disjunction is truncated in
  // this source view.
      RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
      // NOTE(review): the trailing condition line is truncated here.
    return true;

  // With large callframes around we may need to use FP to access the scavenging
  // emergency spillslot.
  //
  // We calculate the MaxCallFrameSize at the end of isel so this value should
  // be stable for the whole post-isel MIR pipeline.
  //
  // NOTE: The idea of forcing a frame pointer is copied from AArch64, but they
  // conservatively return true when the call frame size has not been
  // computed yet. On RISC-V that caused MachineOutliner tests to fail the
  // MachineVerifier due to outlined functions not computing max call frame
  // size thus the frame pointer would always be reserved.
  if (MFI.isMaxCallFrameSizeComputed() && MFI.getMaxCallFrameSize() > 2047)
    return true;

  return false;
}
510
// Returns true when a base pointer is needed: stack realignment combined with
// SP adjustments (variable-sized objects, or outgoing-argument adjustment
// around calls) makes SP-relative addressing of argument objects impossible.
// NOTE(review): the signature line is truncated in this source view.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // If we do not reserve stack space for outgoing arguments in prologue,
  // we will adjust the stack pointer before call instruction. After the
  // adjustment, we can not use SP to access the stack objects for the
  // arguments. Instead, use BP to access these stack objects.
  return (MFI.hasVarSizedObjects() ||
          // NOTE(review): an intermediate condition line is truncated here.
          MFI.getMaxCallFrameSize() != 0))) &&
         TRI->hasStackRealignment(MF);
}
524
// Determines the size of the frame and maximum call frame size.
void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t FrameSize = MFI.getStackSize();

  // QCI Interrupts use at least 96 bytes of stack space
  if (RVFI->useQCIInterrupt(MF))
    FrameSize = std::max(FrameSize, QCIInterruptPushAmount);

  // Get the alignment.
  Align StackAlign = getStackAlign();

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, StackAlign);

  // Update frame info.
  MFI.setStackSize(FrameSize);

  // When using SP or BP to access stack objects, we may require extra padding
  // to ensure the bottom of the RVV stack is correctly aligned within the main
  // stack. We calculate this as the amount required to align the scalar local
  // variable section up to the RVV alignment.
  // NOTE(review): the declaration of TRI (target register info) is truncated
  // in this source view.
  if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
    int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
                             RVFI->getVarArgsSaveSize();
    if (auto RVVPadding =
            offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
      RVFI->setRVVPadding(RVVPadding);
  }
}
559
// Returns the stack size including RVV padding (when required), rounded back
// up to the required stack alignment.
// NOTE(review): the signature line (RISCVFrameLowering::getStackSizeWithRVVPadding)
// is truncated in this source view.
    const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  return alignTo(MFI.getStackSize() + RVFI->getRVVPadding(), getStackAlign());
}
568
// Collects callee-saved entries that live in ordinary stack slots (Default
// stack ID, non-negative frame index), i.e. not handled by libcalls/push-pop
// and not scalable-vector slots.
// NOTE(review): the first signature lines (including the result declaration)
// are truncated in this source view.
                const std::vector<CalleeSavedInfo> &CSI,
                bool ReverseOrder = false) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): the declaration of NonLibcallCSI is truncated here.

  for (auto &CS : CSI) {
    int FI = CS.getFrameIdx();
    if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::Default)
      NonLibcallCSI.push_back(CS);
  }

  // Reverse the order so that load/store operations use ascending addresses,
  // enabling better load/store clustering and fusion.
  if (ReverseOrder)
    std::reverse(NonLibcallCSI.begin(), NonLibcallCSI.end());

  return NonLibcallCSI;
}
589
// Collects callee-saved entries whose slots live on the scalable-vector (RVV)
// stack (ScalableVector stack ID, non-negative frame index).
// NOTE(review): the first signature lines (including the result declaration)
// are truncated in this source view.
                      const std::vector<CalleeSavedInfo> &CSI) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // NOTE(review): the declaration of RVVCSI is truncated here.

  for (auto &CS : CSI) {
    int FI = CS.getFrameIdx();
    if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector)
      RVVCSI.push_back(CS);
  }

  return RVVCSI;
}
604
// Collects callee-saved entries saved by save/restore libcalls or Zcmp/Xqccmp
// push/pop, i.e. registers appearing in FixedCSRFIMap.
// NOTE(review): the first signature lines are truncated in this source view.
                           const std::vector<CalleeSavedInfo> &CSI) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return PushOrLibCallsCSI;

  for (const auto &CS : CSI) {
    if (RVFI->useQCIInterrupt(MF)) {
      // Some registers are saved by both `QC.C.MIENTER(.NEST)` and
      // `QC.CM.PUSH(FP)`. In these cases, prioritise the CFI info that points
      // to the versions saved by `QC.C.MIENTER(.NEST)` which is what FP
      // unwinding would use.
      // NOTE(review): the start of this containment check (over
      // FixedCSRFIQCIInterruptMap) is truncated in this source view.
          CS.getReg()))
        continue;
    }

    if (llvm::is_contained(FixedCSRFIMap, CS.getReg()))
      PushOrLibCallsCSI.push_back(CS);
  }

  return PushOrLibCallsCSI;
}
631
// Collects callee-saved entries saved by `QC.C.MIENTER(.NEST)`, i.e. registers
// appearing in FixedCSRFIQCIInterruptMap. Empty unless the function uses QCI
// interrupts.
// NOTE(review): the first signature lines are truncated in this source view.
                const std::vector<CalleeSavedInfo> &CSI) {
  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  SmallVector<CalleeSavedInfo, 8> QCIInterruptCSI;
  if (!RVFI->useQCIInterrupt(MF))
    return QCIInterruptCSI;

  for (const auto &CS : CSI) {
    // NOTE(review): the start of this containment check (over
    // FixedCSRFIQCIInterruptMap) is truncated in this source view.
        CS.getReg()))
      QCIInterruptCSI.push_back(CS);
  }

  return QCIInterruptCSI;
}
649
// Allocates (and stack-probes) the scalable-vector portion of the frame:
// computes Amount/RVVBytesPerBlock * VLENB into a scratch register, emits a
// PROBED_STACKALLOC_RVV pseudo (expanded to a probe loop later), then
// subtracts the scratch register from SP.
void RISCVFrameLowering::allocateAndProbeStackForRVV(
    // NOTE(review): the first parameter line is truncated in this source view.
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount,
    MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const {
  assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");

  // Emit a variable-length allocation probing loop.

  // Get VLEN in TargetReg
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  Register TargetReg = RISCV::X6;
  uint32_t NumOfVReg = Amount / RISCV::RVVBytesPerBlock;
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PseudoReadVLENB), TargetReg)
      .setMIFlag(Flag);
  TII->mulImm(MF, MBB, MBBI, DL, TargetReg, NumOfVReg, Flag);

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, -Amount);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC_RVV))
      .addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  // SUB SP, SP, T1
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::SUB), SPReg)
      .addReg(SPReg)
      .addReg(TargetReg)
      .setMIFlag(Flag);

  // If we have a dynamic allocation later we need to probe any residuals.
  if (DynAllocation) {
    // Touch the new SP so a later dynamic allocation starts from probed memory.
    BuildMI(MBB, MBBI, DL, TII->get(STI.is64Bit() ? RISCV::SD : RISCV::SW))
        .addReg(RISCV::X0)
        .addReg(SPReg)
        .addImm(0)
        // NOTE(review): trailing MI-flag line truncated here.
  }
}
696
// Appends a DWARF expression computing (FixedOffset + ScalableOffset * VLENB)
// to Expr, mirroring a human-readable form of the same computation into
// Comment.
// NOTE(review): the first signature lines are truncated in this source view.
                                           llvm::raw_string_ostream &Comment) {
  int64_t FixedOffset = Offset.getFixed();
  int64_t ScalableOffset = Offset.getScalable();
  unsigned DwarfVLenB = TRI.getDwarfRegNum(RISCV::VLENB, true);
  if (FixedOffset) {
    // Fold the fixed part in first: <FixedOffset> DW_OP_plus.
    Expr.push_back(dwarf::DW_OP_consts);
    appendLEB128<LEB128Sign::Signed>(Expr, FixedOffset);
    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
    Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
  }

  // Then the scalable part: <ScalableOffset> <VLENB> DW_OP_mul DW_OP_plus.
  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
  appendLEB128<LEB128Sign::Signed>(Expr, ScalableOffset);

  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
  appendLEB128<LEB128Sign::Unsigned>(Expr, DwarfVLenB);
  Expr.push_back(0);

  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
  Expr.push_back((uint8_t)dwarf::DW_OP_plus);

  Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
          << " * vlenb";
}
724
// Builds a DW_CFA_def_cfa_expression escape describing
// CFA = Reg + FixedOffset + ScalableOffset * VLENB.
// NOTE(review): the signature lines are truncated in this source view.
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
  Expr.push_back(0);
  if (Reg == SPReg)
    Comment << "sp";
  else
    Comment << printReg(Reg, &TRI);

  // NOTE(review): the appendScalableVectorExpression(...) call line is
  // truncated in this source view.

  SmallString<64> DefCfaExpr;
  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
751
// Builds a DW_CFA_expression escape describing the save location of Reg as
// CFA + FixedOffset + ScalableOffset * VLENB.
// NOTE(review): the signature lines are truncated in this source view.
  assert(Offset.getScalable() != 0 && "Did not need to adjust CFA for RVV");
  SmallString<64> Expr;
  std::string CommentBuffer;
  llvm::raw_string_ostream Comment(CommentBuffer);
  Comment << printReg(Reg, &TRI) << " @ cfa";

  // Build up the expression (FixedOffset + ScalableOffset * VLENB).
  // NOTE(review): the appendScalableVectorExpression(...) call line is
  // truncated in this source view.

  SmallString<64> DefCfaExpr;
  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
  DefCfaExpr.push_back(dwarf::DW_CFA_expression);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, DwarfReg);
  appendLEB128<LEB128Sign::Unsigned>(DefCfaExpr, Expr.size());
  DefCfaExpr.append(Expr.str());

  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
                                        Comment.str());
}
773
// Allocate stack space and probe it if necessary.
// Strategy: plain SP adjustment when no probing is needed (or Offset fits in
// one probe interval); an unrolled probe sequence for < 5 iterations;
// otherwise a PROBED_STACKALLOC loop pseudo plus a residual adjustment.
// NOTE(review): the first signature lines are truncated in this source view.
                                        uint64_t RealStackSize, bool EmitCFI,
                                        bool NeedProbe, uint64_t ProbeSize,
                                        bool DynAllocation,
                                        MachineInstr::MIFlag Flag) const {
  DebugLoc DL;
  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
  const RISCVInstrInfo *TII = STI.getInstrInfo();
  bool IsRV64 = STI.is64Bit();
  // NOTE(review): the CFIInstBuilder declaration is truncated in this view.

  // Simply allocate the stack if it's not big enough to require a probe.
  if (!NeedProbe || Offset <= ProbeSize) {
    // NOTE(review): the start of the RI->adjustReg(...) call is truncated
    // here; only the trailing arguments are visible.
                  Flag, getStackAlign());

    if (EmitCFI)
      CFIBuilder.buildDefCFAOffset(RealStackSize);

    if (NeedProbe && DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }

    return;
  }

  // Unroll the probe loop depending on the number of iterations.
  if (Offset < ProbeSize * 5) {
    uint64_t CFAAdjust = RealStackSize - Offset;

    uint64_t CurrentOffset = 0;
    while (CurrentOffset + ProbeSize <= Offset) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-ProbeSize), Flag, getStackAlign());
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);

      CurrentOffset += ProbeSize;
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(CurrentOffset + CFAAdjust);
    }

    uint64_t Residual = Offset - CurrentOffset;
    if (Residual) {
      RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
                    StackOffset::getFixed(-Residual), Flag, getStackAlign());
      if (EmitCFI)
        CFIBuilder.buildDefCFAOffset(RealStackSize);

      if (DynAllocation) {
        // s[d|w] zero, 0(sp)
        BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
            .addReg(RISCV::X0)
            .addReg(SPReg)
            .addImm(0)
            .setMIFlags(Flag);
      }
    }

    return;
  }

  // Emit a variable-length allocation probing loop.
  uint64_t RoundedSize = alignDown(Offset, ProbeSize);
  uint64_t Residual = Offset - RoundedSize;

  Register TargetReg = RISCV::X6;
  // SUB TargetReg, SP, RoundedSize
  RI->adjustReg(MBB, MBBI, DL, TargetReg, SPReg,
                StackOffset::getFixed(-RoundedSize), Flag, getStackAlign());

  if (EmitCFI) {
    // Set the CFA register to TargetReg.
    CFIBuilder.buildDefCFA(TargetReg, RoundedSize);
  }

  // It will be expanded to a probe loop in `inlineStackProbe`.
  BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC)).addReg(TargetReg);

  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIBuilder.buildDefCFARegister(SPReg);
  }

  if (Residual) {
    // NOTE(review): the start of the RI->adjustReg(...) call for the residual
    // is truncated here; only the trailing arguments are visible.
                  Flag, getStackAlign());
    if (DynAllocation) {
      // s[d|w] zero, 0(sp)
      BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
          .addReg(RISCV::X0)
          .addReg(SPReg)
          .addImm(0)
          .setMIFlags(Flag);
    }
  }

  if (EmitCFI)
    CFIBuilder.buildDefCFAOffset(Offset);
}
886
887static bool isPush(unsigned Opcode) {
888 switch (Opcode) {
889 case RISCV::CM_PUSH:
890 case RISCV::QC_CM_PUSH:
891 case RISCV::QC_CM_PUSHFP:
892 return true;
893 default:
894 return false;
895 }
896}
897
898static bool isPop(unsigned Opcode) {
899 // There are other pops but these are the only ones introduced during this
900 // pass.
901 switch (Opcode) {
902 case RISCV::CM_POP:
903 case RISCV::QC_CM_POP:
904 return true;
905 default:
906 return false;
907 }
908}
909
// Maps a PushPopKind (plus whether FP must be updated) to the concrete push
// opcode.
// NOTE(review): the first signature line is truncated in this source view.
                              bool UpdateFP) {
  switch (Kind) {
  // NOTE(review): the case label for the standard (Zcmp) kind is truncated
  // here.
    return RISCV::CM_PUSH;
  // NOTE(review): the case label for the Xqccmp vendor kind is truncated here.
    return UpdateFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
  default:
    llvm_unreachable("Unhandled PushPopKind");
  }
}
921
// There are other pops but they are introduced later by the Push/Pop
// Optimizer.
// Maps a PushPopKind to the concrete pop opcode.
// NOTE(review): the signature line is truncated in this source view.
  switch (Kind) {
  // NOTE(review): the case label for the standard (Zcmp) kind is truncated
  // here.
    return RISCV::CM_POP;
  // NOTE(review): the case label for the Xqccmp vendor kind is truncated here.
    return RISCV::QC_CM_POP;
  default:
    llvm_unreachable("Unhandled PushPopKind");
  }
}
934
936 MachineBasicBlock &MBB) const {
937 MachineFrameInfo &MFI = MF.getFrameInfo();
938 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
939 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
941 bool PreferAscendingLS = STI.preferAscendingLoadStore();
942
944
945 // Debug location must be unknown since the first debug location is used
946 // to determine the end of the prologue.
947 DebugLoc DL;
948
949 // All calls are tail calls in GHC calling conv, and functions have no
950 // prologue/epilogue.
952 return;
953
954 // SiFive CLIC needs to swap `sp` into `sf.mscratchcsw`
956
957 // Emit prologue for shadow call stack.
958 emitSCSPrologue(MF, MBB, MBBI, DL);
959
960 // We keep track of the first instruction because it might be a
961 // `(QC.)CM.PUSH(FP)`, and we may need to adjust the immediate rather than
962 // inserting an `addi sp, sp, -N*16`
963 auto PossiblePush = MBBI;
964
965 // Skip past all callee-saved register spill instructions.
966 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
967 ++MBBI;
968
969 // Determine the correct frame layout
970 determineFrameLayout(MF);
971
972 const auto &CSI = MFI.getCalleeSavedInfo();
973
974 // Skip to before the spills of scalar callee-saved registers
975 // FIXME: assumes exactly one instruction is used to restore each
976 // callee-saved register.
977 MBBI =
978 std::prev(MBBI, getRVVCalleeSavedInfo(MF, CSI).size() +
979 getUnmanagedCSI(MF, CSI, PreferAscendingLS).size());
981 bool NeedsDwarfCFI = needsDwarfCFI(MF);
982
983 // If libcalls are used to spill and restore callee-saved registers, the frame
984 // has two sections; the opaque section managed by the libcalls, and the
985 // section managed by MachineFrameInfo which can also hold callee saved
986 // registers in fixed stack slots, both of which have negative frame indices.
987 // This gets even more complicated when incoming arguments are passed via the
988 // stack, as these too have negative frame indices. An example is detailed
989 // below:
990 //
991 // | incoming arg | <- FI[-3]
992 // | libcallspill |
993 // | calleespill | <- FI[-2]
994 // | calleespill | <- FI[-1]
995 // | this_frame | <- FI[0]
996 //
997 // For negative frame indices, the offset from the frame pointer will differ
998 // depending on which of these groups the frame index applies to.
999 // The following calculates the correct offset knowing the number of callee
1000 // saved registers spilt by the two methods.
1001 if (int LibCallRegs = getLibCallID(MF, MFI.getCalleeSavedInfo()) + 1) {
1002 // Calculate the size of the frame managed by the libcall. The stack
1003 // alignment of these libcalls should be the same as how we set it in
1004 // getABIStackAlignment.
1005 unsigned LibCallFrameSize =
1006 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
1007 RVFI->setLibCallStackSize(LibCallFrameSize);
1008
1009 if (NeedsDwarfCFI) {
1010 CFIBuilder.buildDefCFAOffset(LibCallFrameSize);
1011 for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
1012 CFIBuilder.buildOffset(CS.getReg(),
1013 MFI.getObjectOffset(CS.getFrameIdx()));
1014 }
1015 }
1016
1017 // FIXME (note copied from Lanai): This appears to be overallocating. Needs
1018 // investigation. Get the number of bytes to allocate from the FrameInfo.
1019 uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
1020 uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
1021 uint64_t RVVStackSize = RVFI->getRVVStackSize();
1022
1023 // Early exit if there is no need to allocate on the stack
1024 if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
1025 return;
1026
1027 // If the stack pointer has been marked as reserved, then produce an error if
1028 // the frame requires stack allocation
1029 if (STI.isRegisterReservedByUser(SPReg))
1031 MF.getFunction(), "Stack pointer required, but has been reserved."});
1032
1033 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
1034 // Split the SP adjustment to reduce the offsets of callee saved spill.
1035 if (FirstSPAdjustAmount) {
1036 StackSize = FirstSPAdjustAmount;
1037 RealStackSize = FirstSPAdjustAmount;
1038 }
1039
1040 if (RVFI->useQCIInterrupt(MF)) {
1041 // The function starts with `QC.C.MIENTER(.NEST)`, so the `(QC.)CM.PUSH(FP)`
1042 // could only be the next instruction.
1043 ++PossiblePush;
1044
1045 if (NeedsDwarfCFI) {
1046 // Insert the CFI metadata before where we think the `(QC.)CM.PUSH(FP)`
1047 // could be. The PUSH will also get its own CFI metadata for its own
1048 // modifications, which should come after the PUSH.
1049 CFIInstBuilder PushCFIBuilder(MBB, PossiblePush,
1052 for (const CalleeSavedInfo &CS : getQCISavedInfo(MF, CSI))
1053 PushCFIBuilder.buildOffset(CS.getReg(),
1054 MFI.getObjectOffset(CS.getFrameIdx()));
1055 }
1056 }
1057
1058 if (RVFI->isPushable(MF) && PossiblePush != MBB.end() &&
1059 isPush(PossiblePush->getOpcode())) {
1060 // Use available stack adjustment in push instruction to allocate additional
1061 // stack space. Align the stack size down to a multiple of 16. This is
1062 // needed for RVE.
1063 // FIXME: Can we increase the stack size to a multiple of 16 instead?
1064 uint64_t StackAdj =
1065 std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
1066 PossiblePush->getOperand(1).setImm(StackAdj);
1067 StackSize -= StackAdj;
1068
1069 if (NeedsDwarfCFI) {
1070 CFIBuilder.buildDefCFAOffset(RealStackSize - StackSize);
1071 for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
1072 CFIBuilder.buildOffset(CS.getReg(),
1073 MFI.getObjectOffset(CS.getFrameIdx()));
1074 }
1075 }
1076
1077 // Allocate space on the stack if necessary.
1078 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
1079 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
1080 bool NeedProbe = TLI->hasInlineStackProbe(MF);
1081 uint64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign());
1082 bool DynAllocation =
1083 MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
1084 if (StackSize != 0)
1085 allocateStack(MBB, MBBI, MF, StackSize, RealStackSize, NeedsDwarfCFI,
1086 NeedProbe, ProbeSize, DynAllocation,
1088
1089 // Save SiFive CLIC CSRs into Stack
1091
1092 // The frame pointer is callee-saved, and code has been generated for us to
1093 // save it to the stack. We need to skip over the storing of callee-saved
1094 // registers as the frame pointer must be modified after it has been saved
1095 // to the stack, not before.
1096 // FIXME: assumes exactly one instruction is used to save each callee-saved
1097 // register.
1098 std::advance(MBBI, getUnmanagedCSI(MF, CSI, PreferAscendingLS).size());
1099 CFIBuilder.setInsertPoint(MBBI);
1100
1101 // Iterate over list of callee-saved registers and emit .cfi_offset
1102 // directives.
1103 if (NeedsDwarfCFI) {
1104 for (const CalleeSavedInfo &CS :
1105 getUnmanagedCSI(MF, CSI, PreferAscendingLS)) {
1106 MCRegister Reg = CS.getReg();
1107 int64_t Offset = MFI.getObjectOffset(CS.getFrameIdx());
1108 // Emit CFI for both sub-registers. The even register is at the base
1109 // offset and odd at base+4.
1110 if (RISCV::GPRPairRegClass.contains(Reg)) {
1111 MCRegister EvenReg = RI->getSubReg(Reg, RISCV::sub_gpr_even);
1112 MCRegister OddReg = RI->getSubReg(Reg, RISCV::sub_gpr_odd);
1113 CFIBuilder.buildOffset(EvenReg, Offset);
1114 CFIBuilder.buildOffset(OddReg, Offset + 4);
1115 } else {
1116 CFIBuilder.buildOffset(Reg, Offset);
1117 }
1118 }
1119 }
1120
1121 // Generate new FP.
1122 if (hasFP(MF)) {
1123 if (STI.isRegisterReservedByUser(FPReg))
1125 MF.getFunction(), "Frame pointer required, but has been reserved."});
1126 // The frame pointer does need to be reserved from register allocation.
1127 assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
1128
1129 // Some stack management variants automatically keep FP updated, so we don't
1130 // need an instruction to do so.
1131 if (!RVFI->hasImplicitFPUpdates(MF)) {
1132 RI->adjustReg(
1133 MBB, MBBI, DL, FPReg, SPReg,
1134 StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
1136 }
1137
1138 if (NeedsDwarfCFI)
1139 CFIBuilder.buildDefCFA(FPReg, RVFI->getVarArgsSaveSize());
1140 }
1141
1142 uint64_t SecondSPAdjustAmount = 0;
1143 // Emit the second SP adjustment after saving callee saved registers.
1144 if (FirstSPAdjustAmount) {
1145 SecondSPAdjustAmount = getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
1146 assert(SecondSPAdjustAmount > 0 &&
1147 "SecondSPAdjustAmount should be greater than zero");
1148
1149 allocateStack(MBB, MBBI, MF, SecondSPAdjustAmount,
1150 getStackSizeWithRVVPadding(MF), NeedsDwarfCFI && !hasFP(MF),
1151 NeedProbe, ProbeSize, DynAllocation,
1153 }
1154
1155 if (RVVStackSize) {
1156 if (NeedProbe) {
1157 allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, RVVStackSize,
1159 NeedsDwarfCFI && !hasFP(MF), DynAllocation);
1160 } else {
1161 // We must keep the stack pointer aligned through any intermediate
1162 // updates.
1163 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
1164 StackOffset::getScalable(-RVVStackSize),
1166 }
1167
1168 if (NeedsDwarfCFI && !hasFP(MF)) {
1169 // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
1171 *RI, SPReg,
1172 StackOffset::get(getStackSizeWithRVVPadding(MF), RVVStackSize / 8)));
1173 }
1174
1175 std::advance(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
1176 if (NeedsDwarfCFI)
1177 emitCalleeSavedRVVPrologCFI(MBB, MBBI, hasFP(MF));
1178 }
1179
1180 if (hasFP(MF)) {
1181 // Realign Stack
1182 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1183 if (RI->hasStackRealignment(MF)) {
1184 Align MaxAlignment = MFI.getMaxAlign();
1185
1186 const RISCVInstrInfo *TII = STI.getInstrInfo();
1187 if (isInt<12>(-(int)MaxAlignment.value())) {
1188 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
1189 .addReg(SPReg)
1190 .addImm(-(int)MaxAlignment.value())
1192 } else {
1193 unsigned ShiftAmount = Log2(MaxAlignment);
1194 Register VR =
1195 MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
1196 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
1197 .addReg(SPReg)
1198 .addImm(ShiftAmount)
1200 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
1201 .addReg(VR)
1202 .addImm(ShiftAmount)
1204 }
1205 if (NeedProbe && RVVStackSize == 0) {
1206 // Do a probe if the align + size allocated just passed the probe size
1207 // and was not yet probed.
1208 if (SecondSPAdjustAmount < ProbeSize &&
1209 SecondSPAdjustAmount + MaxAlignment.value() >= ProbeSize) {
1210 bool IsRV64 = STI.is64Bit();
1211 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
1212 .addReg(RISCV::X0)
1213 .addReg(SPReg)
1214 .addImm(0)
1216 }
1217 }
1218 // FP will be used to restore the frame in the epilogue, so we need
1219 // another base register BP to record SP after re-alignment. SP will
1220 // track the current stack after allocating variable sized objects.
1221 if (hasBP(MF)) {
1222 // move BP, SP
1223 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), BPReg)
1224 .addReg(SPReg)
1225 .addImm(0)
1227 }
1228 }
1229 }
1230}
1231
// Deallocate the scalar portion of the stack frame: adjust SP upward by
// StackSize bytes and, when Dwarf CFI is required, re-define the CFA offset
// to CFAOffset (the amount of frame that remains allocated after this
// adjustment — e.g. a libcall- or cm.pop-managed region). StackSize is an
// in/out parameter and is zeroed once the adjustment has been emitted so
// callers do not deallocate it twice.
// NOTE(review): the MBB/MBBI parameter lines and the trailing arguments of
// adjustReg and the CFI builder are not visible in this extract — confirm
// against the full source.
1232void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
 1235                                         const DebugLoc &DL,
 1236                                         uint64_t &StackSize,
 1237                                         int64_t CFAOffset) const {
 1239
  // Single SP adjustment; the flag/alignment arguments are on a dropped line.
 1240  RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
 1242  StackSize = 0;
 1243
  // Keep the unwind info in sync with the new SP value.
 1244  if (needsDwarfCFI(MF))
 1246        .buildDefCFAOffset(CFAOffset);
 1247}
1248
// Emit the function epilogue into MBB: undo the prologue's stack adjustments
// in reverse order (RVV section, the second half of a split SP adjustment,
// scalar callee-saved restores, then the pop/libcall-managed region) while
// keeping the Dwarf CFI state consistent after every step.
// NOTE(review): the signature's first line and several continuation lines
// (ternary else-arms, trailing call arguments) are missing from this extract;
// hedged notes below mark the affected spots.
 1250                                     MachineBasicBlock &MBB) const {
 1251  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
 1252  MachineFrameInfo &MFI = MF.getFrameInfo();
 1253  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 1254  bool PreferAscendingLS = STI.preferAscendingLoadStore();
 1255
 1256  // All calls are tail calls in GHC calling conv, and functions have no
 1257  // prologue/epilogue.
 1259    return;
 1260
 1261  // Get the insert location for the epilogue. If there were no terminators in
 1262  // the block, get the last instruction.
 1264  DebugLoc DL;
 1265  if (!MBB.empty()) {
 1266    MBBI = MBB.getLastNonDebugInstr();
 1267    if (MBBI != MBB.end())
 1268      DL = MBBI->getDebugLoc();
 1269
 1270    MBBI = MBB.getFirstTerminator();
 1271
 1272    // Skip to before the restores of all callee-saved registers.
 1273    while (MBBI != MBB.begin() &&
 1274           std::prev(MBBI)->getFlag(MachineInstr::FrameDestroy))
 1275      --MBBI;
 1276  }
 1277
 1278  const auto &CSI = MFI.getCalleeSavedInfo();
 1279
 1280  // Skip to before the restores of scalar callee-saved registers
 1281  // FIXME: assumes exactly one instruction is used to restore each
 1282  // callee-saved register.
 1283  auto FirstScalarCSRRestoreInsn =
 1284      std::next(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
 1285  CFIInstBuilder CFIBuilder(MBB, FirstScalarCSRRestoreInsn,
 1287  bool NeedsDwarfCFI = needsDwarfCFI(MF);
 1288
  // The prologue may have split the SP adjustment in two (see
  // getFirstSPAdjustAmount); if so the sizes below reflect only the first
  // part until the second adjustment is undone further down. The ternary
  // else-arms are on lines not visible in this extract.
 1289  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
 1290  uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
 1292  uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
 1294                           RVFI->getReservedSpillsSize();
 1295  uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
 1296  uint64_t RVVStackSize = RVFI->getRVVStackSize();
 1297
  // SP must be recomputed from FP when the stack was realigned; the second
  // clause of this condition is on a line not visible in this extract.
 1298  bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
 1300  if (RVVStackSize) {
 1301    // If RestoreSPFromFP the stack pointer will be restored using the frame
 1302    // pointer value.
 1303    if (!RestoreSPFromFP)
 1304      RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, SPReg,
 1305                    StackOffset::getScalable(RVVStackSize),
 1307
 1308    if (NeedsDwarfCFI) {
 1309      if (!hasFP(MF))
 1310        CFIBuilder.buildDefCFA(SPReg, RealStackSize);
 1311      emitCalleeSavedRVVEpilogCFI(MBB, FirstScalarCSRRestoreInsn);
 1312    }
 1313  }
 1314
 1315  if (FirstSPAdjustAmount) {
 1316    uint64_t SecondSPAdjustAmount =
 1317        getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
 1318    assert(SecondSPAdjustAmount > 0 &&
 1319           "SecondSPAdjustAmount should be greater than zero");
 1320
 1321    // If RestoreSPFromFP the stack pointer will be restored using the frame
 1322    // pointer value.
 1323    if (!RestoreSPFromFP)
 1324      RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, SPReg,
 1325                    StackOffset::getFixed(SecondSPAdjustAmount),
 1327
 1328    if (NeedsDwarfCFI && !hasFP(MF))
 1329      CFIBuilder.buildDefCFAOffset(FirstSPAdjustAmount);
 1330  }
 1331
 1332  // Restore the stack pointer using the value of the frame pointer. Only
 1333  // necessary if the stack pointer was modified, meaning the stack size is
 1334  // unknown.
 1335  //
 1336  // In order to make sure the stack point is right through the EH region,
 1337  // we also need to restore stack pointer from the frame pointer if we
 1338  // don't preserve stack space within prologue/epilogue for outgoing variables,
 1339  // normally it's just checking the variable sized object is present or not
 1340  // is enough, but we also don't preserve that at prologue/epilogue when
 1341  // have vector objects in stack.
 1342  if (RestoreSPFromFP) {
 1343    assert(hasFP(MF) && "frame pointer should not have been eliminated");
 1344    RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, FPReg,
 1346                  getStackAlign());
 1347  }
 1348
 1349  if (NeedsDwarfCFI && hasFP(MF))
 1350    CFIBuilder.buildDefCFA(SPReg, RealStackSize);
 1351
 1352  // Skip to after the restores of scalar callee-saved registers
 1353  // FIXME: assumes exactly one instruction is used to restore each
 1354  // callee-saved register.
 1355  MBBI = std::next(FirstScalarCSRRestoreInsn,
 1356                   getUnmanagedCSI(MF, CSI, PreferAscendingLS).size());
 1357  CFIBuilder.setInsertPoint(MBBI);
 1358
 1359  if (getLibCallID(MF, CSI) != -1) {
 1360    // tail __riscv_restore_[0-12] instruction is considered as a terminator,
 1361    // therefore it is unnecessary to place any CFI instructions after it. Just
 1362    // deallocate stack if needed and return.
 1363    if (StackSize != 0)
 1364      deallocateStack(MF, MBB, MBBI, DL, StackSize,
 1365                      RVFI->getLibCallStackSize());
 1366
 1367    // Emit epilogue for shadow call stack.
 1368    emitSCSEpilogue(MF, MBB, MBBI, DL);
 1369    return;
 1370  }
 1371
 1372  // Recover callee-saved registers.
 1373  if (NeedsDwarfCFI) {
 1374    for (const CalleeSavedInfo &CS :
 1375         getUnmanagedCSI(MF, CSI, PreferAscendingLS)) {
 1376      MCRegister Reg = CS.getReg();
 1377      // Emit CFI for both sub-registers.
 1378      if (RISCV::GPRPairRegClass.contains(Reg)) {
 1379        MCRegister EvenReg = RI->getSubReg(Reg, RISCV::sub_gpr_even);
 1380        MCRegister OddReg = RI->getSubReg(Reg, RISCV::sub_gpr_odd);
 1381        CFIBuilder.buildRestore(EvenReg);
 1382        CFIBuilder.buildRestore(OddReg);
 1383      } else {
 1384        CFIBuilder.buildRestore(Reg);
 1385      }
 1386    }
 1387  }
 1388
  // cm.pop path: fold up to 48 bytes of deallocation (aligned down to a
  // multiple of 16) into the pop instruction itself.
 1389  if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(MBBI->getOpcode())) {
 1390    // Use available stack adjustment in pop instruction to deallocate stack
 1391    // space. Align the stack size down to a multiple of 16. This is needed for
 1392    // RVE.
 1393    // FIXME: Can we increase the stack size to a multiple of 16 instead?
 1394    uint64_t StackAdj =
 1395        std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
 1396    MBBI->getOperand(1).setImm(StackAdj);
 1397    StackSize -= StackAdj;
 1398
 1399    if (StackSize != 0)
 1400      deallocateStack(MF, MBB, MBBI, DL, StackSize,
 1401                      /*stack_adj of cm.pop instr*/ RealStackSize - StackSize);
 1402
 1403    auto NextI = next_nodbg(MBBI, MBB.end());
 1404    if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
 1405      ++MBBI;
 1406      if (NeedsDwarfCFI) {
 1407        CFIBuilder.setInsertPoint(MBBI);
 1408
 1409        for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
 1410          CFIBuilder.buildRestore(CS.getReg());
 1411
 1412        // Update CFA Offset. If this is a QCI interrupt function, there will
 1413        // be a leftover offset which is deallocated by `QC.C.MILEAVERET`,
 1414        // otherwise getQCIInterruptStackSize() will be 0.
 1415        CFIBuilder.buildDefCFAOffset(RVFI->getQCIInterruptStackSize());
 1416      }
 1417    }
 1418  }
 1419
 1421
 1422  // Deallocate stack if StackSize isn't a zero yet. If this is a QCI interrupt
 1423  // function, there will be a leftover offset which is deallocated by
 1424  // `QC.C.MILEAVERET`, otherwise getQCIInterruptStackSize() will be 0.
 1425  if (StackSize != 0)
 1426    deallocateStack(MF, MBB, MBBI, DL, StackSize,
 1427                    RVFI->getQCIInterruptStackSize());
 1428
 1429  // Emit epilogue for shadow call stack.
 1430  emitSCSEpilogue(MF, MBB, MBBI, DL);
 1431
 1432  // SiFive CLIC needs to swap `sf.mscratchcsw` into `sp`
 1434}
1435
// Compute the offset (fixed + scalable parts) of frame index FI and report
// the base register to address it from through FrameReg (SP, FP, or BP).
// The choice depends on whether FI is an unmanaged callee-saved slot,
// whether the stack was realigned, and whether RVV (scalable) objects are
// present. NOTE(review): the signature's first lines and several
// continuation lines are missing from this extract.
 1438                                              Register &FrameReg) const {
 1439  const MachineFrameInfo &MFI = MF.getFrameInfo();
 1441  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 1442
 1443  // Callee-saved registers should be referenced relative to the stack
 1444  // pointer (positive offset), otherwise use the frame pointer (negative
 1445  // offset).
 1446  const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo(),
 1447                                    STI.preferAscendingLoadStore());
 1448  int MinCSFI = 0;
 1449  int MaxCSFI = -1;
 1451  auto StackID = MFI.getStackID(FI);
 1452
 1453  assert((StackID == TargetStackID::Default ||
 1454          StackID == TargetStackID::ScalableVector) &&
 1455         "Unexpected stack ID for the frame object.");
  // Seed Offset from the object's MFI data; the initializer continuations
  // are on lines not visible in this extract.
 1456  if (StackID == TargetStackID::Default) {
 1457    assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
 1459                                MFI.getOffsetAdjustment());
 1460  } else if (StackID == TargetStackID::ScalableVector) {
 1462  }
 1463
 1464  uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
 1465
  // Bounds of the frame-index range occupied by unmanaged callee-saved slots.
 1466  if (CSI.size()) {
 1467    MinCSFI = std::min(CSI.front().getFrameIdx(), CSI.back().getFrameIdx());
 1468    MaxCSFI = std::max(CSI.front().getFrameIdx(), CSI.back().getFrameIdx());
 1469  }
 1470
  // Unmanaged CSR slots are always addressed off SP (positive offsets).
 1471  if (FI >= MinCSFI && FI <= MaxCSFI) {
 1472    FrameReg = SPReg;
 1473
 1474    if (FirstSPAdjustAmount)
 1475      Offset += StackOffset::getFixed(FirstSPAdjustAmount);
 1476    else
 1478    return Offset;
 1479  }
 1480
 1481  if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(FI)) {
 1482    // If the stack was realigned, the frame pointer is set in order to allow
 1483    // SP to be restored, so we need another base register to record the stack
 1484    // after realignment.
 1485    // |--------------------------| --
 1486    // | callee-allocated save    | | <----|
 1487    // | area for register varargs| |      |
 1488    // |--------------------------| <-- FP |
 1489    // | callee-saved registers   | |      |
 1490    // |--------------------------| --     |
 1491    // | realignment (the size of | |      |
 1492    // | this area is not counted | |      |
 1493    // | in MFI.getStackSize())   | |      |
 1494    // |--------------------------| -- |-- MFI.getStackSize()
 1495    // | RVV alignment padding    | |      |
 1496    // | (not counted in          | |      |
 1497    // | MFI.getStackSize() but   | |      |
 1498    // | counted in               | |      |
 1499    // | RVFI.getRVVStackSize())  | |      |
 1500    // |--------------------------| --     |
 1501    // | RVV objects              | |      |
 1502    // | (not counted in          | |      |
 1503    // | MFI.getStackSize())      | |      |
 1504    // |--------------------------| --     |
 1505    // | padding before RVV       | |      |
 1506    // | (not counted in          | |      |
 1507    // | MFI.getStackSize() or in | |      |
 1508    // | RVFI.getRVVStackSize())  | |      |
 1509    // |--------------------------| --     |
 1510    // | scalar local variables   | | <----'
 1511    // |--------------------------| -- <-- BP (if var sized objects present)
 1512    // | VarSize objects          | |
 1513    // |--------------------------| -- <-- SP
 1514    if (hasBP(MF)) {
 1515      FrameReg = RISCVABI::getBPReg();
 1516    } else {
 1517      // VarSize objects must be empty in this case!
 1518      assert(!MFI.hasVarSizedObjects());
 1519      FrameReg = SPReg;
 1520    }
 1521  } else if (!RI->hasStackRealignment(MF)) {
 1522    // Note: Keeping the following as multiple 'if' statements rather than
 1523    // merging to a single expression for readability.
 1524    if (!hasFP(MF)) {
 1525      // No FP available, must use SP.
 1526      FrameReg = SPReg;
 1527    } else {
 1528      FrameReg = FPReg;
 1529      if (RVFI->getRVVStackSize() == 0 && !MFI.hasVarSizedObjects()) {
 1530        // Both FP and SP are candidates.
 1531        // Prefer SP when the SP-relative offset fits in the compressed
 1532        // instruction immediate range.
 1533        int64_t SPOff = Offset.getFixed() + MFI.getStackSize();
 1534        int64_t CLWSPMaxOffset = 252;
 1535        int64_t CLDSPMaxOffset = 504;
 1536        int64_t SPThreshold = STI.is64Bit() ? CLDSPMaxOffset : CLWSPMaxOffset;
 1537        if (SPOff >= 0 && SPOff <= SPThreshold)
 1538          FrameReg = SPReg;
 1539      }
 1540    }
 1541  } else {
 1542    assert(RI->hasStackRealignment(MF) && MFI.isFixedObjectIndex(FI) &&
 1543           "Expected fixed object with stack realignment");
 1544    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
 1545    FrameReg = FPReg;
 1546  }
 1547
  // FP-based access: fold in the varargs save area that sits between FP and
  // the callee-saved region (see diagram below).
 1548  if (FrameReg == FPReg) {
 1549    Offset += StackOffset::getFixed(RVFI->getVarArgsSaveSize());
 1550    // When using FP to access scalable vector objects, we need to minus
 1551    // the frame size.
 1552    //
 1553    // |--------------------------| --
 1554    // | callee-allocated save    | |
 1555    // | area for register varargs| |
 1556    // |--------------------------| | -- <-- FP
 1557    // | callee-saved registers   | |
 1558    // |--------------------------| | MFI.getStackSize()
 1559    // | scalar local variables   | |
 1560    // |--------------------------| -- (Offset of RVV objects is from here.)
 1561    // | RVV objects              |
 1562    // |--------------------------|
 1563    // | VarSize objects          |
 1564    // |--------------------------| <-- SP
 1565    if (StackID == TargetStackID::ScalableVector) {
 1566      assert(!RI->hasStackRealignment(MF) &&
 1567             "Can't index across variable sized realign");
 1568      // We don't expect any extra RVV alignment padding, as the stack size
 1569      // and RVV object sections should be correct aligned in their own
 1570      // right.
 1572             "Inconsistent stack layout");
 1574    }
 1575    return Offset;
 1576  }
 1577
 1578  // This case handles indexing off both SP and BP.
 1579  // If indexing off SP, there must not be any var sized objects
 1580  assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());
 1581
 1582  // When using SP to access frame objects, we need to add RVV stack size.
 1583  //
 1584  // |--------------------------| --
 1585  // | callee-allocated save    | | <----|
 1586  // | area for register varargs| |      |
 1587  // |--------------------------| |      | <-- FP
 1588  // | callee-saved registers   | |      |
 1589  // |--------------------------| --     |
 1590  // | RVV alignment padding    | |      |
 1591  // | (not counted in          | |      |
 1592  // | MFI.getStackSize() but   | |      |
 1593  // | counted in               | |      |
 1594  // | RVFI.getRVVStackSize())  | |      |
 1595  // |--------------------------| --     |
 1596  // | RVV objects              | |      |-- MFI.getStackSize()
 1597  // | (not counted in          | |      |
 1598  // | MFI.getStackSize())      | |      |
 1599  // |--------------------------| --     |
 1600  // | padding before RVV       | |      |
 1601  // | (not counted in          | |      |
 1602  // | MFI.getStackSize())      | |      |
 1603  // |--------------------------| --     |
 1604  // | scalar local variables   | | <----'
 1605  // |--------------------------| -- <-- BP (if var sized objects present)
 1606  // | VarSize objects          | |
 1607  // |--------------------------| -- <-- SP
 1608  //
 1609  // The total amount of padding surrounding RVV objects is described by
 1610  // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
 1611  // objects to the required alignment.
 1612  if (MFI.getStackID(FI) == TargetStackID::Default) {
 1613    if (MFI.isFixedObjectIndex(FI)) {
 1614      assert(!RI->hasStackRealignment(MF) &&
 1615             "Can't index across variable sized realign");
 1617               RVFI->getRVVStackSize());
 1618    } else {
 1620    }
 1621  } else if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
 1622    // Ensure the base of the RVV stack is correctly aligned: add on the
 1623    // alignment padding.
 1624    int ScalarLocalVarSize = MFI.getStackSize() -
 1625                             RVFI->getCalleeSavedStackSize() -
 1626                             RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
 1627    Offset += StackOffset::get(ScalarLocalVarSize, RVFI->getRVVStackSize());
 1628  }
 1629  return Offset;
 1630}
1631
// Map a (possibly grouped) vector register to its first VR sub-register via
// sub_vrm1_0; a register without that sub-register is returned unchanged.
// NOTE(review): the first line of the signature is not visible in this
// extract.
 1633                                       const Register &Reg) {
 1634  MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0);
 1635  // If it's not a grouped vector register, it doesn't have subregister, so
 1636  // the base register is just itself.
 1637  if (!BaseReg.isValid())
 1638    BaseReg = Reg;
 1639  return BaseReg;
 1640}
1641
// Decide which callee-saved registers must be spilled for this function,
// applying RISC-V specific fixups on top of the generic result already in
// SavedRegs: vector CSRs are only kept when genuinely clobbered, RA/FP are
// forced when a frame pointer is used, BP is reserved when present, X27 is
// paired with X26 for cm.push/pop, and adjacent GPR CSRs are coalesced into
// GPRPair super-registers for Zilsd load/store-pair spills.
// NOTE(review): the signature's first line and a few interior lines (e.g.
// the NewCSRs/CSRSet declarations) are not visible in this extract.
 1643                                              BitVector &SavedRegs,
 1644                                              RegScavenger *RS) const {
 1646
 1647  // In TargetFrameLowering::determineCalleeSaves, any vector register is marked
 1648  // as saved if any of its subregister is clobbered, this is not correct in
 1649  // vector registers. We only want the vector register to be marked as saved
 1650  // if all of its subregisters are clobbered.
 1651  // For example:
 1652  // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked.
 1653  // Correct behavior: v24m2 is marked only if v24 and v25 are marked.
 1654  MachineRegisterInfo &MRI = MF.getRegInfo();
 1655  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
 1656  const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
 1657  for (unsigned i = 0; CSRegs[i]; ++i) {
 1658    unsigned CSReg = CSRegs[i];
 1659    // Only vector registers need special care.
 1660    if (!RISCV::VRRegClass.contains(getRVVBaseRegister(TRI, CSReg)))
 1661      continue;
 1662
    // Clear the generic marking, then re-add only if this exact register is
    // defined or recorded as used.
 1663    SavedRegs.reset(CSReg);
 1664
 1665    auto SubRegs = TRI.subregs(CSReg);
 1666    // Set the register and all its subregisters.
 1667    if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) {
 1668      SavedRegs.set(CSReg);
 1669      for (unsigned Reg : SubRegs)
 1670        SavedRegs.set(Reg);
 1671    }
 1672
 1673  }
 1674
 1675  // Unconditionally spill RA and FP only if the function uses a frame
 1676  // pointer.
 1677  if (hasFP(MF)) {
 1678    SavedRegs.set(RAReg);
 1679    SavedRegs.set(FPReg);
 1680  }
 1681  // Mark BP as used if function has dedicated base pointer.
 1682  if (hasBP(MF))
 1683    SavedRegs.set(RISCVABI::getBPReg());
 1684
 1685  // When using cm.push/pop we must save X27 if we save X26.
 1686  auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
 1687  if (RVFI->isPushable(MF) && SavedRegs.test(RISCV::X26))
 1688    SavedRegs.set(RISCV::X27);
 1689
 1690  // For Zilsd on RV32, append GPRPair registers to the CSR list. This prevents
 1691  // the need to create register sets for each abi which is a lot more complex.
 1692  // Don't use Zilsd for callee-saved coalescing if the required alignment
 1693  // exceeds the stack alignment.
 1694  bool UseZilsd = !STI.is64Bit() && STI.hasStdExtZilsd() &&
 1695                  STI.getZilsdAlign() <= getStackAlign();
 1696  if (UseZilsd) {
    // NOTE(review): the declarations of NewCSRs/CSRSet sit on lines not
    // visible in this extract.
 1699    for (unsigned i = 0; CSRegs[i]; ++i) {
 1700      NewCSRs.push_back(CSRegs[i]);
 1701      CSRSet.insert(CSRegs[i]);
 1702    }
 1703
 1704    // Append GPRPair registers for pairs where both sub-registers are in CSR
 1705    // list. Iterate through all GPRPairs and check if both sub-regs are CSRs.
 1706    for (MCPhysReg Pair : RISCV::GPRPairRegClass) {
 1707      MCPhysReg EvenReg = TRI.getSubReg(Pair, RISCV::sub_gpr_even);
 1708      MCPhysReg OddReg = TRI.getSubReg(Pair, RISCV::sub_gpr_odd);
 1709      if (CSRSet.contains(EvenReg) && CSRSet.contains(OddReg))
 1710        NewCSRs.push_back(Pair);
 1711    }
 1712
 1713    MRI.setCalleeSavedRegs(NewCSRs);
 1714    CSRegs = MRI.getCalleeSavedRegs();
 1715  }
 1716
 1717  // Check if all subregisters are marked for saving. If so, set the super
 1718  // register bit. For GPRPair, only check sub_gpr_even and sub_gpr_odd, not
 1719  // aliases like X8_W or X8_H which are not set in SavedRegs.
 1720  for (unsigned i = 0; CSRegs[i]; ++i) {
 1721    unsigned CSReg = CSRegs[i];
 1722    bool CombineToSuperReg;
 1723    if (RISCV::GPRPairRegClass.contains(CSReg)) {
 1724      MCPhysReg EvenReg = TRI.getSubReg(CSReg, RISCV::sub_gpr_even);
 1725      MCPhysReg OddReg = TRI.getSubReg(CSReg, RISCV::sub_gpr_odd);
 1726      CombineToSuperReg = SavedRegs.test(EvenReg) && SavedRegs.test(OddReg);
 1727      // If s0(x8) is used as FP we can't generate load/store pair because it
 1728      // breaks the frame chain.
 1729      if (hasFP(MF) && CSReg == RISCV::X8_X9)
 1730        CombineToSuperReg = false;
 1731    } else {
 1732      auto SubRegs = TRI.subregs(CSReg);
 1733      CombineToSuperReg =
 1734          !SubRegs.empty() && llvm::all_of(SubRegs, [&](unsigned Reg) {
 1735            return SavedRegs.test(Reg);
 1736          });
 1737    }
 1738
 1739    if (CombineToSuperReg)
 1740      SavedRegs.set(CSReg);
 1741  }
 1742
 1743  // SiFive Preemptible Interrupt Handlers need additional frame entries
 1745}
1746
1747std::pair<int64_t, Align>
1748RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
1749 MachineFrameInfo &MFI = MF.getFrameInfo();
1750 // Create a buffer of RVV objects to allocate.
1751 SmallVector<int, 8> ObjectsToAllocate;
1752 auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
1753 for (int I = FIBegin, E = FIEnd; I != E; ++I) {
1754 unsigned StackID = MFI.getStackID(I);
1755 if (StackID != TargetStackID::ScalableVector)
1756 continue;
1757 if (MFI.isDeadObjectIndex(I))
1758 continue;
1759
1760 ObjectsToAllocate.push_back(I);
1761 }
1762 };
1763 // First push RVV Callee Saved object, then push RVV stack object
1764 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
1765 const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
1766 if (!RVVCSI.empty())
1767 pushRVVObjects(RVVCSI[0].getFrameIdx(),
1768 RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
1769 pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
1770
1771 // The minimum alignment is 16 bytes.
1772 Align RVVStackAlign(16);
1773 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1774
1775 if (!ST.hasVInstructions()) {
1776 assert(ObjectsToAllocate.empty() &&
1777 "Can't allocate scalable-vector objects without V instructions");
1778 return std::make_pair(0, RVVStackAlign);
1779 }
1780
1781 // Allocate all RVV locals and spills
1782 int64_t Offset = 0;
1783 for (int FI : ObjectsToAllocate) {
1784 // ObjectSize in bytes.
1785 int64_t ObjectSize = MFI.getObjectSize(FI);
1786 auto ObjectAlign =
1787 std::max(Align(RISCV::RVVBytesPerBlock), MFI.getObjectAlign(FI));
1788 // If the data type is the fractional vector type, reserve one vector
1789 // register for it.
1790 if (ObjectSize < RISCV::RVVBytesPerBlock)
1791 ObjectSize = RISCV::RVVBytesPerBlock;
1792 Offset = alignTo(Offset + ObjectSize, ObjectAlign);
1793 MFI.setObjectOffset(FI, -Offset);
1794 // Update the maximum alignment of the RVV stack section
1795 RVVStackAlign = std::max(RVVStackAlign, ObjectAlign);
1796 }
1797
1798 uint64_t StackSize = Offset;
1799
1800 // Ensure the alignment of the RVV stack. Since we want the most-aligned
1801 // object right at the bottom (i.e., any padding at the top of the frame),
1802 // readjust all RVV objects down by the alignment padding.
1803 // Stack size and offsets are multiples of vscale, stack alignment is in
1804 // bytes, we can divide stack alignment by minimum vscale to get a maximum
1805 // stack alignment multiple of vscale.
1806 auto VScale =
1807 std::max<uint64_t>(ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, 1);
1808 if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
1809 if (auto AlignmentPadding =
1810 offsetToAlignment(StackSize, Align(RVVStackAlignVScale))) {
1811 StackSize += AlignmentPadding;
1812 for (int FI : ObjectsToAllocate)
1813 MFI.setObjectOffset(FI, MFI.getObjectOffset(FI) - AlignmentPadding);
1814 }
1815 }
1816
1817 return std::make_pair(StackSize, RVVStackAlign);
1818}
1819
// Estimate how many emergency scavenger slots RVV frame-index elimination may
// need for this function: up to 2 for spills of scalable objects, 1 for
// spills of non-scalable objects, and 1 for an ADDI addressing a scalable
// object. Stops scanning early once the known maximum has been reached.
// NOTE(review): the function signature (line above this extract's first
// visible line) and the early-exit guard condition are not visible here.
 1821  // For RVV spill, scalable stack offsets computing requires up to two scratch
 1822  // registers
 1823  static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
 1824
 1825  // For RVV spill, non-scalable stack offsets computing requires up to one
 1826  // scratch register.
 1827  static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
 1828
 1829  // ADDI instruction's destination register can be used for computing
 1830  // offsets. So Scalable stack offsets require up to one scratch register.
 1831  static constexpr unsigned ScavSlotsADDIScalableObject = 1;
 1832
  // Upper bound over all cases; reaching it ends the scan early.
 1833  static constexpr unsigned MaxScavSlotsNumKnown =
 1834      std::max({ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
 1835                ScavSlotsNumRVVSpillNonScalableObject});
 1836
 1837  unsigned MaxScavSlotsNum = 0;
  // NOTE(review): the guard condition for this early return (i.e. 0 slots)
  // is on a line not visible in this extract.
 1839    return false;
 1840  for (const MachineBasicBlock &MBB : MF)
 1841    for (const MachineInstr &MI : MBB) {
 1842      bool IsRVVSpill = RISCV::isRVVSpill(MI);
 1843      for (auto &MO : MI.operands()) {
 1844        if (!MO.isFI())
 1845          continue;
 1846        bool IsScalableVectorID = MF.getFrameInfo().getStackID(MO.getIndex()) ==
 1848        if (IsRVVSpill) {
 1849          MaxScavSlotsNum = std::max(
 1850              MaxScavSlotsNum, IsScalableVectorID
 1851                                   ? ScavSlotsNumRVVSpillScalableObject
 1852                                   : ScavSlotsNumRVVSpillNonScalableObject);
 1853        } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
 1854          MaxScavSlotsNum =
 1855              std::max(MaxScavSlotsNum, ScavSlotsADDIScalableObject);
 1856        }
 1857      }
 1858      if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
 1859        return MaxScavSlotsNumKnown;
 1860    }
 1861  return MaxScavSlotsNum;
 1862}
1863
1864static bool hasRVVFrameObject(const MachineFunction &MF) {
1865 // Originally, the function will scan all the stack objects to check whether
1866 // if there is any scalable vector object on the stack or not. However, it
1867 // causes errors in the register allocator. In issue 53016, it returns false
1868 // before RA because there is no RVV stack objects. After RA, it returns true
1869 // because there are spilling slots for RVV values during RA. It will not
1870 // reserve BP during register allocation and generate BP access in the PEI
1871 // pass due to the inconsistent behavior of the function.
1872 //
1873 // The function is changed to use hasVInstructions() as the return value. It
1874 // is not precise, but it can make the register allocation correct.
1875 //
1876 // FIXME: Find a better way to make the decision or revisit the solution in
1877 // D103622.
1878 //
1879 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1880 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1881}
1882
// Estimate a worst-case code size for MF in bytes, assuming branch relaxation
// replaces every conditional/unconditional branch with the longest possible
// sequence (register spill + far jump + restore), as detailed in the comment
// below. NOTE(review): the first line of the signature is not visible in this
// extract.
 1884                                          const RISCVInstrInfo &TII) {
 1885  unsigned FnSize = 0;
 1886  for (auto &MBB : MF) {
 1887    for (auto &MI : MBB) {
 1888      // Far branches over 20-bit offset will be relaxed in branch relaxation
 1889      // pass. In the worst case, conditional branches will be relaxed into
 1890      // the following instruction sequence. Unconditional branches are
 1891      // relaxed in the same way, with the exception that there is no first
 1892      // branch instruction.
 1893      //
 1894      //        foo
 1895      //        bne     t5, t6, .rev_cond // `TII->getInstSizeInBytes(MI)` bytes
 1896      //        sd      s11, 0(sp)        // 4 bytes, or 2 bytes with Zca
 1897      //        jump    .restore, s11     // 8 bytes
 1898      // .rev_cond
 1899      //        bar
 1900      //        j       .dest_bb          // 4 bytes, or 2 bytes with Zca
 1901      // .restore:
 1902      //        ld      s11, 0(sp)        // 4 bytes, or 2 bytes with Zca
 1903      // .dest:
 1904      //        baz
      // Conditional branches keep their own encoding size on top of the
      // relaxation sequence; unconditional ones only pay for the sequence.
 1905      if (MI.isConditionalBranch())
 1906        FnSize += TII.getInstSizeInBytes(MI);
 1907      if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
 1908        if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
 1909          FnSize += 2 + 8 + 2 + 2;
 1910        else
 1911          FnSize += 4 + 8 + 4 + 4;
 1912        continue;
 1913      }
 1914
 1915      FnSize += TII.getInstSizeInBytes(MI);
 1916    }
 1917  }
 1918  return FnSize;
 1919}
1920
1922 MachineFunction &MF, RegScavenger *RS) const {
1923 const RISCVRegisterInfo *RegInfo =
1924 MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
1925 const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1926 MachineFrameInfo &MFI = MF.getFrameInfo();
1927 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1928 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1929
1930 int64_t RVVStackSize;
1931 Align RVVStackAlign;
1932 std::tie(RVVStackSize, RVVStackAlign) = assignRVVStackObjectOffsets(MF);
1933
1934 RVFI->setRVVStackSize(RVVStackSize);
1935 RVFI->setRVVStackAlign(RVVStackAlign);
1936
1937 if (hasRVVFrameObject(MF)) {
1938 // Ensure the entire stack is aligned to at least the RVV requirement: some
1939 // scalable-vector object alignments are not considered by the
1940 // target-independent code.
1941 MFI.ensureMaxAlignment(RVVStackAlign);
1942 }
1943
1944 unsigned ScavSlotsNum = 0;
1945
1946 // estimateStackSize has been observed to under-estimate the final stack
1947 // size, so give ourselves wiggle-room by checking for stack size
1948 // representable an 11-bit signed field rather than 12-bits.
1949 if (!isInt<11>(MFI.estimateStackSize(MF)))
1950 ScavSlotsNum = 1;
1951
1952 // Far branches over 20-bit offset require a spill slot for scratch register.
1953 bool IsLargeFunction = !isInt<20>(estimateFunctionSizeInBytes(MF, *TII));
1954 if (IsLargeFunction)
1955 ScavSlotsNum = std::max(ScavSlotsNum, 1u);
1956
1957 // RVV loads & stores have no capacity to hold the immediate address offsets
1958 // so we must always reserve an emergency spill slot if the MachineFunction
1959 // contains any RVV spills.
1960 ScavSlotsNum = std::max(ScavSlotsNum, getScavSlotsNumForRVV(MF));
1961
1962 for (unsigned I = 0; I < ScavSlotsNum; I++) {
1963 int FI = MFI.CreateSpillStackObject(RegInfo->getSpillSize(*RC),
1964 RegInfo->getSpillAlign(*RC));
1965 RS->addScavengingFrameIndex(FI);
1966
1967 if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
1968 RVFI->setBranchRelaxationScratchFrameIndex(FI);
1969 }
1970
1971 unsigned Size = RVFI->getReservedSpillsSize();
1972 for (const auto &Info : MFI.getCalleeSavedInfo()) {
1973 int FrameIdx = Info.getFrameIdx();
1974 if (FrameIdx < 0 || MFI.getStackID(FrameIdx) != TargetStackID::Default)
1975 continue;
1976
1977 Size += MFI.getObjectSize(FrameIdx);
1978 }
1979 RVFI->setCalleeSavedStackSize(Size);
1980}
1981
1982// Not preserve stack space within prologue for outgoing variables when the
1983// function contains variable size objects or there are vector objects accessed
1984// by the frame pointer.
1985// Let eliminateCallFramePseudoInstr preserve stack space for it.
1987 return !MF.getFrameInfo().hasVarSizedObjects() &&
1988 !(hasFP(MF) && hasRVVFrameObject(MF));
1989}
1990
1991// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
1995 DebugLoc DL = MI->getDebugLoc();
1996
1997 if (!hasReservedCallFrame(MF)) {
1998 // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
1999 // ADJCALLSTACKUP must be converted to instructions manipulating the stack
2000 // pointer. This is necessary when there is a variable length stack
2001 // allocation (e.g. alloca), which means it's not possible to allocate
2002 // space for outgoing arguments from within the function prologue.
2003 int64_t Amount = MI->getOperand(0).getImm();
2004
2005 if (Amount != 0) {
2006 // Ensure the stack remains aligned after adjustment.
2007 Amount = alignSPAdjust(Amount);
2008
2009 if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
2010 Amount = -Amount;
2011
2012 const RISCVTargetLowering *TLI =
2013 MF.getSubtarget<RISCVSubtarget>().getTargetLowering();
2014 int64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign());
2015 if (TLI->hasInlineStackProbe(MF) && -Amount >= ProbeSize) {
2016 // When stack probing is enabled, the decrement of SP may need to be
2017 // probed. We can handle both the decrement and the probing in
2018 // allocateStack.
2019 bool DynAllocation =
2020 MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
2021 allocateStack(MBB, MI, MF, -Amount, -Amount,
2022 needsDwarfCFI(MF) && !hasFP(MF),
2023 /*NeedProbe=*/true, ProbeSize, DynAllocation,
2025 inlineStackProbe(MF, MBB);
2026 } else {
2027 const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
2030 }
2031 }
2032 }
2033
2034 return MBB.erase(MI);
2035}
2036
2037// We would like to split the SP adjustment to reduce prologue/epilogue
2038// as following instructions. In this way, the offset of the callee saved
2039// register could fit in a single store. Supposed that the first sp adjust
2040// amount is 2032.
2041// add sp,sp,-2032
2042// sw ra,2028(sp)
2043// sw s0,2024(sp)
2044// sw s1,2020(sp)
2045// sw s3,2012(sp)
2046// sw s4,2008(sp)
2047// add sp,sp,-64
2050 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2051 const MachineFrameInfo &MFI = MF.getFrameInfo();
2052 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
2053 uint64_t StackSize = getStackSizeWithRVVPadding(MF);
2054
2055 // Disable SplitSPAdjust if save-restore libcall, push/pop or QCI interrupts
2056 // are used. The callee-saved registers will be pushed by the save-restore
2057 // libcalls, so we don't have to split the SP adjustment in this case.
2058 if (RVFI->getReservedSpillsSize())
2059 return 0;
2060
2061 // Return the FirstSPAdjustAmount if the StackSize can not fit in a signed
2062 // 12-bit and there exists a callee-saved register needing to be pushed.
2063 if (!isInt<12>(StackSize) && (CSI.size() > 0)) {
2064 // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
2065 // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
2066 // instructions. Offsets smaller than 2048 can fit in a single load/store
2067 // instruction, and we have to stick with the stack alignment. 2048 has
2068 // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
2069 // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
2070 const uint64_t StackAlign = getStackAlign().value();
2071
2072 // Amount of (2048 - StackAlign) will prevent callee saved and restored
2073 // instructions be compressed, so try to adjust the amount to the largest
2074 // offset that stack compression instructions accept when target supports
2075 // compression instructions.
2076 if (STI.hasStdExtZca()) {
2077 // The compression extensions may support the following instructions:
2078 // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
2079 // c.swsp rs2, offset[7:2] => 2^(6 + 2)
2080 // c.flwsp rd, offset[7:2] => 2^(6 + 2)
2081 // c.fswsp rs2, offset[7:2] => 2^(6 + 2)
2082 // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
2083 // c.sdsp rs2, offset[8:3] => 2^(6 + 3)
2084 // c.fldsp rd, offset[8:3] => 2^(6 + 3)
2085 // c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
2086 const uint64_t RVCompressLen = STI.getXLen() * 8;
2087 // Compared with amount (2048 - StackAlign), StackSize needs to
2088 // satisfy the following conditions to avoid using more instructions
2089 // to adjust the sp after adjusting the amount, such as
2090 // StackSize meets the condition (StackSize <= 2048 + RVCompressLen),
2091 // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
2092 // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
2093 auto CanCompress = [&](uint64_t CompressLen) -> bool {
2094 if (StackSize <= 2047 + CompressLen ||
2095 (StackSize > 2048 * 2 - StackAlign &&
2096 StackSize <= 2047 * 2 + CompressLen) ||
2097 StackSize > 2048 * 3 - StackAlign)
2098 return true;
2099
2100 return false;
2101 };
2102 // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
2103 // can be compressed(C.ADDI16SP, offset can be [-512, 496]), but
2104 // addi sp, sp, 512 can not be compressed. So try to use 496 first.
2105 const uint64_t ADDI16SPCompressLen = 496;
2106 if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
2107 return ADDI16SPCompressLen;
2108 if (CanCompress(RVCompressLen))
2109 return RVCompressLen;
2110 }
2111 return 2048 - StackAlign;
2112 }
2113 return 0;
2114}
2115
2118 std::vector<CalleeSavedInfo> &CSI) const {
2119 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2120 MachineFrameInfo &MFI = MF.getFrameInfo();
2121 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
2122
2123 // Preemptible Interrupts have two additional Callee-save Frame Indexes,
2124 // not tracked by `CSI`.
2125 if (RVFI->isSiFivePreemptibleInterrupt(MF)) {
2126 for (int I = 0; I < 2; ++I) {
2127 int FI = RVFI->getInterruptCSRFrameIndex(I);
2128 MFI.setIsCalleeSavedObjectIndex(FI, true);
2129 }
2130 }
2131
2132 // Early exit if no callee saved registers are modified!
2133 if (CSI.empty())
2134 return true;
2135
2136 if (RVFI->useQCIInterrupt(MF)) {
2137 RVFI->setQCIInterruptStackSize(QCIInterruptPushAmount);
2138 }
2139
2140 if (RVFI->isPushable(MF)) {
2141 // Determine how many GPRs we need to push and save it to RVFI.
2142 unsigned PushedRegNum = getNumPushPopRegs(CSI);
2143
2144 // `QC.C.MIENTER(.NEST)` will save `ra` and `s0`, so we should only push if
2145 // we want to push more than 2 registers. Otherwise, we should push if we
2146 // want to push more than 0 registers.
2147 unsigned OnlyPushIfMoreThan = RVFI->useQCIInterrupt(MF) ? 2 : 0;
2148 if (PushedRegNum > OnlyPushIfMoreThan) {
2149 RVFI->setRVPushRegs(PushedRegNum);
2150 RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushedRegNum, 16));
2151 }
2152 }
2153
2154 for (auto &CS : CSI) {
2155 MCRegister Reg = CS.getReg();
2156 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
2157 unsigned Size = RegInfo->getSpillSize(*RC);
2158
2159 if (RVFI->useQCIInterrupt(MF)) {
2160 const auto *FFI = llvm::find_if(FixedCSRFIQCIInterruptMap, [&](auto P) {
2161 return P.first == CS.getReg();
2162 });
2163 if (FFI != std::end(FixedCSRFIQCIInterruptMap)) {
2164 int64_t Offset = FFI->second * (int64_t)Size;
2165
2166 int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
2167 assert(FrameIdx < 0);
2168 CS.setFrameIdx(FrameIdx);
2169 continue;
2170 }
2171 }
2172
2173 if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
2174 const auto *FII = llvm::find_if(
2175 FixedCSRFIMap, [&](MCPhysReg P) { return P == CS.getReg(); });
2176 unsigned RegNum = std::distance(std::begin(FixedCSRFIMap), FII);
2177
2178 if (FII != std::end(FixedCSRFIMap)) {
2179 int64_t Offset;
2180 if (RVFI->getPushPopKind(MF) ==
2182 Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
2183 else
2184 Offset = -int64_t(RegNum + 1) * Size;
2185
2186 if (RVFI->useQCIInterrupt(MF))
2188
2189 int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
2190 assert(FrameIdx < 0);
2191 CS.setFrameIdx(FrameIdx);
2192 continue;
2193 }
2194 }
2195
2196 // For GPRPair registers, use 8-byte slots with required alignment by zilsd.
2197 if (!STI.is64Bit() && STI.hasStdExtZilsd() &&
2198 RISCV::GPRPairRegClass.contains(Reg)) {
2199 Align PairAlign = STI.getZilsdAlign();
2200 int FrameIdx = MFI.CreateStackObject(8, PairAlign, true);
2201 MFI.setIsCalleeSavedObjectIndex(FrameIdx, true);
2202 CS.setFrameIdx(FrameIdx);
2203 continue;
2204 }
2205
2206 // Not a fixed slot.
2207 Align Alignment = RegInfo->getSpillAlign(*RC);
2208 // We may not be able to satisfy the desired alignment specification of
2209 // the TargetRegisterClass if the stack alignment is smaller. Use the
2210 // min.
2211 Alignment = std::min(Alignment, getStackAlign());
2212 int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
2213 MFI.setIsCalleeSavedObjectIndex(FrameIdx, true);
2214 CS.setFrameIdx(FrameIdx);
2216 MFI.setStackID(FrameIdx, TargetStackID::ScalableVector);
2217 }
2218
2219 if (RVFI->useQCIInterrupt(MF)) {
2220 // Allocate a fixed object that covers the entire QCI stack allocation,
2221 // because there are gaps which are reserved for future use.
2222 MFI.CreateFixedSpillStackObject(
2223 QCIInterruptPushAmount, -static_cast<int64_t>(QCIInterruptPushAmount));
2224 }
2225
2226 if (RVFI->isPushable(MF)) {
2227 int64_t QCIOffset = RVFI->useQCIInterrupt(MF) ? QCIInterruptPushAmount : 0;
2228 // Allocate a fixed object that covers the full push.
2229 if (int64_t PushSize = RVFI->getRVPushStackSize())
2230 MFI.CreateFixedSpillStackObject(PushSize, -PushSize - QCIOffset);
2231 } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
2232 int64_t LibCallFrameSize =
2233 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
2234 MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize);
2235 }
2236
2237 return true;
2238}
2239
2243 if (CSI.empty())
2244 return true;
2245
2246 MachineFunction *MF = MBB.getParent();
2247 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
2248 DebugLoc DL;
2249 if (MI != MBB.end() && !MI->isDebugInstr())
2250 DL = MI->getDebugLoc();
2251
2253 if (RVFI->useQCIInterrupt(*MF)) {
2254 // Emit QC.C.MIENTER(.NEST)
2255 BuildMI(
2256 MBB, MI, DL,
2257 TII.get(RVFI->getInterruptStackKind(*MF) ==
2259 ? RISCV::QC_C_MIENTER_NEST
2260 : RISCV::QC_C_MIENTER))
2262
2263 for (auto [Reg, _Offset] : FixedCSRFIQCIInterruptMap)
2264 MBB.addLiveIn(Reg);
2265 }
2266
2267 if (RVFI->isPushable(*MF)) {
2268 // Emit CM.PUSH with base StackAdj & evaluate Push stack
2269 unsigned PushedRegNum = RVFI->getRVPushRegs();
2270 if (PushedRegNum > 0) {
2271 // Use encoded number to represent registers to spill.
2272 unsigned Opcode = getPushOpcode(
2273 RVFI->getPushPopKind(*MF), hasFP(*MF) && !RVFI->useQCIInterrupt(*MF));
2274 unsigned RegEnc = RISCVZC::encodeRegListNumRegs(PushedRegNum);
2275 MachineInstrBuilder PushBuilder =
2276 BuildMI(MBB, MI, DL, TII.get(Opcode))
2278 PushBuilder.addImm(RegEnc);
2279 PushBuilder.addImm(0);
2280
2281 for (unsigned i = 0; i < PushedRegNum; i++)
2282 PushBuilder.addUse(FixedCSRFIMap[i], RegState::Implicit);
2283 }
2284 } else if (const char *SpillLibCall = getSpillLibCallName(*MF, CSI)) {
2285 // Add spill libcall via non-callee-saved register t0.
2286 MachineInstrBuilder NewMI =
2287 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
2288 .addExternalSymbol(SpillLibCall, RISCVII::MO_CALL)
2290 .addUse(RISCV::X2, RegState::Implicit)
2291 .addDef(RISCV::X2, RegState::ImplicitDefine);
2292
2293 // Add registers spilled as implicit used.
2294 for (auto &CS : CSI)
2295 NewMI.addUse(CS.getReg(), RegState::Implicit);
2296 }
2297
2298 // Manually spill values not spilled by libcall & Push/Pop.
2299 const auto &UnmanagedCSI =
2300 getUnmanagedCSI(*MF, CSI, STI.preferAscendingLoadStore());
2301 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
2302
2303 auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
2304 for (auto &CS : CSInfo) {
2305 // Insert the spill to the stack frame.
2306 MCRegister Reg = CS.getReg();
2307 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2308 TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
2309 CS.getFrameIdx(), RC, Register(),
2311 }
2312 };
2313 storeRegsToStackSlots(UnmanagedCSI);
2314 storeRegsToStackSlots(RVVCSI);
2315
2316 return true;
2317}
2318
2319static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
2320 return RISCV::VRRegClass.contains(BaseReg) ? 1
2321 : RISCV::VRM2RegClass.contains(BaseReg) ? 2
2322 : RISCV::VRM4RegClass.contains(BaseReg) ? 4
2323 : 8;
2324}
2325
2326void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
2328 MachineFunction *MF = MBB.getParent();
2329 const MachineFrameInfo &MFI = MF->getFrameInfo();
2330 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2331 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2332
2333 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
2334 if (RVVCSI.empty())
2335 return;
2336
2337 uint64_t FixedSize = getStackSizeWithRVVPadding(*MF);
2338 if (!HasFP) {
2339 uint64_t ScalarLocalVarSize =
2340 MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
2341 RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
2342 FixedSize -= ScalarLocalVarSize;
2343 }
2344
2345 CFIInstBuilder CFIBuilder(MBB, MI, MachineInstr::FrameSetup);
2346 for (auto &CS : RVVCSI) {
2347 // Insert the spill to the stack frame.
2348 int FI = CS.getFrameIdx();
2349 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
2350 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
2351 for (unsigned i = 0; i < NumRegs; ++i) {
2352 CFIBuilder.insertCFIInst(createDefCFAOffset(
2353 TRI, BaseReg + i,
2354 StackOffset::get(-FixedSize, MFI.getObjectOffset(FI) / 8 + i)));
2355 }
2356 }
2357}
2358
2359void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
2361 MachineFunction *MF = MBB.getParent();
2362 const MachineFrameInfo &MFI = MF->getFrameInfo();
2363 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2364
2365 CFIInstBuilder CFIHelper(MBB, MI, MachineInstr::FrameDestroy);
2366 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
2367 for (auto &CS : RVVCSI) {
2368 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
2369 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
2370 for (unsigned i = 0; i < NumRegs; ++i)
2371 CFIHelper.buildRestore(BaseReg + i);
2372 }
2373}
2374
2378 if (CSI.empty())
2379 return true;
2380
2381 MachineFunction *MF = MBB.getParent();
2382 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
2383 DebugLoc DL;
2384 if (MI != MBB.end() && !MI->isDebugInstr())
2385 DL = MI->getDebugLoc();
2386
2387 // Manually restore values not restored by libcall & Push/Pop.
2388 // Reverse the restore order in epilog. In addition, the return
2389 // address will be restored first in the epilogue. It increases
2390 // the opportunity to avoid the load-to-use data hazard between
2391 // loading RA and return by RA. loadRegFromStackSlot can insert
2392 // multiple instructions.
2393 const auto &UnmanagedCSI =
2394 getUnmanagedCSI(*MF, CSI, STI.preferAscendingLoadStore());
2395 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
2396
2397 auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
2398 for (auto &CS : CSInfo) {
2399 MCRegister Reg = CS.getReg();
2400 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2401 TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, Register(),
2402 RISCV::NoSubRegister,
2404 assert(MI != MBB.begin() &&
2405 "loadRegFromStackSlot didn't insert any code!");
2406 }
2407 };
2408 loadRegFromStackSlot(RVVCSI);
2409 loadRegFromStackSlot(UnmanagedCSI);
2410
2412 if (RVFI->useQCIInterrupt(*MF)) {
2413 // Don't emit anything here because restoration is handled by
2414 // QC.C.MILEAVERET which we already inserted to return.
2415 assert(MI->getOpcode() == RISCV::QC_C_MILEAVERET &&
2416 "Unexpected QCI Interrupt Return Instruction");
2417 }
2418
2419 if (RVFI->isPushable(*MF)) {
2420 unsigned PushedRegNum = RVFI->getRVPushRegs();
2421 if (PushedRegNum > 0) {
2422 unsigned Opcode = getPopOpcode(RVFI->getPushPopKind(*MF));
2423 unsigned RegEnc = RISCVZC::encodeRegListNumRegs(PushedRegNum);
2424 MachineInstrBuilder PopBuilder =
2425 BuildMI(MBB, MI, DL, TII.get(Opcode))
2427 // Use encoded number to represent registers to restore.
2428 PopBuilder.addImm(RegEnc);
2429 PopBuilder.addImm(0);
2430
2431 for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
2433 }
2434 } else if (const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI)) {
2435 // Add restore libcall via tail call.
2436 MachineInstrBuilder NewMI =
2437 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
2438 .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
2440 .addDef(RISCV::X2, RegState::ImplicitDefine);
2441
2442 // Add registers restored as implicit defined.
2443 for (auto &CS : CSI)
2444 NewMI.addDef(CS.getReg(), RegState::ImplicitDefine);
2445
2446 // Remove trailing returns, since the terminator is now a tail call to the
2447 // restore function.
2448 if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
2449 NewMI.getInstr()->copyImplicitOps(*MF, *MI);
2450 MI->eraseFromParent();
2451 }
2452 }
2453 return true;
2454}
2455
2457 // Keep the conventional code flow when not optimizing.
2458 if (MF.getFunction().hasOptNone())
2459 return false;
2460
2461 return true;
2462}
2463
2465 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2466 const MachineFunction *MF = MBB.getParent();
2467 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2468
2469 // Make sure VTYPE and VL are not live-in since we will use vsetvli in the
2470 // prologue to get the VLEN, and that will clobber these registers.
2471 //
2472 // We may do also check the stack contains objects with scalable vector type,
2473 // but this will require iterating over all the stack objects, but this may
2474 // not worth since the situation is rare, we could do further check in future
2475 // if we find it is necessary.
2476 if (STI.preferVsetvliOverReadVLENB() &&
2477 (MBB.isLiveIn(RISCV::VTYPE) || MBB.isLiveIn(RISCV::VL)))
2478 return false;
2479
2480 if (!RVFI->useSaveRestoreLibCalls(*MF))
2481 return true;
2482
2483 // Inserting a call to a __riscv_save libcall requires the use of the register
2484 // t0 (X5) to hold the return address. Therefore if this register is already
2485 // used we can't insert the call.
2486
2487 RegScavenger RS;
2488 RS.enterBasicBlock(*TmpMBB);
2489 return !RS.isRegUsed(RISCV::X5);
2490}
2491
2493 const MachineFunction *MF = MBB.getParent();
2494 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2495 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2496
2497 // We do not want QC.C.MILEAVERET to be subject to shrink-wrapping - it must
2498 // come in the final block of its function as it both pops and returns.
2499 if (RVFI->useQCIInterrupt(*MF))
2500 return MBB.succ_empty();
2501
2502 if (!RVFI->useSaveRestoreLibCalls(*MF))
2503 return true;
2504
2505 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
2506 // This means if we still need to continue executing code within this function
2507 // the restore cannot take place in this basic block.
2508
2509 if (MBB.succ_size() > 1)
2510 return false;
2511
2512 MachineBasicBlock *SuccMBB =
2513 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
2514
2515 // Doing a tail call should be safe if there are no successors, because either
2516 // we have a returning block or the end of the block is unreachable, so the
2517 // restore will be eliminated regardless.
2518 if (!SuccMBB)
2519 return true;
2520
2521 // The successor can only contain a return, since we would effectively be
2522 // replacing the successor with our own tail return at the end of our block.
2523 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2524}
2525
2527 switch (ID) {
2530 return true;
2535 return false;
2536 }
2537 llvm_unreachable("Invalid TargetStackID::Value");
2538}
2539
2543
2544// Synthesize the probe loop.
2546 Register TargetReg, bool IsRVV) {
2547 assert(TargetReg != RISCV::X2 && "New top of stack cannot already be in SP");
2548
2549 MachineBasicBlock &MBB = *MBBI->getParent();
2550 MachineFunction &MF = *MBB.getParent();
2551
2552 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
2553 const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
2554 bool IsRV64 = Subtarget.is64Bit();
2555 Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
2556 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
2557 uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);
2558
2559 MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
2560 MachineBasicBlock *LoopTestMBB =
2561 MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2562 MF.insert(MBBInsertPoint, LoopTestMBB);
2563 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2564 MF.insert(MBBInsertPoint, ExitMBB);
2566 Register ScratchReg = RISCV::X7;
2567
2568 // ScratchReg = ProbeSize
2569 TII->movImm(MBB, MBBI, DL, ScratchReg, ProbeSize, Flags);
2570
2571 // LoopTest:
2572 // SUB SP, SP, ProbeSize
2573 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB), SPReg)
2574 .addReg(SPReg)
2575 .addReg(ScratchReg)
2576 .setMIFlags(Flags);
2577
2578 // s[d|w] zero, 0(sp)
2579 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL,
2580 TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
2581 .addReg(RISCV::X0)
2582 .addReg(SPReg)
2583 .addImm(0)
2584 .setMIFlags(Flags);
2585
2586 if (IsRVV) {
2587 // SUB TargetReg, TargetReg, ProbeSize
2588 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB),
2589 TargetReg)
2590 .addReg(TargetReg)
2591 .addReg(ScratchReg)
2592 .setMIFlags(Flags);
2593
2594 // BGE TargetReg, ProbeSize, LoopTest
2595 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BGE))
2596 .addReg(TargetReg)
2597 .addReg(ScratchReg)
2598 .addMBB(LoopTestMBB)
2599 .setMIFlags(Flags);
2600
2601 } else {
2602 // BNE SP, TargetReg, LoopTest
2603 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BNE))
2604 .addReg(SPReg)
2605 .addReg(TargetReg)
2606 .addMBB(LoopTestMBB)
2607 .setMIFlags(Flags);
2608 }
2609
2610 ExitMBB->splice(ExitMBB->end(), &MBB, std::next(MBBI), MBB.end());
2612
2613 LoopTestMBB->addSuccessor(ExitMBB);
2614 LoopTestMBB->addSuccessor(LoopTestMBB);
2615 MBB.addSuccessor(LoopTestMBB);
2616 // Update liveins.
2617 fullyRecomputeLiveIns({ExitMBB, LoopTestMBB});
2618}
2619
2620void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2621 MachineBasicBlock &MBB) const {
2622 // Get the instructions that need to be replaced. We emit at most two of
2623 // these. Remember them in order to avoid complications coming from the need
2624 // to traverse the block while potentially creating more blocks.
2625 SmallVector<MachineInstr *, 4> ToReplace;
2626 for (MachineInstr &MI : MBB) {
2627 unsigned Opc = MI.getOpcode();
2628 if (Opc == RISCV::PROBED_STACKALLOC ||
2629 Opc == RISCV::PROBED_STACKALLOC_RVV) {
2630 ToReplace.push_back(&MI);
2631 }
2632 }
2633
2634 for (MachineInstr *MI : ToReplace) {
2635 if (MI->getOpcode() == RISCV::PROBED_STACKALLOC ||
2636 MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV) {
2637 MachineBasicBlock::iterator MBBI = MI->getIterator();
2639 Register TargetReg = MI->getOperand(0).getReg();
2640 emitStackProbeInline(MBBI, DL, TargetReg,
2641 (MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV));
2643 }
2644 }
2645}
2646
2648 return 0;
2649}
2650
2653 return RISCV::X2;
2654}
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI, unsigned Reg, const StackOffset &Offset)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static uint64_t estimateFunctionSizeInBytes(const LoongArchInstrInfo *TII, const MachineFunction &MF)
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
static constexpr uint64_t QCIInterruptPushAmount
static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind, bool UpdateFP)
static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI, const Register &Reg)
static void createSiFivePreemptibleInterruptFrameEntries(MachineFunction &MF, RISCVMachineFunctionInfo &RVFI)
static constexpr MCPhysReg FPReg
static const char * getRestoreLibCallName(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static bool needsDwarfCFI(const MachineFunction &MF)
Returns true if DWARF CFI instructions ("frame moves") should be emitted.
static constexpr MCPhysReg SPReg
static const char * getSpillLibCallName(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static bool hasRVVFrameObject(const MachineFunction &MF)
static void appendScalableVectorExpression(const TargetRegisterInfo &TRI, SmallVectorImpl< char > &Expr, StackOffset Offset, llvm::raw_string_ostream &Comment)
static SmallVector< CalleeSavedInfo, 8 > getQCISavedInfo(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static SmallVector< CalleeSavedInfo, 8 > getRVVCalleeSavedInfo(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static SmallVector< CalleeSavedInfo, 8 > getUnmanagedCSI(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI, bool ReverseOrder=false)
static bool isPop(unsigned Opcode)
static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg)
static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI, Register Reg, StackOffset Offset)
static void emitStackProbeInline(MachineBasicBlock::iterator MBBI, DebugLoc DL, Register TargetReg, bool IsRVV)
static Align getABIStackAlignment(RISCVABI::ABI ABI)
static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind)
static SmallVector< CalleeSavedInfo, 8 > getPushOrLibCallsSavedInfo(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static int getLibCallID(const MachineFunction &MF, const std::vector< CalleeSavedInfo > &CSI)
static const std::pair< MCPhysReg, int8_t > FixedCSRFIQCIInterruptMap[]
static bool isPush(unsigned Opcode)
static constexpr MCPhysReg RAReg
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL)
static const MCPhysReg FixedCSRFIMap[]
static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL)
static void emitSiFiveCLICStackSwap(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static unsigned getNumPushPopRegs(const std::vector< CalleeSavedInfo > &CSI)
static unsigned getScavSlotsNumForRVV(MachineFunction &MF)
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
bool test(unsigned Idx) const
Returns true if bit Idx is set.
Definition BitVector.h:482
BitVector & reset()
Reset all bits in the bitvector.
Definition BitVector.h:409
BitVector & set()
Set all bits in the bitvector.
Definition BitVector.h:366
Helper class for creating CFI instructions and inserting them into MIR.
void buildEscape(StringRef Bytes, StringRef Comment="") const
void buildDefCFAOffset(int64_t Offset, MCSymbol *Label=nullptr) const
void buildRestore(MCRegister Reg) const
void buildDefCFARegister(MCRegister Reg) const
void buildOffset(MCRegister Reg, int64_t Offset) const
void insertCFIInst(const MCCFIInstruction &CFIInst) const
void buildDefCFA(MCRegister Reg, int64_t Offset) const
void setInsertPoint(MachineBasicBlock::iterator IP)
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
MCRegister getReg() const
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasOptNone() const
Do not optimize this function (-O0).
Definition Function.h:708
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
Definition MCDwarf.h:727
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MachineBasicBlock * getFallThrough(bool JumpToFallThrough=true)
Return the fallthrough block if the block can implicitly transfer control to the block after it by fa...
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
LLVM_ABI void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
LLVM_ABI void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int64_t getOffsetAdjustment() const
Return the correction for frame offsets.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine with which this machine code is compiled.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const BitVector & getUsedPhysRegsMask() const
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool def_empty(Register RegNo) const
def_empty - Return true if there are no instructions defining the specified register (it may be live-...
LLVM_ABI const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
LLVM_ABI void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
Represent a mutable reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:294
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
uint64_t getFirstSPAdjustAmount(const MachineFunction &MF) const
bool enableShrinkWrapping(const MachineFunction &MF) const override
Returns true if the target will correctly handle shrink wrapping.
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool hasBP(const MachineFunction &MF) const
void allocateStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineFunction &MF, uint64_t Offset, uint64_t RealStackSize, bool EmitCFI, bool NeedProbe, uint64_t ProbeSize, bool DynAllocation, MachineInstr::MIFlag Flag) const
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a epilogue for the target.
bool hasFPImpl(const MachineFunction &MF) const override
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
Register getInitialCFARegister(const MachineFunction &MF) const override
Return initial CFA register value i.e.
const RISCVSubtarget & STI
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
bool isSupportedStackID(TargetStackID::Value ID) const override
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
int getInitialCFAOffset(const MachineFunction &MF) const override
Return initial CFA offset value i.e.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
RISCVFrameLowering(const RISCVSubtarget &STI)
uint64_t getStackSizeWithRVVPadding(const MachineFunction &MF) const
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
bool isPushable(const MachineFunction &MF) const
InterruptStackKind getInterruptStackKind(const MachineFunction &MF) const
bool isSiFivePreemptibleInterrupt(const MachineFunction &MF) const
PushPopKind getPushPopKind(const MachineFunction &MF) const
bool useSaveRestoreLibCalls(const MachineFunction &MF) const
bool useQCIInterrupt(const MachineFunction &MF) const
bool hasVInstructions() const
const RISCVRegisterInfo * getRegisterInfo() const override
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this function.
unsigned getStackProbeSize(const MachineFunction &MF, Align StackAlign) const
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Represents a location in source code.
Definition SMLoc.h:22
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition SmallString.h:68
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:46
int64_t getScalable() const
Returns the scalable component of the stack.
Definition TypeSize.h:49
static StackOffset get(int64_t Fixed, int64_t Scalable)
Definition TypeSize.h:41
static StackOffset getScalable(int64_t Scalable)
Definition TypeSize.h:40
static StackOffset getFixed(int64_t Fixed)
Definition TypeSize.h:39
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
TargetFrameLowering(StackDirection D, Align StackAl, int LAO, Align TransAl=Align(1), bool StackReal=true)
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment.
TargetInstrInfo - Interface to description of machine instruction set.
TargetOptions Options
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
MCRegister getBPReg()
MCRegister getSCSPReg()
static unsigned encodeRegListNumRegs(unsigned NumRegs)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
IterT next_nodbg(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It, then continue incrementing it while it points to a debug instruction.
@ Offset
Definition DWP.cpp:557
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Define
Register definition.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
auto make_first_range(ContainerTy &&c)
Given a container of pairs, return a range over the first elements.
Definition STLExtras.h:1398
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition Alignment.h:186
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
void appendLEB128(SmallVectorImpl< U > &Buffer, T Value)
Definition LEB128.h:236
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
static bool isRVVRegClass(const TargetRegisterClass *RC)
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, Register SrcReg, StackOffset Offset, MachineInstr::MIFlag Flag, MaybeAlign RequiredAlign) const