LLVM 23.0.0git
RISCVCallingConv.cpp
Go to the documentation of this file.
1//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the custom routines for the RISC-V Calling Convention.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVCallingConv.h"
14#include "RISCVMachineFunctionInfo.h"
15#include "RISCVSubtarget.h"
16#include "llvm/IR/DataLayout.h"
17#include "llvm/IR/Module.h"
18#include "llvm/MC/MCRegister.h"
19
20using namespace llvm;
21
22// This does not have the regular `CCAssignFn` signature, it has an extra
23// `bool IsRet` parameter.
24static bool CC_RISCV_Impl(unsigned ValNo, MVT ValVT, MVT LocVT,
26 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
27 CCState &State, bool IsRet);
28
29/// Used for assigning arguments with CallingConvention::GHC
31
32/// Used for assigning arguments with CallingConvention::Fast
34
35bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
36 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
37 Type *OrigTy, CCState &State) {
38 if (State.getCallingConv() == CallingConv::GHC)
39 return CC_RISCV_GHC(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State);
40
41 if (State.getCallingConv() == CallingConv::Fast)
42 return CC_RISCV_FastCC(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy,
43 State);
44
45 // For all other cases, use the standard calling convention
46 return CC_RISCV_Impl(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
47 /*IsRet=*/false);
48}
49
50bool llvm::RetCC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
51 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
52 Type *OrigTy, CCState &State) {
53 // Always use the standard calling convention.
54 return CC_RISCV_Impl(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State,
55 /*IsRet=*/true);
56}
57
58// Calling Convention Implementation.
59// The expectations for frontend ABI lowering vary from target to target.
60// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
61// details, but this is a longer term goal. For now, we simply try to keep the
62// role of the frontend as simple and well-defined as possible. The rules can
63// be summarised as:
64// * Never split up large scalar arguments. We handle them here.
65// * If a hardfloat calling convention is being used, and the struct may be
66// passed in a pair of registers (fp+fp, int+fp), and both registers are
67// available, then pass as two separate arguments. If either the GPRs or FPRs
68// are exhausted, then pass according to the rule below.
69// * If a struct could never be passed in registers or directly in a stack
70// slot (as it is larger than 2*XLEN and the floating point rules don't
71// apply), then pass it using a pointer with the byval attribute.
72// * If a struct is less than 2*XLEN, then coerce to either a two-element
73// word-sized array or a 2*XLEN scalar (depending on alignment).
74// * The frontend can determine whether a struct is returned by reference or
75// not based on its size and fields. If it will be returned by reference, the
76// frontend must modify the prototype so a pointer with the sret annotation is
77// passed as the first argument. This is not necessary for large scalar
78// returns.
79// * Struct return values and varargs should be coerced to structs containing
80// register-size fields in the same situations they would be for fixed
81// arguments.
82
83static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
84 RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
85 RISCV::F16_H, RISCV::F17_H};
86static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
87 RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
88 RISCV::F16_F, RISCV::F17_F};
89static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
90 RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
91 RISCV::F16_D, RISCV::F17_D};
92// This is an interim calling convention and it may be changed in the future.
93static const MCPhysReg ArgVRs[] = {
94 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
95 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
96 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
97static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2,
98 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
99 RISCV::V20M2, RISCV::V22M2};
100static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
101 RISCV::V20M4};
102static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
103static const MCPhysReg ArgVRN2M1s[] = {
104 RISCV::V8_V9, RISCV::V9_V10, RISCV::V10_V11, RISCV::V11_V12,
105 RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
106 RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
107 RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
108static const MCPhysReg ArgVRN3M1s[] = {
109 RISCV::V8_V9_V10, RISCV::V9_V10_V11, RISCV::V10_V11_V12,
110 RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
111 RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
112 RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
113 RISCV::V20_V21_V22, RISCV::V21_V22_V23};
114static const MCPhysReg ArgVRN4M1s[] = {
115 RISCV::V8_V9_V10_V11, RISCV::V9_V10_V11_V12, RISCV::V10_V11_V12_V13,
116 RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
117 RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
118 RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
119 RISCV::V20_V21_V22_V23};
120static const MCPhysReg ArgVRN5M1s[] = {
121 RISCV::V8_V9_V10_V11_V12, RISCV::V9_V10_V11_V12_V13,
122 RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
123 RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
124 RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
125 RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
126 RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
127static const MCPhysReg ArgVRN6M1s[] = {
128 RISCV::V8_V9_V10_V11_V12_V13, RISCV::V9_V10_V11_V12_V13_V14,
129 RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
130 RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
131 RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
132 RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
133 RISCV::V18_V19_V20_V21_V22_V23};
134static const MCPhysReg ArgVRN7M1s[] = {
135 RISCV::V8_V9_V10_V11_V12_V13_V14, RISCV::V9_V10_V11_V12_V13_V14_V15,
136 RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
137 RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
138 RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
139 RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
140static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
141 RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
142 RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
143 RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
144 RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
145 RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
146 RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
147 RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
148 RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
149static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2, RISCV::V10M2_V12M2,
150 RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
151 RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
152 RISCV::V20M2_V22M2};
153static const MCPhysReg ArgVRN3M2s[] = {
154 RISCV::V8M2_V10M2_V12M2, RISCV::V10M2_V12M2_V14M2,
155 RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
156 RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
157static const MCPhysReg ArgVRN4M2s[] = {
158 RISCV::V8M2_V10M2_V12M2_V14M2, RISCV::V10M2_V12M2_V14M2_V16M2,
159 RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
160 RISCV::V16M2_V18M2_V20M2_V22M2};
161static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
162 RISCV::V16M4_V20M4};
163
165 // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
166 // the ILP32E ABI.
167 static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
168 RISCV::X13, RISCV::X14, RISCV::X15,
169 RISCV::X16, RISCV::X17};
170 // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
171 static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
172 RISCV::X13, RISCV::X14, RISCV::X15};
173
174 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
175 return ArrayRef(ArgEGPRs);
176
177 return ArrayRef(ArgIGPRs);
178}
179
181 // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
182 // the ILP32E ABI.
183 static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
184 RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
185 RISCV::X16_H, RISCV::X17_H};
186 // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
187 static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
188 RISCV::X12_H, RISCV::X13_H,
189 RISCV::X14_H, RISCV::X15_H};
190
191 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
192 return ArrayRef(ArgEGPRs);
193
194 return ArrayRef(ArgIGPRs);
195}
196
198 // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
199 // the ILP32E ABI.
200 static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
201 RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
202 RISCV::X16_W, RISCV::X17_W};
203 // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
204 static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
205 RISCV::X12_W, RISCV::X13_W,
206 RISCV::X14_W, RISCV::X15_W};
207
208 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
209 return ArrayRef(ArgEGPRs);
210
211 return ArrayRef(ArgIGPRs);
212}
213
215 // The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
216 // for save-restore libcall, so we don't use them.
217 // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
218 static const MCPhysReg FastCCIGPRs[] = {
219 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
220 RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};
221
222 // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
223 static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
224 RISCV::X13, RISCV::X14, RISCV::X15};
225
226 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
227 return ArrayRef(FastCCEGPRs);
228
229 return ArrayRef(FastCCIGPRs);
230}
231
233 // The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
234 // for save-restore libcall, so we don't use them.
235 // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
236 static const MCPhysReg FastCCIGPRs[] = {
237 RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
238 RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
239 RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};
240
241 // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
242 static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
243 RISCV::X12_H, RISCV::X13_H,
244 RISCV::X14_H, RISCV::X15_H};
245
246 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
247 return ArrayRef(FastCCEGPRs);
248
249 return ArrayRef(FastCCIGPRs);
250}
251
253 // The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
254 // for save-restore libcall, so we don't use them.
255 // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
256 static const MCPhysReg FastCCIGPRs[] = {
257 RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
258 RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
259 RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};
260
261 // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
262 static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
263 RISCV::X12_W, RISCV::X13_W,
264 RISCV::X14_W, RISCV::X15_W};
265
266 if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
267 return ArrayRef(FastCCEGPRs);
268
269 return ArrayRef(FastCCIGPRs);
270}
271
272// Pass a 2*XLEN argument that has been split into two XLEN values through
273// registers or the stack as necessary.
275 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
276 MVT ValVT2, MVT LocVT2,
277 ISD::ArgFlagsTy ArgFlags2,
278 const RISCVSubtarget &Subtarget) {
279 unsigned XLen = Subtarget.getXLen();
280 unsigned XLenInBytes = XLen / 8;
281 RISCVABI::ABI ABI = Subtarget.getTargetABI();
282 bool EABI = ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E;
283
285
286 if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
287 // At least one half can be passed via register.
288 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
290 } else {
291 // Both halves must be passed on the stack, with proper alignment.
292 // TODO: To be compatible with GCC's behaviors, we force them to have 4-byte
293 // alignment. This behavior may be changed when RV32E/ILP32E is ratified.
294 Align StackAlign(XLenInBytes);
295 if (!EABI || XLen != 32)
296 StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
297 State.addLoc(
299 State.AllocateStack(XLenInBytes, StackAlign),
301 State.addLoc(CCValAssign::getMem(
302 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
303 LocVT2, CCValAssign::Full));
304 return false;
305 }
306
307 if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
308 // The second half can also be passed via register.
309 State.addLoc(
310 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
311 } else {
312 // The second half is passed via the stack, without additional alignment.
313 State.addLoc(CCValAssign::getMem(
314 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
315 LocVT2, CCValAssign::Full));
316 }
317
318 return false;
319}
320
321static MCRegister allocateRVVReg(MVT LocVT, unsigned ValNo, CCState &State,
322 const RISCVTargetLowering &TLI) {
323 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT);
324 if (RC == &RISCV::VRRegClass) {
325 // Assign the first mask argument to V0.
326 // This is an interim calling convention and it may be changed in the
327 // future.
328 if (LocVT.getVectorElementType() == MVT::i1)
329 if (MCRegister Reg = State.AllocateReg(RISCV::V0))
330 return Reg;
331 return State.AllocateReg(ArgVRs);
332 }
333 if (RC == &RISCV::VRM2RegClass)
334 return State.AllocateReg(ArgVRM2s);
335 if (RC == &RISCV::VRM4RegClass)
336 return State.AllocateReg(ArgVRM4s);
337 if (RC == &RISCV::VRM8RegClass)
338 return State.AllocateReg(ArgVRM8s);
339 if (RC == &RISCV::VRN2M1RegClass)
340 return State.AllocateReg(ArgVRN2M1s);
341 if (RC == &RISCV::VRN3M1RegClass)
342 return State.AllocateReg(ArgVRN3M1s);
343 if (RC == &RISCV::VRN4M1RegClass)
344 return State.AllocateReg(ArgVRN4M1s);
345 if (RC == &RISCV::VRN5M1RegClass)
346 return State.AllocateReg(ArgVRN5M1s);
347 if (RC == &RISCV::VRN6M1RegClass)
348 return State.AllocateReg(ArgVRN6M1s);
349 if (RC == &RISCV::VRN7M1RegClass)
350 return State.AllocateReg(ArgVRN7M1s);
351 if (RC == &RISCV::VRN8M1RegClass)
352 return State.AllocateReg(ArgVRN8M1s);
353 if (RC == &RISCV::VRN2M2RegClass)
354 return State.AllocateReg(ArgVRN2M2s);
355 if (RC == &RISCV::VRN3M2RegClass)
356 return State.AllocateReg(ArgVRN3M2s);
357 if (RC == &RISCV::VRN4M2RegClass)
358 return State.AllocateReg(ArgVRN4M2s);
359 if (RC == &RISCV::VRN2M4RegClass)
360 return State.AllocateReg(ArgVRN2M4s);
361 llvm_unreachable("Unhandled register class for ValueType");
362}
363
364// Implements the RISC-V calling convention. Returns true upon failure.
365//
366// This has a slightly different signature to CCAssignFn - it adds `bool IsRet`.
367static bool CC_RISCV_Impl(unsigned ValNo, MVT ValVT, MVT LocVT,
368 CCValAssign::LocInfo LocInfo,
369 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
370 CCState &State, bool IsRet) {
371 assert(ValVT == LocVT && "Expected ValVT and LocVT to match");
372 const MachineFunction &MF = State.getMachineFunction();
373 const DataLayout &DL = MF.getDataLayout();
374 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
375 const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
376
377 unsigned XLen = Subtarget.getXLen();
378 MVT XLenVT = Subtarget.getXLenVT();
379
380 if (ArgFlags.isNest()) {
381 // Static chain parameter must not be passed in normal argument registers,
382 // so we assign t2/t3 for it as done in GCC's
383 // __builtin_call_with_static_chain
384 bool HasCFBranch =
385 MF.getInfo<RISCVMachineFunctionInfo>()->hasCFProtectionBranch();
386
387 // Normal: t2, Branch control flow protection: t3
388 const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;
389
390 RISCVABI::ABI ABI = Subtarget.getTargetABI();
391 if (HasCFBranch &&
392 (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E))
394 "Nested functions with control flow protection are not "
395 "usable with ILP32E or LP64E ABI.");
396 if (MCRegister Reg = State.AllocateReg(StaticChainReg)) {
397 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
398 return false;
399 }
400 }
401
402 // Any return value split in to more than two values can't be returned
403 // directly. Vectors are returned via the available vector registers.
404 if ((!LocVT.isVector() || Subtarget.isPExtPackedType(LocVT)) && IsRet &&
405 ValNo > 1)
406 return true;
407
408 // Double wide packed types require 2 GPRs so we can only return 1 of them.
409 if (Subtarget.isPExtPackedDoubleType(LocVT) && IsRet && ValNo > 0)
410 return true;
411
412 // AllowFPRForF16_F32 if targeting an FLEN>=32 ABI and the argument isn't
413 // variadic.
414 bool AllowFPRForF16_F32 = false;
415 // UseFPRForF64 if targeting an FLEN>=64 ABI and the argument isn't variadic.
416 bool AllowFPRForF64 = false;
417
418 RISCVABI::ABI ABI = Subtarget.getTargetABI();
419 switch (ABI) {
420 default:
421 llvm_unreachable("Unexpected ABI");
426 break;
429 AllowFPRForF64 = !ArgFlags.isVarArg();
430 [[fallthrough]];
433 AllowFPRForF16_F32 = !ArgFlags.isVarArg();
434 break;
435 }
436
437 if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && AllowFPRForF16_F32) {
438 if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
439 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
440 return false;
441 }
442 }
443
444 if (LocVT == MVT::f32 && AllowFPRForF16_F32) {
445 if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
446 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
447 return false;
448 }
449 }
450
451 if (LocVT == MVT::f64 && AllowFPRForF64) {
452 if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
453 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
454 return false;
455 }
456 }
457
458 if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
459 if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
460 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
461 return false;
462 }
463 }
464
465 if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
466 if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
467 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
468 return false;
469 }
470 }
471
473
474 // Zdinx use GPR without a bitcast when possible.
475 if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
476 if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
477 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
478 return false;
479 }
480 }
481
482 // FP smaller than XLen, uses custom GPR.
483 if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
484 (LocVT == MVT::f32 && XLen == 64)) {
485 if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
486 LocVT = XLenVT;
487 State.addLoc(
488 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
489 return false;
490 }
491 }
492
493 // Bitcast FP to GPR if we can use a GPR register.
494 if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
495 if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
496 LocVT = XLenVT;
497 LocInfo = CCValAssign::BCvt;
498 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
499 return false;
500 }
501 }
502
503 // If this is a variadic argument, the RISC-V calling convention requires
504 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
505 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
506 // be used regardless of whether the original argument was split during
507 // legalisation or not. The argument will not be passed by registers if the
508 // original type is larger than 2*XLEN, so the register alignment rule does
509 // not apply.
510 // TODO: To be compatible with GCC's behaviors, we don't align registers
511 // currently if we are using ILP32E calling convention. This behavior may be
512 // changed when RV32E/ILP32E is ratified.
513 unsigned TwoXLenInBytes = (2 * XLen) / 8;
514 if (ArgFlags.isVarArg() && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
515 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
516 ABI != RISCVABI::ABI_ILP32E) {
517 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
518 // Skip 'odd' register if necessary.
519 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
520 State.AllocateReg(ArgGPRs);
521 }
522
523 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
524 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
525 State.getPendingArgFlags();
526
527 assert(PendingLocs.size() == PendingArgFlags.size() &&
528 "PendingLocs and PendingArgFlags out of sync");
529
530 // Handle passing f64 on RV32D with a soft float ABI or when floating point
531 // registers are exhausted. Or 64-bit P extension vectors on RV32.
532 if (XLen == 32 &&
533 (LocVT == MVT::f64 || (Subtarget.isPExtPackedDoubleType(LocVT) &&
534 !ArgFlags.isSplit() && PendingLocs.empty()))) {
535 assert(PendingLocs.empty() &&
536 "Can't lower f64 or P extension vector if it is split");
537 // Depending on available argument GPRS, f64 may be passed in a pair of
538 // GPRs, split between a GPR and the stack, or passed completely on the
539 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
540 // cases.
541 MCRegister Reg = State.AllocateReg(ArgGPRs);
542 if (!Reg) {
543 int64_t StackOffset = State.AllocateStack(8, Align(8));
544 State.addLoc(
545 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
546 return false;
547 }
548 LocVT = MVT::i32;
549 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
550 MCRegister HiReg = State.AllocateReg(ArgGPRs);
551 if (HiReg) {
552 State.addLoc(
553 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
554 } else {
555 int64_t StackOffset = State.AllocateStack(4, Align(4));
556 State.addLoc(
557 CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
558 }
559 return false;
560 }
561
562 // If the split argument only had two elements, it should be passed directly
563 // in registers or on the stack.
564 if ((LocVT.isScalarInteger() ||
565 (Subtarget.isPExtPackedType(LocVT) && LocVT.getSizeInBits() == XLen)) &&
566 ArgFlags.isSplitEnd() && PendingLocs.size() <= 1) {
567 assert(PendingLocs.size() == 1 && "Unexpected PendingLocs.size()");
568 // Apply the normal calling convention rules to the first half of the
569 // split argument.
570 CCValAssign VA = PendingLocs[0];
571 ISD::ArgFlagsTy AF = PendingArgFlags[0];
572 PendingLocs.clear();
573 PendingArgFlags.clear();
574 return CC_RISCVAssign2XLen(State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
575 Subtarget);
576 }
577
578 // Split arguments might be passed indirectly, so keep track of the pending
579 // values. Split vectors excluding P extension packed vectors(see
580 // isPExtPackedType) are passed via a mix of registers and indirectly, so
581 // treat them as we would any other argument.
582 if ((LocVT.isScalarInteger() || Subtarget.isPExtPackedType(LocVT)) &&
583 (ArgFlags.isSplit() || !PendingLocs.empty())) {
584 PendingLocs.push_back(
585 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
586 PendingArgFlags.push_back(ArgFlags);
587 if (!ArgFlags.isSplitEnd()) {
588 return false;
589 }
590 }
591
592 // Allocate to a register if possible, or else a stack slot.
594 unsigned StoreSizeBytes = XLen / 8;
595 Align StackAlign = Align(XLen / 8);
596
597 // FIXME: If P extension and V extension are enabled at the same time,
598 // who should go first?
599 if (!Subtarget.isPExtPackedType(LocVT) &&
600 (LocVT.isVector() || LocVT.isRISCVVectorTuple())) {
601 Reg = allocateRVVReg(LocVT, ValNo, State, TLI);
602 if (Reg) {
603 // Fixed-length vectors are located in the corresponding scalable-vector
604 // container types.
605 if (LocVT.isFixedLengthVector()) {
606 LocVT = TLI.getContainerForFixedLengthVector(LocVT);
607 State.addLoc(
608 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
609 return false;
610 }
611 } else {
612 // For return values, the vector must be passed fully via registers or
613 // via the stack.
614 if (IsRet)
615 return true;
616 // Try using a GPR to pass the address
617 if ((Reg = State.AllocateReg(ArgGPRs))) {
618 LocVT = XLenVT;
619 LocInfo = CCValAssign::Indirect;
620 } else if (LocVT.isScalableVector()) {
621 LocVT = XLenVT;
622 LocInfo = CCValAssign::Indirect;
623 } else {
624 StoreSizeBytes = LocVT.getStoreSize();
625 // Align vectors to their element sizes, being careful for vXi1
626 // vectors.
627 StackAlign = MaybeAlign(LocVT.getScalarSizeInBits() / 8).valueOrOne();
628 }
629 }
630 } else {
631 Reg = State.AllocateReg(ArgGPRs);
632 }
633
634 int64_t StackOffset =
635 Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
636
637 // If we reach this point and PendingLocs is non-empty, we must be at the
638 // end of a split argument that must be passed indirectly.
639 if (!PendingLocs.empty()) {
640 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
641 assert(PendingLocs.size() > 1 && "Unexpected PendingLocs.size()");
642
643 for (auto &It : PendingLocs) {
644 if (Reg)
645 State.addLoc(CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
646 XLenVT, CCValAssign::Indirect));
647 else
648 State.addLoc(CCValAssign::getMem(It.getValNo(), It.getValVT(),
649 StackOffset, XLenVT,
651 }
652 PendingLocs.clear();
653 PendingArgFlags.clear();
654 return false;
655 }
656
657 assert(((LocVT.isFloatingPoint() && !LocVT.isVector()) || LocVT == XLenVT ||
658 Subtarget.isPExtPackedType(LocVT) ||
659 (TLI.getSubtarget().hasVInstructions() &&
660 (LocVT.isVector() || LocVT.isRISCVVectorTuple()))) &&
661 "Expected an XLenVT or vector types at this stage");
662
663 if (Reg) {
664 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
665 return false;
666 }
667
668 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
669 return false;
670}
671
672// FastCC has less than 1% performance improvement for some particular
673// benchmark. But theoretically, it may have benefit for some cases.
674static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
675 CCValAssign::LocInfo LocInfo,
676 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
677 CCState &State) {
678 const MachineFunction &MF = State.getMachineFunction();
679 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
680 const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
681 RISCVABI::ABI ABI = Subtarget.getTargetABI();
682
683 if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
684 (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
685 static const MCPhysReg FPR16List[] = {
686 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
687 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
688 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
689 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
690 if (MCRegister Reg = State.AllocateReg(FPR16List)) {
691 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
692 return false;
693 }
694 }
695
696 if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
697 static const MCPhysReg FPR32List[] = {
698 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
699 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
700 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
701 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
702 if (MCRegister Reg = State.AllocateReg(FPR32List)) {
703 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
704 return false;
705 }
706 }
707
708 if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
709 static const MCPhysReg FPR64List[] = {
710 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
711 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
712 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
713 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
714 if (MCRegister Reg = State.AllocateReg(FPR64List)) {
715 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
716 return false;
717 }
718 }
719
720 MVT XLenVT = Subtarget.getXLenVT();
721
722 // Check if there is an available GPRF16 before hitting the stack.
723 if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
724 if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
725 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
726 return false;
727 }
728 }
729
730 // Check if there is an available GPRF32 before hitting the stack.
731 if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
732 if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
733 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
734 return false;
735 }
736 }
737
738 // Check if there is an available GPR before hitting the stack.
739 if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
740 if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
741 if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
742 LocVT = XLenVT;
743 State.addLoc(
744 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
745 return false;
746 }
747 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
748 return false;
749 }
750 }
751
753
754 if (LocVT.isVector()) {
755 if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
756 // Fixed-length vectors are located in the corresponding scalable-vector
757 // container types.
758 if (LocVT.isFixedLengthVector()) {
759 LocVT = TLI.getContainerForFixedLengthVector(LocVT);
760 State.addLoc(
761 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
762 return false;
763 }
764 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
765 return false;
766 }
767
768 // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
769 // have a free GPR.
770 if (LocVT.isScalableVector() ||
771 State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
772 LocInfo = CCValAssign::Indirect;
773 LocVT = XLenVT;
774 }
775 }
776
777 if (LocVT == XLenVT) {
778 if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
779 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
780 return false;
781 }
782 }
783
784 if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
785 LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
786 Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
787 int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
788 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
789 return false;
790 }
791
792 return true; // CC didn't match.
793}
794
795static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
796 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
797 Type *OrigTy, CCState &State) {
798 if (ArgFlags.isNest()) {
800 "Attribute 'nest' is not supported in GHC calling convention");
801 }
802
803 static const MCPhysReg GPRList[] = {
804 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
805 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
806
807 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
808 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
809 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11
810 if (MCRegister Reg = State.AllocateReg(GPRList)) {
811 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
812 return false;
813 }
814 }
815
816 const RISCVSubtarget &Subtarget =
817 State.getMachineFunction().getSubtarget<RISCVSubtarget>();
818
819 if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
820 // Pass in STG registers: F1, ..., F6
821 // fs0 ... fs5
822 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
823 RISCV::F18_F, RISCV::F19_F,
824 RISCV::F20_F, RISCV::F21_F};
825 if (MCRegister Reg = State.AllocateReg(FPR32List)) {
826 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
827 return false;
828 }
829 }
830
831 if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
832 // Pass in STG registers: D1, ..., D6
833 // fs6 ... fs11
834 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
835 RISCV::F24_D, RISCV::F25_D,
836 RISCV::F26_D, RISCV::F27_D};
837 if (MCRegister Reg = State.AllocateReg(FPR64List)) {
838 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
839 return false;
840 }
841 }
842
843 if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
844 static const MCPhysReg GPR32List[] = {
845 RISCV::X9_W, RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
846 RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
847 RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
848 if (MCRegister Reg = State.AllocateReg(GPR32List)) {
849 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
850 return false;
851 }
852 }
853
854 if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
855 if (MCRegister Reg = State.AllocateReg(GPRList)) {
856 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
857 return false;
858 }
859 }
860
861 report_fatal_error("No registers left in GHC calling convention");
862 return true;
863}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Module.h This file contains the declarations for the Module class.
const MCPhysReg ArgFPR32s[]
const MCPhysReg ArgVRs[]
const MCPhysReg ArgFPR64s[]
const MCPhysReg ArgGPRs[]
Register Reg
static const MCPhysReg ArgVRN2M2s[]
static CCAssignFn CC_RISCV_FastCC
Used for assigning arguments with CallingConvention::Fast.
static const MCPhysReg ArgVRM2s[]
static CCAssignFn CC_RISCV_GHC
Used for assigning arguments with CallingConvention::GHC.
static const MCPhysReg ArgVRN3M2s[]
static const MCPhysReg ArgVRN4M1s[]
static const MCPhysReg ArgVRN6M1s[]
static bool CC_RISCV_Impl(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State, bool IsRet)
static ArrayRef< MCPhysReg > getFastCCArgGPRF32s(const RISCVABI::ABI ABI)
static const MCPhysReg ArgVRN4M2s[]
static const MCPhysReg ArgVRN3M1s[]
static const MCPhysReg ArgVRN7M1s[]
static bool CC_RISCVAssign2XLen(CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2, const RISCVSubtarget &Subtarget)
static MCRegister allocateRVVReg(MVT LocVT, unsigned ValNo, CCState &State, const RISCVTargetLowering &TLI)
static const MCPhysReg ArgVRN5M1s[]
static const MCPhysReg ArgVRN2M4s[]
static ArrayRef< MCPhysReg > getFastCCArgGPRF16s(const RISCVABI::ABI ABI)
static ArrayRef< MCPhysReg > getArgGPR32s(const RISCVABI::ABI ABI)
static const MCPhysReg ArgVRN2M1s[]
static const MCPhysReg ArgVRN8M1s[]
static ArrayRef< MCPhysReg > getArgGPR16s(const RISCVABI::ABI ABI)
static ArrayRef< MCPhysReg > getFastCCArgGPRs(const RISCVABI::ABI ABI)
static const MCPhysReg ArgVRM8s[]
static const MCPhysReg ArgVRM4s[]
static const MCPhysReg ArgFPR16s[]
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
bool isRISCVVectorTuple() const
Return true if this is a RISCV vector tuple type where the runtime length is machine dependent.
uint64_t getScalarSizeInBits() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
RISCVABI::ABI getTargetABI() const
bool isPExtPackedDoubleType(MVT VT) const
bool isPExtPackedType(MVT VT) const
unsigned getXLen() const
const RISCVTargetLowering * getTargetLowering() const override
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
ArrayRef< MCPhysReg > getArgGPRs(const RISCVABI::ABI ABI)
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CCAssignFn RetCC_RISCV
This is used for assigning return values to locations when making calls.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
ArrayRef(const T &OneElt) -> ArrayRef< T >
EABI
Definition CodeGen.h:73
CCAssignFn CC_RISCV
This is used for assigning arguments to locations when making calls.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Align getNonZeroOrigAlign() const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130