LLVM 23.0.0git
SelectionDAGBuilder.cpp
Go to the documentation of this file.
1//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements routines for translating from LLVM IR into SelectionDAG IR.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SelectionDAGBuilder.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
26#include "llvm/Analysis/Loads.h"
58#include "llvm/IR/Argument.h"
59#include "llvm/IR/Attributes.h"
60#include "llvm/IR/BasicBlock.h"
61#include "llvm/IR/CFG.h"
62#include "llvm/IR/CallingConv.h"
63#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/InlineAsm.h"
75#include "llvm/IR/InstrTypes.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
82#include "llvm/IR/LLVMContext.h"
84#include "llvm/IR/Metadata.h"
85#include "llvm/IR/Module.h"
86#include "llvm/IR/Operator.h"
88#include "llvm/IR/Statepoint.h"
89#include "llvm/IR/Type.h"
90#include "llvm/IR/User.h"
91#include "llvm/IR/Value.h"
92#include "llvm/MC/MCContext.h"
97#include "llvm/Support/Debug.h"
105#include <cstddef>
106#include <limits>
107#include <optional>
108#include <tuple>
109
110using namespace llvm;
111using namespace PatternMatch;
112using namespace SwitchCG;
113
114#define DEBUG_TYPE "isel"
115
116/// LimitFloatPrecision - Generate low-precision inline sequences for
117/// some float libcalls (6, 8 or 12 bits).
118static unsigned LimitFloatPrecision;
119
120static cl::opt<bool>
121 InsertAssertAlign("insert-assert-align", cl::init(true),
122 cl::desc("Insert the experimental `assertalign` node."),
124
126 LimitFPPrecision("limit-float-precision",
127 cl::desc("Generate low-precision inline sequences "
128 "for some float libcalls"),
130 cl::init(0));
131
133 "switch-peel-threshold", cl::Hidden, cl::init(66),
134 cl::desc("Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
136 "optimization"));
137
// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
153
155 const SDValue *Parts, unsigned NumParts,
156 MVT PartVT, EVT ValueVT, const Value *V,
157 SDValue InChain,
158 std::optional<CallingConv::ID> CC);
159
160/// getCopyFromParts - Create a value that contains the specified legal parts
161/// combined into the value they represent. If the parts combine to a type
162/// larger than ValueVT then AssertOp can be used to specify whether the extra
163/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
164/// (ISD::AssertSext).
165static SDValue
166getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
167 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
168 SDValue InChain,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
171 // Let the target assemble the parts if it wants to
172 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
173 if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
174 PartVT, ValueVT, CC))
175 return Val;
176
177 if (ValueVT.isVector())
178 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
179 InChain, CC);
180
181 assert(NumParts > 0 && "No parts to assemble!");
182 SDValue Val = Parts[0];
183
184 if (NumParts > 1) {
185 // Assemble the value from multiple parts.
186 if (ValueVT.isInteger()) {
187 unsigned PartBits = PartVT.getSizeInBits();
188 unsigned ValueBits = ValueVT.getSizeInBits();
189
190 // Assemble the power of 2 part.
191 unsigned RoundParts = llvm::bit_floor(NumParts);
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
194 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
195 SDValue Lo, Hi;
196
197 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
198
199 if (RoundParts > 2) {
200 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
201 InChain);
202 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
203 PartVT, HalfVT, V, InChain);
204 } else {
205 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
206 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
207 }
208
209 if (DAG.getDataLayout().isBigEndian())
210 std::swap(Lo, Hi);
211
212 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
213
214 if (RoundParts < NumParts) {
215 // Assemble the trailing non-power-of-2 part.
216 unsigned OddParts = NumParts - RoundParts;
217 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
218 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
219 OddVT, V, InChain, CC);
220
221 // Combine the round and odd parts.
222 Lo = Val;
223 if (DAG.getDataLayout().isBigEndian())
224 std::swap(Lo, Hi);
225 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
226 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
227 Hi = DAG.getNode(
228 ISD::SHL, DL, TotalVT, Hi,
229 DAG.getShiftAmountConstant(Lo.getValueSizeInBits(), TotalVT, DL));
230 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
231 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
232 }
233 } else if (PartVT.isFloatingPoint()) {
234 // FP split into multiple FP parts (for ppcf128)
235 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
236 "Unexpected split");
237 SDValue Lo, Hi;
238 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
239 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
240 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
241 std::swap(Lo, Hi);
242 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
243 } else {
244 // FP split into integer parts (soft fp)
245 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
246 !PartVT.isVector() && "Unexpected split");
247 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
248 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
249 InChain, CC);
250 }
251 }
252
253 // There is now one part, held in Val. Correct it to match ValueVT.
254 // PartEVT is the type of the register class that holds the value.
255 // ValueVT is the type of the inline asm operation.
256 EVT PartEVT = Val.getValueType();
257
258 if (PartEVT == ValueVT)
259 return Val;
260
261 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
262 ValueVT.bitsLT(PartEVT)) {
263 // For an FP value in an integer part, we need to truncate to the right
264 // width first.
265 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
266 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
267 }
268
269 // Handle types that have the same size.
270 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
271 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
272
273 // Handle types with different sizes.
274 if (PartEVT.isInteger() && ValueVT.isInteger()) {
275 if (ValueVT.bitsLT(PartEVT)) {
276 // For a truncate, see if we have any information to
277 // indicate whether the truncated bits will always be
278 // zero or sign-extension.
279 if (AssertOp)
280 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
281 DAG.getValueType(ValueVT));
282 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
283 }
284 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
285 }
286
287 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
288 // FP_ROUND's are always exact here.
289 if (ValueVT.bitsLT(Val.getValueType())) {
290
291 SDValue NoChange =
293
294 if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
295 llvm::Attribute::StrictFP)) {
296 return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
297 DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
298 NoChange);
299 }
300
301 return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
302 }
303
304 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
305 }
306
307 // Handle MMX to a narrower integer type by bitcasting MMX to integer and
308 // then truncating.
309 if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
310 ValueVT.bitsLT(PartEVT)) {
311 Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
312 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
313 }
314
315 report_fatal_error("Unknown mismatch in getCopyFromParts!");
316}
317
319 const Twine &ErrMsg) {
321 if (!I)
322 return Ctx.emitError(ErrMsg);
323
324 if (const CallInst *CI = dyn_cast<CallInst>(I))
325 if (CI->isInlineAsm()) {
326 return Ctx.diagnose(DiagnosticInfoInlineAsm(
327 *CI, ErrMsg + ", possible invalid constraint for vector type"));
328 }
329
330 return Ctx.emitError(I, ErrMsg);
331}
332
333/// getCopyFromPartsVector - Create a value that contains the specified legal
334/// parts combined into the value they represent. If the parts combine to a
335/// type larger than ValueVT then AssertOp can be used to specify whether the
336/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
337/// ValueVT (ISD::AssertSext).
339 const SDValue *Parts, unsigned NumParts,
340 MVT PartVT, EVT ValueVT, const Value *V,
341 SDValue InChain,
342 std::optional<CallingConv::ID> CallConv) {
343 assert(ValueVT.isVector() && "Not a vector value");
344 assert(NumParts > 0 && "No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
346
347 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
348 SDValue Val = Parts[0];
349
350 // Handle a multi-element vector.
351 if (NumParts > 1) {
352 EVT IntermediateVT;
353 MVT RegisterVT;
354 unsigned NumIntermediates;
355 unsigned NumRegs;
356
357 if (IsABIRegCopy) {
359 *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
361 } else {
362 NumRegs =
363 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
364 NumIntermediates, RegisterVT);
365 }
366
367 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
368 NumParts = NumRegs; // Silence a compiler warning.
369 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
370 assert(RegisterVT.getSizeInBits() ==
371 Parts[0].getSimpleValueType().getSizeInBits() &&
372 "Part type sizes don't match!");
373
374 // Assemble the parts into intermediate operands.
375 SmallVector<SDValue, 8> Ops(NumIntermediates);
376 if (NumIntermediates == NumParts) {
377 // If the register was not expanded, truncate or copy the value,
378 // as appropriate.
379 for (unsigned i = 0; i != NumParts; ++i)
380 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
381 V, InChain, CallConv);
382 } else if (NumParts > 0) {
383 // If the intermediate type was expanded, build the intermediate
384 // operands from the parts.
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (unsigned i = 0; i != NumIntermediates; ++i)
389 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
390 IntermediateVT, V, InChain, CallConv);
391 }
392
393 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
394 // intermediate operands.
395 EVT BuiltVectorTy =
396 IntermediateVT.isVector()
398 *DAG.getContext(), IntermediateVT.getScalarType(),
399 IntermediateVT.getVectorElementCount() * NumParts)
401 IntermediateVT.getScalarType(),
402 NumIntermediates);
403 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
405 DL, BuiltVectorTy, Ops);
406 }
407
408 // There is now one part, held in Val. Correct it to match ValueVT.
409 EVT PartEVT = Val.getValueType();
410
411 if (PartEVT == ValueVT)
412 return Val;
413
414 if (PartEVT.isVector()) {
415 // Vector/Vector bitcast.
416 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
417 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
418
419 // If the parts vector has more elements than the value vector, then we
420 // have a vector widening case (e.g. <2 x float> -> <4 x float>).
421 // Extract the elements we want.
422 if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
425 (PartEVT.getVectorElementCount().isScalable() ==
426 ValueVT.getVectorElementCount().isScalable()) &&
427 "Cannot narrow, it would be a lossy transformation");
428 PartEVT =
430 ValueVT.getVectorElementCount());
431 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
432 DAG.getVectorIdxConstant(0, DL));
433 if (PartEVT == ValueVT)
434 return Val;
435 if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
436 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
437
438 // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
439 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
440 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
441 }
442
443 // Promoted vector extract
444 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
445 }
446
447 // Trivial bitcast if the types are the same size and the destination
448 // vector type is legal.
449 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
450 TLI.isTypeLegal(ValueVT))
451 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
452
453 if (ValueVT.getVectorNumElements() != 1) {
454 // Certain ABIs require that vectors are passed as integers. For vectors
455 // are the same size, this is an obvious bitcast.
456 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
457 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
458 } else if (ValueVT.bitsLT(PartEVT)) {
459 const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
460 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
461 // Drop the extra bits.
462 Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
463 return DAG.getBitcast(ValueVT, Val);
464 }
465
467 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
468 return DAG.getUNDEF(ValueVT);
469 }
470
471 // Handle cases such as i8 -> <1 x i1>
472 EVT ValueSVT = ValueVT.getVectorElementType();
473 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
474 unsigned ValueSize = ValueSVT.getSizeInBits();
475 if (ValueSize == PartEVT.getSizeInBits()) {
476 Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
477 } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
478 // It's possible a scalar floating point type gets softened to integer and
479 // then promoted to a larger integer. If PartEVT is the larger integer
480 // we need to truncate it and then bitcast to the FP type.
481 assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
482 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
483 Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
484 Val = DAG.getBitcast(ValueSVT, Val);
485 } else {
486 Val = ValueVT.isFloatingPoint()
487 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
488 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
489 }
490 }
491
492 return DAG.getBuildVector(ValueVT, DL, Val);
493}
494
495static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
496 SDValue Val, SDValue *Parts, unsigned NumParts,
497 MVT PartVT, const Value *V,
498 std::optional<CallingConv::ID> CallConv);
499
500/// getCopyToParts - Create a series of nodes that contain the specified value
501/// split into legal parts. If the parts contain more bits than Val, then, for
502/// integers, ExtendKind can be used to specify how to generate the extra bits.
503static void
505 unsigned NumParts, MVT PartVT, const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
507 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
508 // Let the target split the parts if it wants to
509 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
510 if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
511 CallConv))
512 return;
513 EVT ValueVT = Val.getValueType();
514
515 // Handle the vector case separately.
516 if (ValueVT.isVector())
517 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
518 CallConv);
519
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
523
524 if (NumParts == 0)
525 return;
526
527 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 && "No-op copy with multiple parts!");
531 Parts[0] = Val;
532 return;
533 }
534
535 unsigned PartBits = PartVT.getSizeInBits();
536 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
537 // If the parts cover more bits than the value has, promote the value.
538 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
539 assert(NumParts == 1 && "Do not know what to promote to!");
540 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
541 } else {
542 if (ValueVT.isFloatingPoint()) {
543 // FP values need to be bitcast, then extended if they are being put
544 // into a larger container.
545 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
546 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
547 }
548 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
549 ValueVT.isInteger() &&
550 "Unknown mismatch!");
551 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
552 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
554 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
555 }
556 } else if (PartBits == ValueVT.getSizeInBits()) {
557 // Different types of the same size.
558 assert(NumParts == 1 && PartEVT != ValueVT);
559 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
560 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
561 // If the parts cover less bits than value has, truncate the value.
562 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
563 ValueVT.isInteger() &&
564 "Unknown mismatch!");
565 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
566 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
567 if (PartVT == MVT::x86mmx)
568 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
569 }
570
571 // The value may have changed - recompute ValueVT.
572 ValueVT = Val.getValueType();
573 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
574 "Failed to tile the value with PartVT!");
575
576 if (NumParts == 1) {
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
580 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
581 }
582
583 Parts[0] = Val;
584 return;
585 }
586
587 // Expand the value into multiple parts.
588 if (NumParts & (NumParts - 1)) {
589 // The number of parts is not a power of 2. Split off and copy the tail.
590 assert(PartVT.isInteger() && ValueVT.isInteger() &&
591 "Do not know what to expand to!");
592 unsigned RoundParts = llvm::bit_floor(NumParts);
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
595 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
596 DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
597
598 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
599 CallConv);
600
601 if (DAG.getDataLayout().isBigEndian())
602 // The odd parts were reversed by getCopyToParts - unreverse them.
603 std::reverse(Parts + RoundParts, Parts + NumParts);
604
605 NumParts = RoundParts;
606 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
607 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
608 }
609
610 // The number of parts is a power of 2. Repeatedly bisect the value using
611 // EXTRACT_ELEMENT.
612 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
614 ValueVT.getSizeInBits()),
615 Val);
616
617 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
620 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
621 SDValue &Part0 = Parts[i];
622 SDValue &Part1 = Parts[i+StepSize/2];
623
624 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
625 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
626 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
627 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
628
629 if (ThisBits == PartBits && ThisVT != PartVT) {
630 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
631 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
632 }
633 }
634 }
635
636 if (DAG.getDataLayout().isBigEndian())
637 std::reverse(Parts, Parts + OrigNumParts);
638}
639
641 const SDLoc &DL, EVT PartVT) {
642 if (!PartVT.isVector())
643 return SDValue();
644
645 EVT ValueVT = Val.getValueType();
646 EVT PartEVT = PartVT.getVectorElementType();
647 EVT ValueEVT = ValueVT.getVectorElementType();
648 ElementCount PartNumElts = PartVT.getVectorElementCount();
649 ElementCount ValueNumElts = ValueVT.getVectorElementCount();
650
651 // We only support widening vectors with equivalent element types and
652 // fixed/scalable properties. If a target needs to widen a fixed-length type
653 // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
654 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
655 PartNumElts.isScalable() != ValueNumElts.isScalable())
656 return SDValue();
657
658 // Have a try for bf16 because some targets share its ABI with fp16.
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
662 Val = DAG.getNode(
664 ValueVT.changeVectorElementType(*DAG.getContext(), MVT::f16), Val);
665 } else if (PartEVT != ValueEVT) {
666 return SDValue();
667 }
668
669 // Widening a scalable vector to another scalable vector is done by inserting
670 // the vector into a larger undef one.
671 if (PartNumElts.isScalable())
672 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
673 Val, DAG.getVectorIdxConstant(0, DL));
674
675 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
676 // undef elements.
678 DAG.ExtractVectorElements(Val, Ops);
679 SDValue EltUndef = DAG.getUNDEF(PartEVT);
680 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
681
682 // FIXME: Use CONCAT for 2x -> 4x.
683 return DAG.getBuildVector(PartVT, DL, Ops);
684}
685
686/// getCopyToPartsVector - Create a series of nodes that contain the specified
687/// value split into legal parts.
688static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
689 SDValue Val, SDValue *Parts, unsigned NumParts,
690 MVT PartVT, const Value *V,
691 std::optional<CallingConv::ID> CallConv) {
692 EVT ValueVT = Val.getValueType();
693 assert(ValueVT.isVector() && "Not a vector");
694 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
695 const bool IsABIRegCopy = CallConv.has_value();
696
697 if (NumParts == 1) {
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
700 // Nothing to do.
701 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
702 // Bitconvert vector->vector case.
703 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
704 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
705 Val = Widened;
706 } else if (PartVT.isVector() &&
708 ValueVT.getVectorElementType()) &&
709 PartEVT.getVectorElementCount() ==
710 ValueVT.getVectorElementCount()) {
711
712 // Promoted vector extract
713 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
714 } else if (PartEVT.isVector() &&
715 PartEVT.getVectorElementType() !=
716 ValueVT.getVectorElementType() &&
717 TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
719 // Combination of widening and promotion.
720 EVT WidenVT =
722 PartVT.getVectorElementCount());
723 SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
724 Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
725 } else {
726 // Don't extract an integer from a float vector. This can happen if the
727 // FP type gets softened to integer and then promoted. The promotion
728 // prevents it from being picked up by the earlier bitcast case.
729 if (ValueVT.getVectorElementCount().isScalar() &&
730 (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
731 // If we reach this condition and PartVT is FP, this means that
732 // ValueVT is also FP and both have a different size, otherwise we
733 // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
734 // would be invalid since that would mean the smaller FP type has to
735 // be extended to the larger one.
736 if (PartVT.isFloatingPoint()) {
737 Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
738 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
739 } else
740 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
741 DAG.getVectorIdxConstant(0, DL));
742 } else {
743 uint64_t ValueSize = ValueVT.getFixedSizeInBits();
744 assert(PartVT.getFixedSizeInBits() > ValueSize &&
745 "lossy conversion of vector to scalar type");
746 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
747 Val = DAG.getBitcast(IntermediateType, Val);
748 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
749 }
750 }
751
752 assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
753 Parts[0] = Val;
754 return;
755 }
756
757 // Handle a multi-element vector.
758 EVT IntermediateVT;
759 MVT RegisterVT;
760 unsigned NumIntermediates;
761 unsigned NumRegs;
762 if (IsABIRegCopy) {
764 *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
765 RegisterVT);
766 } else {
767 NumRegs =
768 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
769 NumIntermediates, RegisterVT);
770 }
771
772 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
773 NumParts = NumRegs; // Silence a compiler warning.
774 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
775
776 assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
777 "Mixing scalable and fixed vectors when copying in parts");
778
779 std::optional<ElementCount> DestEltCnt;
780
781 if (IntermediateVT.isVector())
782 DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
783 else
784 DestEltCnt = ElementCount::getFixed(NumIntermediates);
785
786 EVT BuiltVectorTy = EVT::getVectorVT(
787 *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
788
789 if (ValueVT == BuiltVectorTy) {
790 // Nothing to do.
791 } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
792 // Bitconvert vector->vector case.
793 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
794 } else {
795 if (BuiltVectorTy.getVectorElementType().bitsGT(
796 ValueVT.getVectorElementType())) {
797 // Integer promotion.
798 ValueVT = EVT::getVectorVT(*DAG.getContext(),
799 BuiltVectorTy.getVectorElementType(),
800 ValueVT.getVectorElementCount());
801 Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
802 }
803
804 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
805 Val = Widened;
806 }
807 }
808
809 assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
810
811 // Split the vector into intermediate operands.
812 SmallVector<SDValue, 8> Ops(NumIntermediates);
813 for (unsigned i = 0; i != NumIntermediates; ++i) {
814 if (IntermediateVT.isVector()) {
815 // This does something sensible for scalable vectors - see the
816 // definition of EXTRACT_SUBVECTOR for further details.
817 unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
818 Ops[i] =
819 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
820 DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
821 } else {
822 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
823 DAG.getVectorIdxConstant(i, DL));
824 }
825 }
826
827 // Split the intermediate operands into legal parts.
828 if (NumParts == NumIntermediates) {
829 // If the register was not expanded, promote or copy the value,
830 // as appropriate.
831 for (unsigned i = 0; i != NumParts; ++i)
832 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
833 } else if (NumParts > 0) {
834 // If the intermediate type was expanded, split each the value into
835 // legal parts.
836 assert(NumIntermediates != 0 && "division by zero");
837 assert(NumParts % NumIntermediates == 0 &&
838 "Must expand into a divisible number of parts!");
839 unsigned Factor = NumParts / NumIntermediates;
840 for (unsigned i = 0; i != NumIntermediates; ++i)
841 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
842 CallConv);
843 }
844}
845
846static void failForInvalidBundles(const CallBase &I, StringRef Name,
847 ArrayRef<uint32_t> AllowedBundles) {
848 if (I.hasOperandBundlesOtherThan(AllowedBundles)) {
849 ListSeparator LS;
850 std::string Error;
852 for (unsigned i = 0, e = I.getNumOperandBundles(); i != e; ++i) {
853 OperandBundleUse U = I.getOperandBundleAt(i);
854 if (!is_contained(AllowedBundles, U.getTagID()))
855 OS << LS << U.getTagName();
856 }
858 Twine("cannot lower ", Name)
859 .concat(Twine(" with arbitrary operand bundles: ", Error)));
860 }
861}
862
864 EVT valuevt, std::optional<CallingConv::ID> CC)
865 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
866 RegCount(1, regs.size()), CallConv(CC) {}
867
869 const DataLayout &DL, Register Reg, Type *Ty,
870 std::optional<CallingConv::ID> CC) {
871 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
872
873 CallConv = CC;
874
875 for (EVT ValueVT : ValueVTs) {
876 unsigned NumRegs =
878 ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
879 : TLI.getNumRegisters(Context, ValueVT);
880 MVT RegisterVT =
882 ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
883 : TLI.getRegisterType(Context, ValueVT);
884 for (unsigned i = 0; i != NumRegs; ++i)
885 Regs.push_back(Reg + i);
886 RegVTs.push_back(RegisterVT);
887 RegCount.push_back(NumRegs);
888 Reg = Reg.id() + NumRegs;
889 }
890}
891
893 FunctionLoweringInfo &FuncInfo,
894 const SDLoc &dl, SDValue &Chain,
895 SDValue *Glue, const Value *V) const {
896 // A Value with type {} or [0 x %t] needs no registers.
897 if (ValueVTs.empty())
898 return SDValue();
899
900 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
901
902 // Assemble the legal parts into the final values.
903 SmallVector<SDValue, 4> Values(ValueVTs.size());
905 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
906 // Copy the legal parts from the registers.
907 EVT ValueVT = ValueVTs[Value];
908 unsigned NumRegs = RegCount[Value];
909 MVT RegisterVT = isABIMangled()
911 *DAG.getContext(), *CallConv, RegVTs[Value])
912 : RegVTs[Value];
913
914 Parts.resize(NumRegs);
915 for (unsigned i = 0; i != NumRegs; ++i) {
916 SDValue P;
917 if (!Glue) {
918 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
919 } else {
920 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
921 *Glue = P.getValue(2);
922 }
923
924 Chain = P.getValue(1);
925 Parts[i] = P;
926
927 // If the source register was virtual and if we know something about it,
928 // add an assert node.
929 if (!Regs[Part + i].isVirtual() || !RegisterVT.isInteger())
930 continue;
931
933 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
934 if (!LOI)
935 continue;
936
937 unsigned RegSize = RegisterVT.getScalarSizeInBits();
938 unsigned NumSignBits = LOI->NumSignBits;
939 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
940
941 if (NumZeroBits == RegSize) {
942 // The current value is a zero.
943 // Explicitly express that as it would be easier for
944 // optimizations to kick in.
945 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
946 continue;
947 }
948
949 // FIXME: We capture more information than the dag can represent. For
950 // now, just use the tightest assertzext/assertsext possible.
951 bool isSExt;
952 EVT FromVT(MVT::Other);
953 if (NumZeroBits) {
954 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
955 isSExt = false;
956 } else if (NumSignBits > 1) {
957 FromVT =
958 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
959 isSExt = true;
960 } else {
961 continue;
962 }
963 // Add an assertion node.
964 assert(FromVT != MVT::Other);
965 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
966 RegisterVT, P, DAG.getValueType(FromVT));
967 }
968
969 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
970 RegisterVT, ValueVT, V, Chain, CallConv);
971 Part += NumRegs;
972 Parts.clear();
973 }
974
975 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
976}
977
979 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
980 const Value *V,
981 ISD::NodeType PreferredExtendType) const {
982 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
983 ISD::NodeType ExtendKind = PreferredExtendType;
984
985 // Get the list of the values's legal parts.
986 unsigned NumRegs = Regs.size();
987 SmallVector<SDValue, 8> Parts(NumRegs);
988 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
989 unsigned NumParts = RegCount[Value];
990
991 MVT RegisterVT = isABIMangled()
993 *DAG.getContext(), *CallConv, RegVTs[Value])
994 : RegVTs[Value];
995
996 if (ExtendKind == ISD::ANY_EXTEND)
997 if (TLI.isZExtFree(peekThroughFreeze(Val), RegisterVT))
998 ExtendKind = ISD::ZERO_EXTEND;
999
1000 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
1001 NumParts, RegisterVT, V, CallConv, ExtendKind);
1002 Part += NumParts;
1003 }
1004
1005 // Copy the parts into the registers.
1006 SmallVector<SDValue, 8> Chains(NumRegs);
1007 for (unsigned i = 0; i != NumRegs; ++i) {
1008 SDValue Part;
1009 if (!Glue) {
1010 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
1011 } else {
1012 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
1013 *Glue = Part.getValue(1);
1014 }
1015
1016 Chains[i] = Part.getValue(0);
1017 }
1018
1019 if (NumRegs == 1 || Glue)
1020 // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
1021 // flagged to it. That is the CopyToReg nodes and the user are considered
1022 // a single scheduling unit. If we create a TokenFactor and return it as
1023 // chain, then the TokenFactor is both a predecessor (operand) of the
1024 // user as well as a successor (the TF operands are flagged to the user).
1025 // c1, f1 = CopyToReg
1026 // c2, f2 = CopyToReg
1027 // c3 = TokenFactor c1, c2
1028 // ...
1029 // = op c3, ..., f2
1030 Chain = Chains[NumRegs-1];
1031 else
1032 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
1033}
1034
1036 unsigned MatchingIdx, const SDLoc &dl,
1037 SelectionDAG &DAG,
1038 std::vector<SDValue> &Ops) const {
1039 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1040
1041 InlineAsm::Flag Flag(Code, Regs.size());
1042 if (HasMatching)
1043 Flag.setMatchingOp(MatchingIdx);
1044 else if (!Regs.empty() && Regs.front().isVirtual()) {
1045 // Put the register class of the virtual registers in the flag word. That
1046 // way, later passes can recompute register class constraints for inline
1047 // assembly as well as normal instructions.
1048 // Don't do this for tied operands that can use the regclass information
1049 // from the def.
1051 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
1052 Flag.setRegClass(RC->getID());
1053 }
1054
1055 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
1056 Ops.push_back(Res);
1057
1058 if (Code == InlineAsm::Kind::Clobber) {
1059 // Clobbers should always have a 1:1 mapping with registers, and may
1060 // reference registers that have illegal (e.g. vector) types. Hence, we
1061 // shouldn't try to apply any sort of splitting logic to them.
1062 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
1063 "No 1:1 mapping from clobbers to regs?");
1065 (void)SP;
1066 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
1067 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
1068 assert(
1069 (Regs[I] != SP ||
1071 "If we clobbered the stack pointer, MFI should know about it.");
1072 }
1073 return;
1074 }
1075
1076 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
1077 MVT RegisterVT = RegVTs[Value];
1078 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1079 RegisterVT);
1080 for (unsigned i = 0; i != NumRegs; ++i) {
1081 assert(Reg < Regs.size() && "Mismatch in # registers expected");
1082 Register TheReg = Regs[Reg++];
1083 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1084 }
1085 }
1086}
1087
1091 unsigned I = 0;
1092 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1093 unsigned RegCount = std::get<0>(CountAndVT);
1094 MVT RegisterVT = std::get<1>(CountAndVT);
1095 TypeSize RegisterSize = RegisterVT.getSizeInBits();
1096 for (unsigned E = I + RegCount; I != E; ++I)
1097 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1098 }
1099 return OutVec;
1100}
1101
1103 AssumptionCache *ac, const TargetLibraryInfo *li,
1104 const TargetTransformInfo &TTI) {
1105 BatchAA = aa;
1106 AC = ac;
1107 GFI = gfi;
1108 LibInfo = li;
1109 Context = DAG.getContext();
1110 LPadToCallSiteMap.clear();
1111 this->TTI = &TTI;
1112 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1113 AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
1114 *DAG.getMachineFunction().getFunction().getParent());
1115}
1116
1118 NodeMap.clear();
1119 UnusedArgNodeMap.clear();
1120 PendingLoads.clear();
1121 PendingExports.clear();
1122 PendingConstrainedFP.clear();
1123 PendingConstrainedFPStrict.clear();
1124 CurInst = nullptr;
1125 HasTailCall = false;
1126 SDNodeOrder = LowestSDNodeOrder;
1127 StatepointLowering.clear();
1128}
1129
1131 DanglingDebugInfoMap.clear();
1132}
1133
1134// Update DAG root to include dependencies on Pending chains.
1135SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1136 SDValue Root = DAG.getRoot();
1137
1138 if (Pending.empty())
1139 return Root;
1140
1141 // Add current root to PendingChains, unless we already indirectly
1142 // depend on it.
1143 if (Root.getOpcode() != ISD::EntryToken) {
1144 unsigned i = 0, e = Pending.size();
1145 for (; i != e; ++i) {
1146 assert(Pending[i].getNode()->getNumOperands() > 1);
1147 if (Pending[i].getNode()->getOperand(0) == Root)
1148 break; // Don't add the root if we already indirectly depend on it.
1149 }
1150
1151 if (i == e)
1152 Pending.push_back(Root);
1153 }
1154
1155 if (Pending.size() == 1)
1156 Root = Pending[0];
1157 else
1158 Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1159
1160 DAG.setRoot(Root);
1161 Pending.clear();
1162 return Root;
1163}
1164
// NOTE(review): extraction artifact — the function header (doxygen lines
// ~1165-1169) and the `case fp::...:` labels (lines 1173-1174, 1187) were
// dropped when this chunk was captured; the leading numbers on each line are
// doxygen line numbers, not code. From the visible body this appears to be a
// root-flushing helper that takes an fp::ExceptionBehavior `EB`, chains up
// whichever pending constrained-FP list conflicts with the new behavior, and
// returns the (possibly updated) DAG root. Restore the exact signature and
// case labels from upstream LLVM before editing this function.
1168
 1170  // If the new exception behavior differs from that of the pending
 1171  // ones, chain up them and update the root.
 1172  switch (EB) {
    // (dropped labels here: the non-strict exception behaviors fall through
    // to this branch — confirm against upstream.)
 1175    // Floating-point exceptions produced by such operations are not intended
 1176    // to be observed, so the sequence of these operations does not need to be
 1177    // preserved.
 1178    //
 1179    // They however must not be mixed with the instructions that have strict
 1180    // exception behavior. Placing an operation with 'ebIgnore' behavior between
 1181    // 'ebStrict' operations could distort the observed exception behavior.
 1182    if (!PendingConstrainedFPStrict.empty()) {
      // The two pending lists are mutually exclusive at any point in time.
 1183      assert(PendingConstrainedFP.empty());
 1184      updateRoot(PendingConstrainedFPStrict);
 1185    }
 1186    break;
    // (dropped label here: the strict exception behavior — confirm.)
 1188    // Floating-point exception produced by these operations may be observed, so
 1189    // they must be correctly chained. If trapping on FP exceptions is
 1190    // disabled, the exceptions can be observed only by functions that read
 1191    // exception flags, like 'llvm.get_fpenv' or 'fetestexcept'. It means that
 1192    // the order of operations is not significant between barriers.
 1193    //
 1194    // If trapping is enabled, each operation becomes an implicit observation
 1195    // point, so the operations must be sequenced according their original
 1196    // source order.
 1197    if (!PendingConstrainedFP.empty()) {
 1198      assert(PendingConstrainedFPStrict.empty());
 1199      updateRoot(PendingConstrainedFP);
 1200    }
 1201    // TODO: Add support for trapping-enabled scenarios.
 1202  }
 1203  return DAG.getRoot();
 1204}
1205
1207 // Chain up all pending constrained intrinsics together with all
1208 // pending loads, by simply appending them to PendingLoads and
1209 // then calling getMemoryRoot().
1210 PendingLoads.reserve(PendingLoads.size() +
1211 PendingConstrainedFP.size() +
1212 PendingConstrainedFPStrict.size());
1213 PendingLoads.append(PendingConstrainedFP.begin(),
1214 PendingConstrainedFP.end());
1215 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1216 PendingConstrainedFPStrict.end());
1217 PendingConstrainedFP.clear();
1218 PendingConstrainedFPStrict.clear();
1219 return getMemoryRoot();
1220}
1221
1223 // We need to emit pending fpexcept.strict constrained intrinsics,
1224 // so append them to the PendingExports list.
1225 PendingExports.append(PendingConstrainedFPStrict.begin(),
1226 PendingConstrainedFPStrict.end());
1227 PendingConstrainedFPStrict.clear();
1228 return updateRoot(PendingExports);
1229}
1230
1232 DILocalVariable *Variable,
1234 DebugLoc DL) {
1235 assert(Variable && "Missing variable");
1236
1237 // Check if address has undef value.
1238 if (!Address || isa<UndefValue>(Address) ||
1239 (Address->use_empty() && !isa<Argument>(Address))) {
1240 LLVM_DEBUG(
1241 dbgs()
1242 << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1243 return;
1244 }
1245
1246 bool IsParameter = Variable->isParameter() || isa<Argument>(Address);
1247
1248 SDValue &N = NodeMap[Address];
1249 if (!N.getNode() && isa<Argument>(Address))
1250 // Check unused arguments map.
1251 N = UnusedArgNodeMap[Address];
1252 SDDbgValue *SDV;
1253 if (N.getNode()) {
1254 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
1255 Address = BCI->getOperand(0);
1256 // Parameters are handled specially.
1257 auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
1258 if (IsParameter && FINode) {
1259 // Byval parameter. We have a frame index at this point.
1260 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
1261 /*IsIndirect*/ true, DL, SDNodeOrder);
1262 } else if (isa<Argument>(Address)) {
1263 // Address is an argument, so try to emit its dbg value using
1264 // virtual register info from the FuncInfo.ValueMap.
1265 EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1266 FuncArgumentDbgValueKind::Declare, N);
1267 return;
1268 } else {
1269 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
1270 true, DL, SDNodeOrder);
1271 }
1272 DAG.AddDbgValue(SDV, IsParameter);
1273 } else {
1274 // If Address is an argument then try to emit its dbg value using
1275 // virtual register info from the FuncInfo.ValueMap.
1276 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1277 FuncArgumentDbgValueKind::Declare, N)) {
1278 LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
1279 << " (could not emit func-arg dbg_value)\n");
1280 }
1281 }
1282}
1283
1285 // Add SDDbgValue nodes for any var locs here. Do so before updating
1286 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1287 if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1288 // Add SDDbgValue nodes for any var locs here. Do so before updating
1289 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1290 for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1291 It != End; ++It) {
1292 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1293 dropDanglingDebugInfo(Var, It->Expr);
1294 if (It->Values.isKillLocation(It->Expr)) {
1295 handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1296 continue;
1297 }
1298 SmallVector<Value *> Values(It->Values.location_ops());
1299 if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1300 It->Values.hasArgList())) {
1301 SmallVector<Value *, 4> Vals(It->Values.location_ops());
1303 FnVarLocs->getDILocalVariable(It->VariableID),
1304 It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1305 }
1306 }
1307 }
1308
1309 // We must skip DbgVariableRecords if they've already been processed above as
1310 // we have just emitted the debug values resulting from assignment tracking
1311 // analysis, making any existing DbgVariableRecords redundant (and probably
1312 // less correct). We still need to process DbgLabelRecords. This does sink
1313 // DbgLabelRecords to the bottom of the group of debug records. That sholdn't
1314 // be important as it does so deterministcally and ordering between
1315 // DbgLabelRecords and DbgVariableRecords is immaterial (other than for MIR/IR
1316 // printing).
1317 bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
1318 // Is there is any debug-info attached to this instruction, in the form of
1319 // DbgRecord non-instruction debug-info records.
1320 for (DbgRecord &DR : I.getDbgRecordRange()) {
1321 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
1322 assert(DLR->getLabel() && "Missing label");
1323 SDDbgLabel *SDV =
1324 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1325 DAG.AddDbgLabel(SDV);
1326 continue;
1327 }
1328
1329 if (SkipDbgVariableRecords)
1330 continue;
1332 DILocalVariable *Variable = DVR.getVariable();
1335
1337 if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1338 continue;
1339 LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
1340 << "\n");
1342 DVR.getDebugLoc());
1343 continue;
1344 }
1345
1346 // A DbgVariableRecord with no locations is a kill location.
1348 if (Values.empty()) {
1350 SDNodeOrder);
1351 continue;
1352 }
1353
1354 // A DbgVariableRecord with an undef or absent location is also a kill
1355 // location.
1356 if (llvm::any_of(Values,
1357 [](Value *V) { return !V || isa<UndefValue>(V); })) {
1359 SDNodeOrder);
1360 continue;
1361 }
1362
1363 bool IsVariadic = DVR.hasArgList();
1364 if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
1365 SDNodeOrder, IsVariadic)) {
1366 addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1367 DVR.getDebugLoc(), SDNodeOrder);
1368 }
1369 }
1370}
1371
1373 visitDbgInfo(I);
1374
1375 // Set up outgoing PHI node register values before emitting the terminator.
1376 if (I.isTerminator()) {
1377 HandlePHINodesInSuccessorBlocks(I.getParent());
1378 }
1379
1380 ++SDNodeOrder;
1381 CurInst = &I;
1382
1383 // Set inserted listener only if required.
1384 bool NodeInserted = false;
1385 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1386 MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1387 MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
1388 if (PCSectionsMD || MMRA) {
1389 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1390 DAG, [&](SDNode *) { NodeInserted = true; });
1391 }
1392
1393 visit(I.getOpcode(), I);
1394
1395 if (!I.isTerminator() && !HasTailCall &&
1396 !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1398
1399 // Handle metadata.
1400 if (PCSectionsMD || MMRA) {
1401 auto It = NodeMap.find(&I);
1402 if (It != NodeMap.end()) {
1403 if (PCSectionsMD)
1404 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1405 if (MMRA)
1406 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1407 } else if (NodeInserted) {
1408 // This should not happen; if it does, don't let it go unnoticed so we can
1409 // fix it. Relevant visit*() function is probably missing a setValue().
1410 errs() << "warning: loosing !pcsections and/or !mmra metadata ["
1411 << I.getModule()->getName() << "]\n";
1412 LLVM_DEBUG(I.dump());
1413 assert(false);
1414 }
1415 }
1416
1417 CurInst = nullptr;
1418}
1419
1420void SelectionDAGBuilder::visitPHI(const PHINode &) {
1421 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1422}
1423
1424void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1425 // Note: this doesn't use InstVisitor, because it has to work with
1426 // ConstantExpr's in addition to instructions.
1427 switch (Opcode) {
1428 default: llvm_unreachable("Unknown instruction type encountered!");
1429 // Build the switch statement using the Instruction.def file.
1430#define HANDLE_INST(NUM, OPCODE, CLASS) \
1431 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1432#include "llvm/IR/Instruction.def"
1433 }
1434}
1435
1437 DILocalVariable *Variable,
1438 DebugLoc DL, unsigned Order,
1441 // For variadic dbg_values we will now insert poison.
1442 // FIXME: We can potentially recover these!
1444 for (const Value *V : Values) {
1445 auto *Poison = PoisonValue::get(V->getType());
1447 }
1448 SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1449 /*IsIndirect=*/false, DL, Order,
1450 /*IsVariadic=*/true);
1451 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1452 return true;
1453}
1454
1456 DILocalVariable *Var,
1457 DIExpression *Expr,
1458 bool IsVariadic, DebugLoc DL,
1459 unsigned Order) {
1460 if (IsVariadic) {
1461 handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1462 return;
1463 }
1464 // TODO: Dangling debug info will eventually either be resolved or produce
1465 // a poison DBG_VALUE. However in the resolution case, a gap may appear
1466 // between the original dbg.value location and its resolved DBG_VALUE,
1467 // which we should ideally fill with an extra poison DBG_VALUE.
1468 assert(Values.size() == 1);
1469 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1470}
1471
1473 const DIExpression *Expr) {
1474 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1475 DIVariable *DanglingVariable = DDI.getVariable();
1476 DIExpression *DanglingExpr = DDI.getExpression();
1477 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1478 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1479 << printDDI(nullptr, DDI) << "\n");
1480 return true;
1481 }
1482 return false;
1483 };
1484
1485 for (auto &DDIMI : DanglingDebugInfoMap) {
1486 DanglingDebugInfoVector &DDIV = DDIMI.second;
1487
1488 // If debug info is to be dropped, run it through final checks to see
1489 // whether it can be salvaged.
1490 for (auto &DDI : DDIV)
1491 if (isMatchingDbgValue(DDI))
1492 salvageUnresolvedDbgValue(DDIMI.first, DDI);
1493
1494 erase_if(DDIV, isMatchingDbgValue);
1495 }
1496}
1497
1498// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1499// generate the debug data structures now that we've seen its definition.
1501 SDValue Val) {
1502 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1503 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1504 return;
1505
1506 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1507 for (auto &DDI : DDIV) {
1508 DebugLoc DL = DDI.getDebugLoc();
1509 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1510 DILocalVariable *Variable = DDI.getVariable();
1511 DIExpression *Expr = DDI.getExpression();
1512 assert(Variable->isValidLocationForIntrinsic(DL) &&
1513 "Expected inlined-at fields to agree");
1514 SDDbgValue *SDV;
1515 if (Val.getNode()) {
1516 // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1517 // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1518 // we couldn't resolve it directly when examining the DbgValue intrinsic
1519 // in the first place we should not be more successful here). Unless we
1520 // have some test case that prove this to be correct we should avoid
1521 // calling EmitFuncArgumentDbgValue here.
1522 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1523 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1524 FuncArgumentDbgValueKind::Value, Val)) {
1525 LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1526 << printDDI(V, DDI) << "\n");
1527 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1528 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1529 // inserted after the definition of Val when emitting the instructions
1530 // after ISel. An alternative could be to teach
1531 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1532 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1533 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1534 << ValSDNodeOrder << "\n");
1535 SDV = getDbgValue(Val, Variable, Expr, DL,
1536 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1537 DAG.AddDbgValue(SDV, false);
1538 } else
1539 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1540 << printDDI(V, DDI)
1541 << " in EmitFuncArgumentDbgValue\n");
1542 } else {
1543 LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1544 << "\n");
1545 auto Poison = PoisonValue::get(V->getType());
1546 auto SDV =
1547 DAG.getConstantDbgValue(Variable, Expr, Poison, DL, DbgSDNodeOrder);
1548 DAG.AddDbgValue(SDV, false);
1549 }
1550 }
1551 DDIV.clear();
1552}
1553
1555 DanglingDebugInfo &DDI) {
1556 // TODO: For the variadic implementation, instead of only checking the fail
1557 // state of `handleDebugValue`, we need know specifically which values were
1558 // invalid, so that we attempt to salvage only those values when processing
1559 // a DIArgList.
1560 const Value *OrigV = V;
1561 DILocalVariable *Var = DDI.getVariable();
1562 DIExpression *Expr = DDI.getExpression();
1563 DebugLoc DL = DDI.getDebugLoc();
1564 unsigned SDOrder = DDI.getSDNodeOrder();
1565
1566 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1567 // that DW_OP_stack_value is desired.
1568 bool StackValue = true;
1569
1570 // Can this Value can be encoded without any further work?
1571 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1572 return;
1573
1574 // Attempt to salvage back through as many instructions as possible. Bail if
1575 // a non-instruction is seen, such as a constant expression or global
1576 // variable. FIXME: Further work could recover those too.
1577 while (isa<Instruction>(V)) {
1578 const Instruction &VAsInst = *cast<const Instruction>(V);
1579 // Temporary "0", awaiting real implementation.
1581 SmallVector<Value *, 4> AdditionalValues;
1582 V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1583 Expr->getNumLocationOperands(), Ops,
1584 AdditionalValues);
1585 // If we cannot salvage any further, and haven't yet found a suitable debug
1586 // expression, bail out.
1587 if (!V)
1588 break;
1589
1590 // TODO: If AdditionalValues isn't empty, then the salvage can only be
1591 // represented with a DBG_VALUE_LIST, so we give up. When we have support
1592 // here for variadic dbg_values, remove that condition.
1593 if (!AdditionalValues.empty())
1594 break;
1595
1596 // New value and expr now represent this debuginfo.
1597 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1598
1599 // Some kind of simplification occurred: check whether the operand of the
1600 // salvaged debug expression can be encoded in this DAG.
1601 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1602 LLVM_DEBUG(
1603 dbgs() << "Salvaged debug location info for:\n " << *Var << "\n"
1604 << *OrigV << "\nBy stripping back to:\n " << *V << "\n");
1605 return;
1606 }
1607 }
1608
1609 // This was the final opportunity to salvage this debug information, and it
1610 // couldn't be done. Place a poison DBG_VALUE at this location to terminate
1611 // any earlier variable location.
1612 assert(OrigV && "V shouldn't be null");
1613 auto *Poison = PoisonValue::get(OrigV->getType());
1614 auto *SDV = DAG.getConstantDbgValue(Var, Expr, Poison, DL, SDNodeOrder);
1615 DAG.AddDbgValue(SDV, false);
1616 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n "
1617 << printDDI(OrigV, DDI) << "\n");
1618}
1619
1621 DIExpression *Expr,
1622 DebugLoc DbgLoc,
1623 unsigned Order) {
1627 handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1628 /*IsVariadic*/ false);
1629}
1630
1632 DILocalVariable *Var,
1633 DIExpression *Expr, DebugLoc DbgLoc,
1634 unsigned Order, bool IsVariadic) {
1635 if (Values.empty())
1636 return true;
1637
1638 // Filter EntryValue locations out early.
1639 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1640 return true;
1641
1642 SmallVector<SDDbgOperand> LocationOps;
1643 SmallVector<SDNode *> Dependencies;
1644 for (const Value *V : Values) {
1645 // Constant value.
1648 LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1649 continue;
1650 }
1651
1652 // Look through IntToPtr constants.
1653 if (auto *CE = dyn_cast<ConstantExpr>(V))
1654 if (CE->getOpcode() == Instruction::IntToPtr) {
1655 LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1656 continue;
1657 }
1658
1659 // If the Value is a frame index, we can create a FrameIndex debug value
1660 // without relying on the DAG at all.
1661 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1662 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1663 if (SI != FuncInfo.StaticAllocaMap.end()) {
1664 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1665 continue;
1666 }
1667 }
1668
1669 // Do not use getValue() in here; we don't want to generate code at
1670 // this point if it hasn't been done yet.
1671 SDValue N = NodeMap[V];
1672 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1673 N = UnusedArgNodeMap[V];
1674
1675 if (N.getNode()) {
1676 // Only emit func arg dbg value for non-variadic dbg.values for now.
1677 if (!IsVariadic &&
1678 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1679 FuncArgumentDbgValueKind::Value, N))
1680 return true;
1681 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1682 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1683 // describe stack slot locations.
1684 //
1685 // Consider "int x = 0; int *px = &x;". There are two kinds of
1686 // interesting debug values here after optimization:
1687 //
1688 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
1689 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1690 //
1691 // Both describe the direct values of their associated variables.
1692 Dependencies.push_back(N.getNode());
1693 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1694 continue;
1695 }
1696 LocationOps.emplace_back(
1697 SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1698 continue;
1699 }
1700
1701 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1702 // Special rules apply for the first dbg.values of parameter variables in a
1703 // function. Identify them by the fact they reference Argument Values, that
1704 // they're parameters, and they are parameters of the current function. We
1705 // need to let them dangle until they get an SDNode.
1706 bool IsParamOfFunc =
1707 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1708 if (IsParamOfFunc)
1709 return false;
1710
1711 // The value is not used in this block yet (or it would have an SDNode).
1712 // We still want the value to appear for the user if possible -- if it has
1713 // an associated VReg, we can refer to that instead.
1714 auto VMI = FuncInfo.ValueMap.find(V);
1715 if (VMI != FuncInfo.ValueMap.end()) {
1716 Register Reg = VMI->second;
1717 // If this is a PHI node, it may be split up into several MI PHI nodes
1718 // (in FunctionLoweringInfo::set).
1719 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1720 V->getType(), std::nullopt);
1721 if (RFV.occupiesMultipleRegs()) {
1722 // FIXME: We could potentially support variadic dbg_values here.
1723 if (IsVariadic)
1724 return false;
1725 unsigned Offset = 0;
1726 unsigned BitsToDescribe = 0;
1727 if (auto VarSize = Var->getSizeInBits())
1728 BitsToDescribe = *VarSize;
1729 if (auto Fragment = Expr->getFragmentInfo())
1730 BitsToDescribe = Fragment->SizeInBits;
1731 for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1732 // Bail out if all bits are described already.
1733 if (Offset >= BitsToDescribe)
1734 break;
1735 // TODO: handle scalable vectors.
1736 unsigned RegisterSize = RegAndSize.second;
1737 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1738 ? BitsToDescribe - Offset
1739 : RegisterSize;
1740 auto FragmentExpr = DIExpression::createFragmentExpression(
1741 Expr, Offset, FragmentSize);
1742 if (!FragmentExpr)
1743 continue;
1744 SDDbgValue *SDV = DAG.getVRegDbgValue(
1745 Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1746 DAG.AddDbgValue(SDV, false);
1747 Offset += RegisterSize;
1748 }
1749 return true;
1750 }
1751 // We can use simple vreg locations for variadic dbg_values as well.
1752 LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1753 continue;
1754 }
1755 // We failed to create a SDDbgOperand for V.
1756 return false;
1757 }
1758
1759 // We have created a SDDbgOperand for each Value in Values.
1760 assert(!LocationOps.empty());
1761 SDDbgValue *SDV =
1762 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1763 /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1764 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1765 return true;
1766}
1767
1769 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1770 for (auto &Pair : DanglingDebugInfoMap)
1771 for (auto &DDI : Pair.second)
1772 salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1774}
1775
1776/// getCopyFromRegs - If there was virtual register allocated for the value V
1777/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1779 auto It = FuncInfo.ValueMap.find(V);
1780 SDValue Result;
1781
1782 if (It != FuncInfo.ValueMap.end()) {
1783 Register InReg = It->second;
1784
1785 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1786 DAG.getDataLayout(), InReg, Ty,
1787 std::nullopt); // This is not an ABI copy.
1788 SDValue Chain = DAG.getEntryNode();
1789 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1790 V);
1791 resolveDanglingDebugInfo(V, Result);
1792 }
1793
1794 return Result;
1795}
1796
1797/// getValue - Return an SDValue for the given Value.
1799 // If we already have an SDValue for this value, use it. It's important
1800 // to do this first, so that we don't create a CopyFromReg if we already
1801 // have a regular SDValue.
1802 SDValue &N = NodeMap[V];
1803 if (N.getNode()) return N;
1804
1805 // If there's a virtual register allocated and initialized for this
1806 // value, use it.
1807 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1808 return copyFromReg;
1809
1810 // Otherwise create a new SDValue and remember it.
1811 SDValue Val = getValueImpl(V);
1812 NodeMap[V] = Val;
1814 return Val;
1815}
1816
1817/// getNonRegisterValue - Return an SDValue for the given Value, but
1818/// don't look in FuncInfo.ValueMap for a virtual register.
// (Signature line elided in this listing.) Same as getValue() minus the
// vreg-map lookup; used where a CopyFromReg must not be materialized.
1820 // If we already have an SDValue for this value, use it.
1821 SDValue &N = NodeMap[V];
1822 if (N.getNode()) {
1823 if (isIntOrFPConstant(N)) {
1824 // Remove the debug location from the node as the node is about to be used
1825 // in a location which may differ from the original debug location. This
1826 // is relevant to Constant and ConstantFP nodes because they can appear
1827 // as constant expressions inside PHI nodes.
1828 N->setDebugLoc(DebugLoc());
1829 }
1830 return N;
1831 }
1832
1833 // Otherwise create a new SDValue and remember it.
1834 SDValue Val = getValueImpl(V);
// Re-index NodeMap; getValueImpl may have invalidated the reference N.
1835 NodeMap[V] = Val;
1837 return Val;
1838}
1839
1840/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1841/// Create an SDValue for the given value.
// (Signature line elided in this listing.) Dispatches on the kind of IR value:
// the long constant cascade first, then static allocas, deferred instructions,
// metadata and basic blocks.
1843 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1844
1845 if (const Constant *C = dyn_cast<Constant>(V)) {
1846 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1847
1848 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
1849 SDLoc DL = getCurSDLoc();
1850
1851 // DAG.getConstant() may attempt to legalise the vector constant which can
1852 // significantly change the combines applied to the DAG. To reduce the
1853 // divergence when enabling ConstantInt based vectors we try to construct
1854 // the DAG in the same way as shufflevector based splats. TODO: The
1855 // divergence sometimes leads to better optimisations. Ideally we should
1856 // prevent DAG.getConstant() from legalising too early but there are some
1857 // degradations preventing this.
1858 if (VT.isScalableVector())
1859 return DAG.getNode(
1860 ISD::SPLAT_VECTOR, DL, VT,
1861 DAG.getConstant(CI->getValue(), DL, VT.getVectorElementType()));
1862 if (VT.isFixedLengthVector())
1863 return DAG.getSplatBuildVector(
1864 VT, DL,
1865 DAG.getConstant(CI->getValue(), DL, VT.getVectorElementType()));
1866 return DAG.getConstant(*CI, DL, VT);
1867 }
1868
1869 if (const ConstantByte *CB = dyn_cast<ConstantByte>(C))
1870 return DAG.getConstant(CB->getValue(), getCurSDLoc(), VT);
1871
1872 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1873 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1874
1875 if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1876 return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1877 getValue(CPA->getPointer()), getValue(CPA->getKey()),
1878 getValue(CPA->getAddrDiscriminator()),
1879 getValue(CPA->getDiscriminator()));
1880 }
1881
// NOTE(review): the guard for this zero constant (original line 1882) is
// elided in this listing — confirm the condition in the original file.
1883 return DAG.getConstant(0, getCurSDLoc(), VT);
1884
1885 if (match(C, m_VScale()))
1886 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1887
1888 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1889 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1890
1891 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1892 return isa<PoisonValue>(C) ? DAG.getPOISON(VT) : DAG.getUNDEF(VT);
1893
1894 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
// Constant expressions are lowered by re-dispatching through visit(); the
// result lands in NodeMap[V].
1895 visit(CE->getOpcode(), *CE);
1896 SDValue N1 = NodeMap[V];
1897 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1898 return N1;
1899 }
1900
// NOTE(review): the aggregate-kind guard preceding this branch (original
// line 1901) is elided in this listing.
1902 SmallVector<SDValue, 4> Constants;
1903 for (const Use &U : C->operands()) {
1904 SDNode *Val = getValue(U).getNode();
1905 // If the operand is an empty aggregate, there are no values.
1906 if (!Val) continue;
1907 // Add each leaf value from the operand to the Constants list
1908 // to form a flattened list of all the values.
1909 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1910 Constants.push_back(SDValue(Val, i));
1911 }
1912
1913 return DAG.getMergeValues(Constants, getCurSDLoc());
1914 }
1915
1916 if (const ConstantDataSequential *CDS =
// NOTE(review): the dyn_cast operand and the Ops declaration (original lines
// 1917-1918) are elided in this listing.
1919 for (uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1920 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1921 // Add each leaf value from the operand to the Constants list
1922 // to form a flattened list of all the values.
1923 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1924 Ops.push_back(SDValue(Val, i));
1925 }
1926
// Arrays become a merged value list; vectors become a BUILD_VECTOR.
1927 if (isa<ArrayType>(CDS->getType()))
1928 return DAG.getMergeValues(Ops, getCurSDLoc());
1929 return DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1930 }
1931
1932 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1934 "Unknown struct or array constant!");
1935
1936 SmallVector<EVT, 4> ValueVTs;
1937 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1938 unsigned NumElts = ValueVTs.size();
1939 if (NumElts == 0)
1940 return SDValue(); // empty struct
1941 SmallVector<SDValue, 4> Constants(NumElts);
1942 for (unsigned i = 0; i != NumElts; ++i) {
1943 EVT EltVT = ValueVTs[i];
// Each leaf is either undef or the appropriately-typed zero.
1944 if (isa<UndefValue>(C))
1945 Constants[i] = DAG.getUNDEF(EltVT);
1946 else if (EltVT.isFloatingPoint())
1947 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1948 else
1949 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1950 }
1951
1952 return DAG.getMergeValues(Constants, getCurSDLoc());
1953 }
1954
1955 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1956 return DAG.getBlockAddress(BA, VT);
1957
1958 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1959 return getValue(Equiv->getGlobalValue());
1960
1961 if (const auto *NC = dyn_cast<NoCFIValue>(C))
1962 return getValue(NC->getGlobalValue());
1963
// Target-specific opaque types can only be zero-initialized here.
1964 if (VT == MVT::aarch64svcount) {
1965 assert(C->isNullValue() && "Can only zero this target type!");
1966 return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1967 DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1968 }
1969
1970 if (VT.isRISCVVectorTuple()) {
1971 assert(C->isNullValue() && "Can only zero this target type!");
1972 return DAG.getNode(
// NOTE(review): the node opcodes on original lines 1973 and 1975 are elided
// in this listing.
1974 DAG.getNode(
1976 EVT::getVectorVT(*DAG.getContext(), MVT::i8,
1977 VT.getSizeInBits().getKnownMinValue() / 8, true),
1978 DAG.getConstant(0, getCurSDLoc(), MVT::getIntegerVT(8))));
1979 }
1980
1981 VectorType *VecTy = cast<VectorType>(V->getType());
1982
1983 // Now that we know the number and type of the elements, get that number of
1984 // elements into the Ops array based on what kind of constant it is.
1985 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1987 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1988 for (unsigned i = 0; i != NumElements; ++i)
1989 Ops.push_back(getValue(CV->getOperand(i)));
1990
1991 return DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1992 }
1993
// NOTE(review): the guard for this splat branch (original line 1994) is
// elided — presumably a zero/undef aggregate check; confirm.
1995 EVT EltVT =
1996 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1997
1998 SDValue Op;
1999 if (EltVT.isFloatingPoint())
2000 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
2001 else
2002 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
2003
2004 return DAG.getSplat(VT, getCurSDLoc(), Op);
2005 }
2006
2007 llvm_unreachable("Unknown vector constant");
2008 }
2009
2010 // If this is a static alloca, generate it as the frameindex instead of
2011 // computation.
2012 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
2013 auto SI = FuncInfo.StaticAllocaMap.find(AI);
2014 if (SI != FuncInfo.StaticAllocaMap.end())
2015 return DAG.getFrameIndex(
2016 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
2017 }
2018
2019 // If this is an instruction which fast-isel has deferred, select it now.
2020 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
2021 Register InReg = FuncInfo.InitializeRegForValue(Inst);
2022 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
2023 Inst->getType(), std::nullopt);
2024 SDValue Chain = DAG.getEntryNode();
2025 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
2026 }
2027
2028 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
2029 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
2030
2031 if (const auto *BB = dyn_cast<BasicBlock>(V))
2032 return DAG.getBasicBlock(FuncInfo.getMBB(BB));
2033
2034 llvm_unreachable("Can't get register for value!");
2035}
2036
// Mark the catchpad's machine block according to the EH personality: SEH
// catchpads are EHCont-Guard targets, other personalities start an EH scope,
// and MSVC C++ / CoreCLR catch blocks are full funclets.
2037void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
// NOTE(review): Pers is initialized on an elided line (original 2038) —
// presumably classifyEHPersonality of the function's personality fn; confirm.
2039 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
2040 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
2041 bool IsSEH = isAsynchronousEHPersonality(Pers);
2042 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
2043 if (IsSEH) {
2044 // For SEH, EHCont Guard needs to know that this catchpad is a target.
2045 CatchPadMBB->setIsEHContTarget(true);
// NOTE(review): original line 2046 is elided here.
2047 } else
2048 CatchPadMBB->setIsEHScopeEntry();
2049 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
2050 if (IsMSVCCXX || IsCoreCLR)
2051 CatchPadMBB->setIsEHFuncletEntry();
2052}
2053
2054void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
2055 // Update machine-CFG edge.
2056 MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor());
2057 FuncInfo.MBB->addSuccessor(TargetMBB);
2058
2059 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2060 bool IsSEH = isAsynchronousEHPersonality(Pers);
2061 if (IsSEH) {
2062 // If this is not a fall-through branch or optimizations are switched off,
2063 // emit the branch.
2064 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
2065 TM.getOptLevel() == CodeGenOptLevel::None)
2066 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2067 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
2068 return;
2069 }
2070
2071 // For non-SEH, EHCont Guard needs to know that this catchret is a target.
2072 TargetMBB->setIsEHContTarget(true);
2073 DAG.getMachineFunction().setHasEHContTarget(true);
2074
2075 // Figure out the funclet membership for the catchret's successor.
2076 // This will be used by the FuncletLayout pass to determine how to order the
2077 // BB's.
2078 // A 'catchret' returns to the outer scope's color.
2079 Value *ParentPad = I.getCatchSwitchParentPad();
2080 const BasicBlock *SuccessorColor;
2081 if (isa<ConstantTokenNone>(ParentPad))
2082 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
2083 else
2084 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2085 assert(SuccessorColor && "No parent funclet for catchret!");
2086 MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor);
2087 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
2088
2089 // Create the terminator node.
2090 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
2091 getControlRoot(), DAG.getBasicBlock(TargetMBB),
2092 DAG.getBasicBlock(SuccessorColorMBB));
2093 DAG.setRoot(Ret);
2094}
2095
2096void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
2097 // Don't emit any special code for the cleanuppad instruction. It just marks
2098 // the start of an EH scope/funclet.
2099 FuncInfo.MBB->setIsEHScopeEntry();
2100 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2101 if (Pers != EHPersonality::Wasm_CXX) {
2102 FuncInfo.MBB->setIsEHFuncletEntry();
2103 FuncInfo.MBB->setIsCleanupFuncletEntry();
2104 }
2105}
2106
2107/// When an invoke or a cleanupret unwinds to the next EH pad, there are
2108/// many places it could ultimately go. In the IR, we have a single unwind
2109/// destination, but in the machine CFG, we enumerate all the possible blocks.
2110/// This function skips over imaginary basic blocks that hold catchswitch
2111/// instructions, and finds all the "real" machine
2112/// basic block destinations. As those destinations may not be successors of
2113/// EHPadBB, here we also calculate the edge probability to those destinations.
2114/// The passed-in Prob is the edge probability to EHPadBB.
// NOTE(review): the function header (original line 2115) is elided in this
// listing; the parameter list below starts mid-signature.
2116 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2117 BranchProbability Prob,
2118 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2119 &UnwindDests) {
2120 EHPersonality Personality =
// NOTE(review): the classifyEHPersonality argument (original line 2121) is
// elided here.
2122 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2123 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2124 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2125 bool IsSEH = isAsynchronousEHPersonality(Personality);
2126
// Walk the chain of unwind destinations, accumulating real machine blocks
// into UnwindDests and scaling Prob along each hop.
2127 while (EHPadBB) {
// NOTE(review): the declaration of Pad (original line 2128) is elided —
// presumably the first non-PHI instruction of EHPadBB; confirm.
2129 BasicBlock *NewEHPadBB = nullptr;
2130 if (isa<LandingPadInst>(Pad)) {
2131 // Stop on landingpads. They are not funclets.
2132 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2133 break;
2134 } else if (isa<CleanupPadInst>(Pad)) {
2135 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2136 // personalities except Wasm. And in Wasm this becomes a catch_all(_ref),
2137 // which always catches an exception.
2138 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2139 UnwindDests.back().first->setIsEHScopeEntry();
2140 // In Wasm, EH scopes are not funclets
2141 if (!IsWasmCXX)
2142 UnwindDests.back().first->setIsEHFuncletEntry();
2143 break;
2144 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2145 // Add the catchpad handlers to the possible destinations.
2146 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2147 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2148 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2149 if (IsMSVCCXX || IsCoreCLR)
2150 UnwindDests.back().first->setIsEHFuncletEntry();
2151 if (!IsSEH)
2152 UnwindDests.back().first->setIsEHScopeEntry();
2153 }
2154 NewEHPadBB = CatchSwitch->getUnwindDest();
2155 } else {
// NOTE(review): this `continue` re-tests the same EHPadBB — believed
// unreachable since an EH pad's first non-PHI must be one of the kinds
// handled above; confirm.
2156 continue;
2157 }
2158
2159 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2160 if (BPI && NewEHPadBB)
2161 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2162 EHPadBB = NewEHPadBB;
2163 }
2164}
2165
// Lower a 'cleanupret': expand its single IR unwind destination into all real
// machine unwind destinations, wire up successor probabilities, then emit a
// CLEANUPRET terminator referencing the owning cleanuppad block.
2166void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2167 // Update successor info.
// NOTE(review): the UnwindDests declaration (original line 2168) is elided in
// this listing.
2169 auto UnwindDest = I.getUnwindDest();
2170 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2171 BranchProbability UnwindDestProb =
2172 (BPI && UnwindDest)
2173 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
// NOTE(review): the else-arm of this conditional (original line 2174) is
// elided here.
2175 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2176 for (auto &UnwindDest : UnwindDests) {
2177 UnwindDest.first->setIsEHPad();
2178 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2179 }
2180 FuncInfo.MBB->normalizeSuccProbs();
2181
2182 // Create the terminator node.
2183 MachineBasicBlock *CleanupPadMBB =
2184 FuncInfo.getMBB(I.getCleanupPad()->getParent());
2185 SDValue Ret = DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other,
2186 getControlRoot(), DAG.getBasicBlock(CleanupPadMBB));
2187 DAG.setRoot(Ret);
2188}
2189
// Direct lowering of a catchswitch is unsupported; reaching this visitor
// aborts compilation with a fatal error.
2190void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2191 report_fatal_error("visitCatchSwitch not yet implemented!");
2192}
2193
// Lower a 'ret' instruction: either store the return value through the
// sret demote register (when the target cannot lower it in registers),
// or split it into legal register parts in Outs/OutVals, append the
// swifterror vreg if needed, and hand everything to TLI.LowerReturn.
2194void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2195 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2196 auto &DL = DAG.getDataLayout();
2197 SDValue Chain = getControlRoot();
// NOTE(review): the declarations of Outs/OutVals (original lines 2198-2199)
// are elided in this listing.
2200
2201 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2202 // lower
2203 //
2204 // %val = call <ty> @llvm.experimental.deoptimize()
2205 // ret <ty> %val
2206 //
2207 // differently.
2208 if (I.getParent()->getTerminatingDeoptimizeCall()) {
// NOTE(review): the deoptimize-lowering call (original line 2209) is elided
// here.
2210 return;
2211 }
2212
2213 if (!FuncInfo.CanLowerReturn) {
2214 Register DemoteReg = FuncInfo.DemoteRegister;
2215
2216 // Emit a store of the return value through the virtual register.
2217 // Leave Outs empty so that LowerReturn won't try to load return
2218 // registers the usual way.
2219 MVT PtrValueVT = TLI.getPointerTy(DL, DL.getAllocaAddrSpace());
2220 SDValue RetPtr =
2221 DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVT);
2222 SDValue RetOp = getValue(I.getOperand(0));
2223
2224 SmallVector<EVT, 4> ValueVTs, MemVTs;
2225 SmallVector<uint64_t, 4> Offsets;
2226 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2227 &Offsets, 0);
2228 unsigned NumValues = ValueVTs.size();
2229
// One store (and chain) per leaf value of the aggregate return.
2230 SmallVector<SDValue, 4> Chains(NumValues);
2231 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2232 for (unsigned i = 0; i != NumValues; ++i) {
2233 // An aggregate return value cannot wrap around the address space, so
2234 // offsets to its parts don't wrap either.
2235 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2236 TypeSize::getFixed(Offsets[i]));
2237
2238 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2239 if (MemVTs[i] != ValueVTs[i])
2240 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2241 Chains[i] = DAG.getStore(
2242 Chain, getCurSDLoc(), Val,
2243 // FIXME: better loc info would be nice.
2244 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2245 commonAlignment(BaseAlign, Offsets[i]));
2246 }
2247
2248 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2249 MVT::Other, Chains);
2250 } else if (I.getNumOperands() != 0) {
// NOTE(review): the Types declaration (original line 2251) is elided in this
// listing.
2252 ComputeValueTypes(DL, I.getOperand(0)->getType(), Types);
2253 unsigned NumValues = Types.size();
2254 if (NumValues) {
2255 SDValue RetOp = getValue(I.getOperand(0));
2256
2257 const Function *F = I.getParent()->getParent();
2258
2259 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2260 I.getOperand(0)->getType(), F->getCallingConv(),
2261 /*IsVarArg*/ false, DL);
2262
// The sext/zext return attributes determine how narrow integers are widened.
2263 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2264 if (F->getAttributes().hasRetAttr(Attribute::SExt))
2265 ExtendKind = ISD::SIGN_EXTEND;
2266 else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2267 ExtendKind = ISD::ZERO_EXTEND;
2268
2269 LLVMContext &Context = F->getContext();
2270 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2271
// Split each leaf value into the register parts the calling convention
// requires, tagging each part with matching argument flags.
2272 for (unsigned j = 0; j != NumValues; ++j) {
2273 EVT VT = TLI.getValueType(DL, Types[j]);
2274
2275 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2276 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2277
2278 CallingConv::ID CC = F->getCallingConv();
2279
2280 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2281 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2282 SmallVector<SDValue, 4> Parts(NumParts);
// NOTE(review): the getCopyToParts call line (original 2283) is elided here.
2284 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2285 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2286
2287 // 'inreg' on function refers to return value
2288 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2289 if (RetInReg)
2290 Flags.setInReg();
2291
2292 if (I.getOperand(0)->getType()->isPointerTy()) {
2293 Flags.setPointer();
2294 Flags.setPointerAddrSpace(
2295 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2296 }
2297
2298 if (NeedsRegBlock) {
2299 Flags.setInConsecutiveRegs();
2300 if (j == NumValues - 1)
2301 Flags.setInConsecutiveRegsLast();
2302 }
2303
2304 // Propagate extension type if any
2305 if (ExtendKind == ISD::SIGN_EXTEND)
2306 Flags.setSExt();
2307 else if (ExtendKind == ISD::ZERO_EXTEND)
2308 Flags.setZExt();
2309 else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
2310 Flags.setNoExt();
2311
2312 for (unsigned i = 0; i < NumParts; ++i) {
2313 Outs.push_back(ISD::OutputArg(Flags,
2314 Parts[i].getValueType().getSimpleVT(),
2315 VT, Types[j], 0, 0));
2316 OutVals.push_back(Parts[i]);
2317 }
2318 }
2319 }
2320 }
2321
2322 // Push in swifterror virtual register as the last element of Outs. This makes
2323 // sure swifterror virtual register will be returned in the swifterror
2324 // physical register.
2325 const Function *F = I.getParent()->getParent();
2326 if (TLI.supportSwiftError() &&
2327 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2328 assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2329 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2330 Flags.setSwiftError();
2331 Outs.push_back(ISD::OutputArg(Flags, /*vt=*/TLI.getPointerTy(DL),
2332 /*argvt=*/EVT(TLI.getPointerTy(DL)),
2333 PointerType::getUnqual(*DAG.getContext()),
2334 /*origidx=*/1, /*partOffs=*/0));
2335 // Create SDNode for the swifterror virtual register.
2336 OutVals.push_back(
2337 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2338 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2339 EVT(TLI.getPointerTy(DL))));
2340 }
2341
2342 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2343 CallingConv::ID CallConv =
2344 DAG.getMachineFunction().getFunction().getCallingConv();
2345 Chain = DAG.getTargetLoweringInfo().LowerReturn(
2346 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2347
2348 // Verify that the target's LowerReturn behaved as expected.
2349 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2350 "LowerReturn didn't return a valid chain!");
2351
2352 // Update the DAG with the new chain value resulting from return lowering.
2353 DAG.setRoot(Chain);
2354}
2355
2356/// CopyToExportRegsIfNeeded - If the given value has virtual registers
2357/// created for it, emit nodes to copy the value into the virtual
2358/// registers.
// (Signature line elided in this listing.)
2360 // Skip empty types
2361 if (V->getType()->isEmptyTy())
2362 return;
2363
2364 auto VMI = FuncInfo.ValueMap.find(V);
2365 if (VMI != FuncInfo.ValueMap.end()) {
// A vreg should only exist for values that are actually used (callbr results
// are the accepted exception).
2366 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2367 "Unused value assigned virtual registers!");
2368 CopyValueToVirtualRegister(V, VMI->second);
2369 }
2370}
2371
2372/// ExportFromCurrentBlock - If this condition isn't known to be exported from
2373/// the current basic block, add it to ValueMap now so that we'll get a
2374/// CopyTo/FromReg.
// (Signature line elided in this listing.)
2376 // No need to export constants.
2377 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2378
2379 // Already exported?
2380 if (FuncInfo.isExportedInst(V)) return;
2381
2382 Register Reg = FuncInfo.InitializeRegForValue(V);
// NOTE(review): the statement using Reg (original line 2383) is elided in
// this listing — presumably the copy into the new vreg; confirm.
2384}
2385
// Returns true when V can be made available outside FromBB: instructions in
// FromBB (or already exported), arguments in the entry block (or already
// exported), and constants always. NOTE(review): the first line of this
// function's signature (original line 2386) is elided in this listing.
2387 const BasicBlock *FromBB) {
2388 // The operands of the setcc have to be in this block. We don't know
2389 // how to export them from some other block.
2390 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2391 // Can export from current BB.
2392 if (VI->getParent() == FromBB)
2393 return true;
2394
2395 // Is already exported, noop.
2396 return FuncInfo.isExportedInst(V);
2397 }
2398
2399 // If this is an argument, we can export it if the BB is the entry block or
2400 // if it is already exported.
2401 if (isa<Argument>(V)) {
2402 if (FromBB->isEntryBlock())
2403 return true;
2404
2405 // Otherwise, can only export this if it is already exported.
2406 return FuncInfo.isExportedInst(V);
2407 }
2408
2409 // Otherwise, constants can always be exported.
2410 return true;
2411}
2412
2413/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
// NOTE(review): the return-type line of this signature (original line 2414)
// is elided in this listing.
2415SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2416 const MachineBasicBlock *Dst) const {
// NOTE(review): the declaration of BPI (original line 2417) is elided —
// presumably FuncInfo.BPI; confirm.
2418 const BasicBlock *SrcBB = Src->getBasicBlock();
2419 const BasicBlock *DstBB = Dst->getBasicBlock();
2420 if (!BPI) {
2421 // If BPI is not available, set the default probability as 1 / N, where N is
2422 // the number of successors.
2423 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2424 return BranchProbability(1, SuccSize);
2425 }
2426 return BPI->getEdgeProbability(SrcBB, DstBB);
2427}
2428
2429void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2430 MachineBasicBlock *Dst,
2431 BranchProbability Prob) {
2432 if (!FuncInfo.BPI)
2433 Src->addSuccessorWithoutProb(Dst);
2434 else {
2435 if (Prob.isUnknown())
2436 Prob = getEdgeProbability(Src, Dst);
2437 Src->addSuccessor(Dst, Prob);
2438 }
2439}
2440
2441static bool InBlock(const Value *V, const BasicBlock *BB) {
2442 if (const Instruction *I = dyn_cast<Instruction>(V))
2443 return I->getParent() == BB;
2444 return true;
2445}
2446
2447/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2448/// This function emits a branch and is used at the leaves of an OR or an
2449/// AND operator tree.
2450void
// NOTE(review): the qualified name and first parameters of this signature
// (original lines 2451-2452) are elided in this listing.
2453 MachineBasicBlock *FBB,
2454 MachineBasicBlock *CurBB,
2455 MachineBasicBlock *SwitchBB,
2456 BranchProbability TProb,
2457 BranchProbability FProb,
2458 bool InvertCond) {
2459 const BasicBlock *BB = CurBB->getBasicBlock();
2460
2461 // If the leaf of the tree is a comparison, merge the condition into
2462 // the caseblock.
2463 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2464 // The operands of the cmp have to be in this block. We don't know
2465 // how to export them from some other block. If this is the first block
2466 // of the sequence, no exporting is needed.
2467 if (CurBB == SwitchBB ||
2468 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2469 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
// Translate the IR predicate to an ISD condition code, applying the
// requested inversion at the predicate level.
2470 ISD::CondCode Condition;
2471 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2472 ICmpInst::Predicate Pred =
2473 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2474 Condition = getICmpCondCode(Pred);
2475 } else {
2476 const FCmpInst *FC = cast<FCmpInst>(Cond);
2477 FCmpInst::Predicate Pred =
2478 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2479 Condition = getFCmpCondCode(Pred);
// When NaNs are provably absent, the unordered/ordered distinction can be
// dropped for a cheaper compare.
2480 if (FC->hasNoNaNs() ||
2481 (isKnownNeverNaN(FC->getOperand(0),
2482 SimplifyQuery(DAG.getDataLayout(), FC)) &&
2483 isKnownNeverNaN(FC->getOperand(1),
2484 SimplifyQuery(DAG.getDataLayout(), FC))))
2485 Condition = getFCmpCodeWithoutNaN(Condition);
2486 }
2487
2488 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2489 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2490 SL->SwitchCases.push_back(CB);
2491 return;
2492 }
2493 }
2494
2495 // Create a CaseBlock record representing this branch.
// Fallback: compare the (possibly inverted) condition value against 'true'.
2496 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2497 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2498 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2499 SL->SwitchCases.push_back(CB);
2500}
2501
2502// Collect dependencies on V recursively. This is used for the cost analysis in
2503// `shouldKeepJumpConditionsTogether`.
// Returns false when the collection is cut short (recursion limit), so the
// caller knows the dependency count is incomplete. NOTE(review): the
// signature lines (original 2504-2506) are elided in this listing.
2507 unsigned Depth = 0) {
2508 // Return false if we have an incomplete count.
// NOTE(review): the depth-limit check (original line 2509) is elided here.
2510 return false;
2511
2512 auto *I = dyn_cast<Instruction>(V);
// Non-instruction values (constants, arguments) are free; nothing to record.
2513 if (I == nullptr)
2514 return true;
2515
2516 if (Necessary != nullptr) {
2517 // This instruction is necessary for the other side of the condition so
2518 // don't count it.
2519 if (Necessary->contains(I))
2520 return true;
2521 }
2522
2523 // Already added this dep.
2524 if (!Deps->try_emplace(I, false).second)
2525 return true;
2526
2527 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2528 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2529 Depth + 1))
2530 return false;
2531 return true;
2532}
2533
// Cost model deciding whether an and/or of two conditions should stay a
// single branch instead of being split: estimates the extra latency of
// always computing the RHS against a (bias-adjusted) threshold.
// NOTE(review): the signature lines around this parameter list (original
// 2534, 2537) are elided in this listing.
2536 Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2538 if (Params.BaseCost < 0)
2539 return false;
2540
2541 // Baseline cost.
2542 InstructionCost CostThresh = Params.BaseCost;
2543
2544 BranchProbabilityInfo *BPI = nullptr;
2545 if (Params.LikelyBias || Params.UnlikelyBias)
2546 BPI = FuncInfo.BPI;
2547 if (BPI != nullptr) {
2548 // See if we are either likely to get an early out or compute both lhs/rhs
2549 // of the condition.
2550 BasicBlock *IfFalse = I.getSuccessor(0);
2551 BasicBlock *IfTrue = I.getSuccessor(1);
2552
2553 std::optional<bool> Likely;
2554 if (BPI->isEdgeHot(I.getParent(), IfTrue))
2555 Likely = true;
2556 else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2557 Likely = false;
2558
2559 if (Likely) {
2560 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2561 // Its likely we will have to compute both lhs and rhs of condition
2562 CostThresh += Params.LikelyBias;
2563 else {
2564 if (Params.UnlikelyBias < 0)
2565 return false;
2566 // Its likely we will get an early out.
2567 CostThresh -= Params.UnlikelyBias;
2568 }
2569 }
2570 }
2571
2572 if (CostThresh <= 0)
2573 return false;
2574
2575 // Collect "all" instructions that lhs condition is dependent on.
2576 // Use map for stable iteration (to avoid non-determanism of iteration of
2577 // SmallPtrSet). The `bool` value is just a dummy.
// NOTE(review): the declaration of LhsDeps/RhsDeps (original line 2578) is
// elided in this listing.
2579 collectInstructionDeps(&LhsDeps, Lhs);
2580 // Collect "all" instructions that rhs condition is dependent on AND are
2581 // dependencies of lhs. This gives us an estimate on which instructions we
2582 // stand to save by splitting the condition.
2583 if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2584 return false;
2585 // Add the compare instruction itself unless its a dependency on the LHS.
2586 if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2587 if (!LhsDeps.contains(RhsI))
2588 RhsDeps.try_emplace(RhsI, false);
2589
2590 InstructionCost CostOfIncluding = 0;
2591 // See if this instruction will need to computed independently of whether RHS
2592 // is.
2593 Value *BrCond = I.getCondition();
2594 auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2595 for (const auto *U : Ins->users()) {
2596 // If user is independent of RHS calculation we don't need to count it.
2597 if (auto *UIns = dyn_cast<Instruction>(U))
2598 if (UIns != BrCond && !RhsDeps.contains(UIns))
2599 return false;
2600 }
2601 return true;
2602 };
2603
2604 // Prune instructions from RHS Deps that are dependencies of unrelated
2605 // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
2606 // arbitrary and just meant to cap the how much time we spend in the pruning
2607 // loop. Its highly unlikely to come into affect.
2608 const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
2609 // Stop after a certain point. No incorrectness from including too many
2610 // instructions.
2611 for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2612 const Instruction *ToDrop = nullptr;
2613 for (const auto &InsPair : RhsDeps) {
2614 if (!ShouldCountInsn(InsPair.first)) {
2615 ToDrop = InsPair.first;
2616 break;
2617 }
2618 }
2619 if (ToDrop == nullptr)
2620 break;
2621 RhsDeps.erase(ToDrop);
2622 }
2623
2624 for (const auto &InsPair : RhsDeps) {
2625 // Finally accumulate latency that we can only attribute to computing the
2626 // RHS condition. Use latency because we are essentially trying to calculate
2627 // the cost of the dependency chain.
2628 // Possible TODO: We could try to estimate ILP and make this more precise.
2629 CostOfIncluding += TTI->getInstructionCost(
2630 InsPair.first, TargetTransformInfo::TCK_Latency);
2631
2632 if (CostOfIncluding > CostThresh)
2633 return false;
2634 }
2635 return true;
2636}
2637
2640 MachineBasicBlock *FBB,
2641 MachineBasicBlock *CurBB,
2642 MachineBasicBlock *SwitchBB,
2644 BranchProbability TProb,
2645 BranchProbability FProb,
2646 bool InvertCond) {
2647 // Skip over not part of the tree and remember to invert op and operands at
2648 // next level.
2649 Value *NotCond;
2650 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2651 InBlock(NotCond, CurBB->getBasicBlock())) {
2652 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2653 !InvertCond);
2654 return;
2655 }
2656
2658 const Value *BOpOp0, *BOpOp1;
2659 // Compute the effective opcode for Cond, taking into account whether it needs
2660 // to be inverted, e.g.
2661 // and (not (or A, B)), C
2662 // gets lowered as
2663 // and (and (not A, not B), C)
2665 if (BOp) {
2666 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2667 ? Instruction::And
2668 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2669 ? Instruction::Or
2671 if (InvertCond) {
2672 if (BOpc == Instruction::And)
2673 BOpc = Instruction::Or;
2674 else if (BOpc == Instruction::Or)
2675 BOpc = Instruction::And;
2676 }
2677 }
2678
2679 // If this node is not part of the or/and tree, emit it as a branch.
2680 // Note that all nodes in the tree should have same opcode.
2681 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2682 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2683 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2684 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2685 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2686 TProb, FProb, InvertCond);
2687 return;
2688 }
2689
2690 // Create TmpBB after CurBB.
2691 MachineFunction::iterator BBI(CurBB);
2692 MachineFunction &MF = DAG.getMachineFunction();
2694 CurBB->getParent()->insert(++BBI, TmpBB);
2695
2696 if (Opc == Instruction::Or) {
2697 // Codegen X | Y as:
2698 // BB1:
2699 // jmp_if_X TBB
2700 // jmp TmpBB
2701 // TmpBB:
2702 // jmp_if_Y TBB
2703 // jmp FBB
2704 //
2705
2706 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2707 // The requirement is that
2708 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2709 // = TrueProb for original BB.
2710 // Assuming the original probabilities are A and B, one choice is to set
2711 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2712 // A/(1+B) and 2B/(1+B). This choice assumes that
2713 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2714 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2715 // TmpBB, but the math is more complicated.
2716
2717 auto NewTrueProb = TProb / 2;
2718 auto NewFalseProb = TProb / 2 + FProb;
2719 // Emit the LHS condition.
2720 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2721 NewFalseProb, InvertCond);
2722
2723 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2724 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2726 // Emit the RHS condition into TmpBB.
2727 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2728 Probs[1], InvertCond);
2729 } else {
2730 assert(Opc == Instruction::And && "Unknown merge op!");
2731 // Codegen X & Y as:
2732 // BB1:
2733 // jmp_if_X TmpBB
2734 // jmp FBB
2735 // TmpBB:
2736 // jmp_if_Y TBB
2737 // jmp FBB
2738 //
2739 // This requires creation of TmpBB after CurBB.
2740
2741 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2742 // The requirement is that
2743 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2744 // = FalseProb for original BB.
2745 // Assuming the original probabilities are A and B, one choice is to set
2746 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2747 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2748 // TrueProb for BB1 * FalseProb for TmpBB.
2749
2750 auto NewTrueProb = TProb + FProb / 2;
2751 auto NewFalseProb = FProb / 2;
2752 // Emit the LHS condition.
2753 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2754 NewFalseProb, InvertCond);
2755
2756 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2757 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2759 // Emit the RHS condition into TmpBB.
2760 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2761 Probs[1], InvertCond);
2762 }
2763}
2764
2765/// If the set of cases should be emitted as a series of branches, return true.
2766/// If we should emit this as a bunch of and/or'd together conditions, return
2767/// false.
2768bool
2769SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2770 if (Cases.size() != 2) return true;
2771
2772 // If this is two comparisons of the same values or'd or and'd together, they
2773 // will get folded into a single comparison, so don't emit two blocks.
2774 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2775 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2776 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2777 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2778 return false;
2779 }
2780
2781 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2782 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2783 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2784 Cases[0].CC == Cases[1].CC &&
2785 isa<Constant>(Cases[0].CmpRHS) &&
2786 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2787 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2788 return false;
2789 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2790 return false;
2791 }
2792
2793 return true;
2794}
2795
/// visitUncondBr - Lower an unconditional IR branch: record the machine-CFG
/// edge to the sole successor and emit an ISD::BR node unless the successor
/// is the fall-through block.
void SelectionDAGBuilder::visitUncondBr(const UncondBrInst &I) {
  // NOTE(review): a line is missing from this extract here — BrMBB is used
  // below but never defined in the visible text (presumably
  // `MachineBasicBlock *BrMBB = FuncInfo.MBB;`); confirm against upstream.

  MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));

  // Update machine-CFG edges.
  BrMBB->addSuccessor(Succ0MBB);

  // If this is not a fall-through branch or optimizations are switched off,
  // emit the branch.
  if (Succ0MBB != NextBlock(BrMBB) ||
      // NOTE(review): the second operand of this '||' was dropped from this
      // extract (the condition is truncated); confirm against upstream.
    auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Succ0MBB));
    setValue(&I, Br);
    DAG.setRoot(Br);
  }
}
2814
/// visitCondBr - Lower a conditional IR branch. A one-use logical and/or
/// condition tree may be split into a sequence of conditional branches via
/// FindMergedConditions; otherwise a single CaseBlock is created and lowered
/// through visitSwitchCase.
void SelectionDAGBuilder::visitCondBr(const CondBrInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
  const Instruction *BOp = dyn_cast<Instruction>(CondVal);
  if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
      BOp->hasOneUse() && !IsUnpredictable) {
    Value *Vec;
    const Value *BOp0, *BOp1;
    // NOTE(review): the declaration/initialization of `Opcode` was dropped
    // from this extract (a line is missing here); confirm against upstream.
    if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    // Reject the split when both operands are extracts of the same vector,
    // and give the target a chance to veto via its merging heuristic.
    if (Opcode &&
        !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
          match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
        // NOTE(review): a line is missing here — presumably the call to a
        // target/heuristic hook to which the arguments below belong; confirm
        // against upstream.
            FuncInfo, I, Opcode, BOp0, BOp1,
            DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
                Opcode, BOp0, BOp1))) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
                           /*InvertCond=*/false);
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SL->SwitchCases)) {
        for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SL->SwitchCases[0], BrMBB);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
        FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
               // NOTE(review): a line of CaseBlock constructor arguments was
               // dropped from this extract here; confirm against upstream.
               IsUnpredictable);

  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB, BrMBB);
}
2902
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
// NOTE(review): the first line of this definition (function name and leading
// CaseBlock parameter) is missing from this extract; confirm upstream.
                                          MachineBasicBlock *SwitchBB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  SDLoc dl = CB.DL;

  // SETTRUE means "always taken": just branch (or fall through) to TrueBB.
  if (CB.CC == ISD::SETTRUE) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
    SwitchBB->normalizeSuccProbs();
    if (CB.TrueBB != NextBlock(SwitchBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(CB.TrueBB)));
    }
    return;
  }

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());

  // Build the setcc now.
  if (!CB.CmpMHS) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      // !X is emitted as X xor 1.
      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else {
      SDValue CondRHS = getValue(CB.CmpRHS);

      // If a pointer's DAG type is larger than its memory type then the DAG
      // values are zero-extended. This breaks signed comparisons so truncate
      // back to the underlying type before doing the compare.
      if (CondLHS.getValueType() != MemVT) {
        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
      }
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
    }
  } else {
    // Range case: test Low <= MHS <= High, emitted below as an unsigned
    // compare of the biased value (MHS - Low) against (High - Low).
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    // When Low is the signed minimum value the lower-bound check is vacuous.
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
    }
  }

  // Update successor info
  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
  SwitchBB->normalizeSuccProbs();

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock(SwitchBB)) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDNodeFlags Flags;
  // NOTE(review): a line is missing from this extract here — presumably it
  // sets a flag (e.g. unpredictability) on Flags; confirm against upstream.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
                               Cond, DAG.getBasicBlock(CB.TrueBB), Flags);

  setValue(CurInst, BrCond);

  // Insert the false branch. Do this even if it's a fall through branch,
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}
2999
/// visitJumpTable - Emit JumpTable node in the current MBB
// NOTE(review): the signature line of this definition is missing from this
// extract (presumably
// `void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {`);
// confirm against upstream.
  // Emit the code for the jump table
  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  assert(JT.Reg && "Should lower JT Header first!");
  // Read back the index that visitJumpTableHeader copied into JT.Reg, in the
  // target's jump-table register type.
  EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  // BR_JT consumes the chain from the CopyFromReg, the table, and the index,
  // and becomes this block's terminator.
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
                                    Index.getValue(1), Table, Index);
  DAG.setRoot(BrJumpTable);
}
3012
/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
// NOTE(review): the first line of this definition (function name and first
// parameter) is missing from this extract; confirm against upstream.
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  const SDLoc &dl = *JT.SL;

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp =
      DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));

  Register JumpTableReg =
      FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
  SDValue CopyTo =
      DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
  // Record the register so visitJumpTable can read the index back.
  JT.Reg = JumpTableReg;

  if (!JTH.FallthroughUnreachable) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Default is unreachable, so the range check can be skipped entirely.
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}
3070
/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
// NOTE(review): the first line of this definition (return type, name and
// leading parameters) is missing from this extract; confirm against upstream.
                              SDValue &Chain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
  Value *Global =
      // NOTE(review): lines are missing from this extract here — Global's
      // initializer and the definition of `Node` were dropped; confirm
      // against upstream.
      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
  if (Global) {
    // Attach a memory operand for the guard global so the machine node is
    // known to access that location.
    MachinePointerInfo MPInfo(Global);
    // NOTE(review): lines are missing from this extract here — the
    // MachineMemOperand creation and its flags were dropped; confirm.
        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
    DAG.setNodeMemRefs(Node, {MemRef});
  }
  // If the pointer's DAG type is wider than its in-memory type, narrow the
  // result back to the memory type.
  if (PtrTy != PtrMemTy)
    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
  return SDValue(Node, 0);
}
3095
/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
// NOTE(review): the first line of this definition (function name and first
// parameter) is missing from this extract; confirm against upstream.
                                                  MachineBasicBlock *ParentBB) {

  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  EVT PtrTy = TLI.getFrameIndexTy(DL);
  EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  SDValue Guard;
  SDLoc dl = getCurSDLoc();
  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL.getPrefTypeAlign(
      PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));

  // Generate code to load the content of the guard slot.
  SDValue GuardVal = DAG.getLoad(
      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
      // NOTE(review): the final argument (memory-operand flags) was dropped
      // from this extract; confirm against upstream.

  if (TLI.useStackGuardXorFP())
    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

  // If we're using function-based instrumentation, call the guard check
  // function
  // NOTE(review): the `if (...)` line guarding this region was dropped from
  // this extract (its closing brace is visible below); confirm upstream.
    // Get the guard check function from the target and verify it exists since
    // we're using function-based instrumentation
    const Function *GuardCheckFn =
        TLI.getSSPStackGuardCheck(M, DAG.getLibcalls());
    assert(GuardCheckFn && "Guard check function is null");

    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    // NOTE(review): the declaration of `Args` was dropped from this extract.
    TargetLowering::ArgListEntry Entry(GuardVal, FnTy->getParamType(0));
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    // NOTE(review): the declaration of `CLI` and the first chained setter
    // were dropped from this extract; confirm against upstream.
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode(M)) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    if (const Value *IRGuard = TLI.getSDagStackGuard(M, DAG.getLibcalls())) {
      SDValue GuardPtr = getValue(IRGuard);
      Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                          MachinePointerInfo(IRGuard, 0), Align,
                          // NOTE(review): trailing argument(s) dropped from
                          // this extract; confirm against upstream.
    } else {
      // No usable guard source: diagnose and continue with a poison value.
      LLVMContext &Ctx = *DAG.getContext();
      Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
      Guard = DAG.getPOISON(PtrMemTy);
    }
  }

  // Perform the comparison via a getsetcc.
  SDValue Cmp = DAG.getSetCC(
      dl, TLI.getSetCCResultType(DL, *DAG.getContext(), Guard.getValueType()),
      Guard, GuardVal, ISD::SETNE);

  // If the guard/stackslot do not equal, branch to failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}
3195
/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
// NOTE(review): the signature of this definition is missing from this
// extract (presumably `void SelectionDAGBuilder::visitSPDescriptorFailure(
// StackProtectorDescriptor &SPD) {`); confirm against upstream.

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MachineBasicBlock *ParentBB = SPD.getParentMBB();
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  SDValue Chain;

  // For -Oz builds with a guard check function, we use function-based
  // instrumentation. Otherwise, if we have a guard check function, we call it
  // in the failure block.
  auto *GuardCheckFn = TLI.getSSPStackGuardCheck(M, DAG.getLibcalls());
  if (GuardCheckFn && !SPD.shouldEmitFunctionBasedCheckStackProtector()) {
    // First create the loads to the guard/stack slot for the comparison.
    auto &DL = DAG.getDataLayout();
    EVT PtrTy = TLI.getFrameIndexTy(DL);
    EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());

    MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
    int FI = MFI.getStackProtectorIndex();

    SDLoc dl = getCurSDLoc();
    SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
    Align Align = DL.getPrefTypeAlign(
        PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));

    // Generate code to load the content of the guard slot.
    SDValue GuardVal = DAG.getLoad(
        PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
        // NOTE(review): the final argument (memory-operand flags) was
        // dropped from this extract; confirm against upstream.

    if (TLI.useStackGuardXorFP())
      GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    // NOTE(review): the declaration of `Args` was dropped from this extract.
    TargetLowering::ArgListEntry Entry(GuardVal, FnTy->getParamType(0));
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    // NOTE(review): the declaration of `CLI` and the first chained setter
    // were dropped from this extract; confirm against upstream.
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    Chain = TLI.LowerCallTo(CLI).second;
  } else {
    // NOTE(review): the declaration of `CallOptions` was dropped from this
    // extract; confirm against upstream.
    CallOptions.setDiscardResult(true);
    Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                            {}, CallOptions, getCurSDLoc())
                .second;
  }

  // Emit a trap instruction if we are required to do so.
  const TargetOptions &TargetOpts = DAG.getTarget().Options;
  if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}
3273
/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
// NOTE(review): the first line of this definition (function name and first
// parameter) is missing from this extract; confirm against upstream.
                                               MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue RangeSub =
      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));

  // Determine the type of the test operands.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (const BitTestCase &Case : B.Cases)
      if (!isUIntN(VT.getSizeInBits(), Case.Mask)) {
        // Switch table case range are encoded into series of masks.
        // Just use pointer type, it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  SDValue Sub = RangeSub;
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  // Publish the (biased) switch value in a virtual register so the per-case
  // bit-test blocks can read it via visitBitTestCase.
  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue Root = CopyTo;
  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    SDValue RangeCmp = DAG.getSetCC(dl,
        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                               RangeSub.getValueType()),
        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
        ISD::SETUGT);

    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
                       DAG.getBasicBlock(B.Default));
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));

  DAG.setRoot(Root);
}
3336
/// visitBitTestCase - this function produces one "bit test"
// NOTE(review): the first line of this definition (function name and first
// parameter) is missing from this extract; confirm against upstream.
                                           MachineBasicBlock *NextMBB,
                                           BranchProbability BranchProbToNext,
                                           Register Reg, BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  // Read back the biased switch value written by visitBitTestHeader.
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
  } else {
    // Make desired shift
    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
                                    DAG.getConstant(1, dl, VT), ShiftOp);

    // Emit bit tests and jumps
    SDValue AndOp = DAG.getNode(ISD::AND, dl,
                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}
3394
/// Lower an IR invoke: dispatch to the appropriate call-lowering path
/// (inline asm, invokable intrinsics, deopt/ptrauth bundles, or a plain
/// call), then wire up the normal and unwind machine-CFG edges.
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
  const BasicBlock *EHPadBB = I.getSuccessor(1);
  MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);

  // Deopt and ptrauth bundles are lowered in helper functions, and we don't
  // have to do anything here to lower funclet bundles.
  failForInvalidBundles(I, "invokes",
                        // NOTE(review): the list of permitted operand-bundle
                        // tags was dropped from this extract; confirm
                        // against upstream.

  const Value *Callee(I.getCalledOperand());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(I, EHPadBB);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
    case Intrinsic::seh_try_begin:
    case Intrinsic::seh_scope_begin:
    case Intrinsic::seh_try_end:
    case Intrinsic::seh_scope_end:
      if (EHPadMBB)
        // a block referenced by EH table
        // so dtor-funclet not removed by opts
        EHPadMBB->setMachineBlockAddressTaken();
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint:
      visitPatchpoint(I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      // NOTE(review): the statepoint-lowering call on this case was dropped
      // from this extract; confirm against upstream.
      break;
    // wasm_throw, wasm_rethrow: This is usually done in visitTargetIntrinsic,
    // but these intrinsics are special because they can be invoked, so we
    // manually lower it to a DAG node here.
    case Intrinsic::wasm_throw: {
      // NOTE(review): a declaration line appears to be missing here, and the
      // getTargetConstant call below is truncated (its type argument was
      // dropped from this extract); confirm against upstream.
      std::array<SDValue, 4> Ops = {
          getControlRoot(), // inchain for the terminator node
          DAG.getTargetConstant(Intrinsic::wasm_throw, getCurSDLoc(),
          getValue(I.getArgOperand(0)), // tag
          getValue(I.getArgOperand(1))  // thrown value
      };
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    case Intrinsic::wasm_rethrow: {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      std::array<SDValue, 2> Ops = {
          getControlRoot(), // inchain for the terminator node
          DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout()))};
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.hasDeoptState()) {
    // Currently we do not lower any intrinsic calls with deopt operand bundles.
    // Eventually we will support lowering the @llvm.experimental.deoptimize
    // intrinsic, and right now there are no plans to support other intrinsics
    // with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
    // NOTE(review): the ptrauth-bundle lowering call was dropped from this
    // extract; confirm against upstream.
  } else {
    LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during call to the LowerStatepoint.
  if (!isa<GCStatepointInst>(I)) {
    // NOTE(review): the export call in this body was dropped from this
    // extract; confirm against upstream.
  }

  // NOTE(review): the declaration of `UnwindDests` was dropped from this
  // extract, and the ternary below is truncated (its false arm is missing);
  // confirm against upstream.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
3505
/// The intrinsics currently supported by callbr are implicit control flow
/// intrinsics such as amdgcn.kill.
/// - they should be called (no "dontcall-" attributes)
/// - they do not touch memory on the target (= !TLI.getTgtMemIntrinsic())
/// - they do not need custom argument handling (no
///   TLI.CollectTargetIntrinsicOperands())
void SelectionDAGBuilder::visitCallBrIntrinsic(const CallBrInst &I) {
#ifndef NDEBUG
  // Debug-only sanity check that the intrinsic really is non-memory.
  // NOTE(review): the declaration of `Infos` was dropped from this extract;
  // confirm against upstream.
  DAG.getTargetLoweringInfo().getTgtMemIntrinsic(
      Infos, I, DAG.getMachineFunction(), I.getIntrinsicID());
  assert(Infos.empty() && "Intrinsic touches memory");
#endif

  auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(I);

  // NOTE(review): the declaration receiving the operand list was dropped
  // from this extract; confirm against upstream.
      getTargetIntrinsicOperands(I, HasChain, OnlyLoad);
  SDVTList VTs = getTargetIntrinsicVTList(I, HasChain);

  // Create the node.
  SDValue Result =
      getTargetNonMemIntrinsicNode(*I.getType(), HasChain, Ops, VTs);
  Result = handleTargetIntrinsicRet(I, HasChain, OnlyLoad, Result);

  setValue(&I, Result);
}
3533
/// Lower a callbr instruction: emit either the inline asm or the intrinsic
/// form, then record machine-CFG edges to the default destination and (for
/// inline asm) each indirect destination.
void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  if (I.isInlineAsm()) {
    // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
    // have to do anything here to lower funclet bundles.
    failForInvalidBundles(I, "callbrs",
                          // NOTE(review): the list of permitted operand-bundle
                          // tags was dropped from this extract; confirm
                          // against upstream.
    visitInlineAsm(I);
  } else {
    assert(!I.hasOperandBundles() &&
           "Can't have operand bundles for intrinsics");
    visitCallBrIntrinsic(I);
  }

  // Retrieve successors.
  SmallPtrSet<BasicBlock *, 8> Dests;
  Dests.insert(I.getDefaultDest());
  MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
  // TODO: For most of the cases where there is an intrinsic callbr, we're
  // having exactly one indirect target, which will be unreachable. As soon as
  // this changes, we might need to enhance
  // Target->setIsInlineAsmBrIndirectTarget or add something similar for
  // intrinsic indirect branches.
  if (I.isInlineAsm()) {
    for (BasicBlock *Dest : I.getIndirectDests()) {
      MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
      Target->setIsInlineAsmBrIndirectTarget();
      // If we introduce a type of asm goto statement that is permitted to use
      // an indirect call instruction to jump to its labels, then we should add
      // a call to Target->setMachineBlockAddressTaken() here, to mark the
      // target block as requiring a BTI.

      Target->setLabelMustBeEmitted();
      // Don't add duplicate machine successors.
      if (Dests.insert(Dest).second)
        addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
    }
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
3584
3585void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  // 'resume' is rewritten before instruction selection reaches this builder
  // (NOTE(review): presumably by the EH-preparation passes — confirm), so
  // encountering one here indicates a compiler bug upstream.
3586  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3587}
3588
3589void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3590 assert(FuncInfo.MBB->isEHPad() &&
3591 "Call to landingpad not in landing pad!");
3592
3593 // If there aren't registers to copy the values into (e.g., during SjLj
3594 // exceptions), then don't bother to create these DAG nodes.
3595 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3596 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3597 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3598 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3599 return;
3600
3601 // If landingpad's return type is token type, we don't create DAG nodes
3602 // for its exception pointer and selector value. The extraction of exception
3603 // pointer or selector value from token type landingpads is not currently
3604 // supported.
3605 if (LP.getType()->isTokenTy())
3606 return;
3607
3608 SmallVector<EVT, 2> ValueVTs;
3609 SDLoc dl = getCurSDLoc();
3610 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3611 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3612
3613 // Get the two live-in registers as SDValues. The physregs have already been
3614 // copied into virtual registers.
3615 SDValue Ops[2];
3616 if (FuncInfo.ExceptionPointerVirtReg) {
3617 Ops[0] = DAG.getZExtOrTrunc(
3618 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3619 FuncInfo.ExceptionPointerVirtReg,
3620 TLI.getPointerTy(DAG.getDataLayout())),
3621 dl, ValueVTs[0]);
3622 } else {
3623 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3624 }
3625 Ops[1] = DAG.getZExtOrTrunc(
3626 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3627 FuncInfo.ExceptionSelectorVirtReg,
3628 TLI.getPointerTy(DAG.getDataLayout())),
3629 dl, ValueVTs[1]);
3630
3631 // Merge into one.
3632 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3633 DAG.getVTList(ValueVTs), Ops);
3634 setValue(&LP, Res);
3635}
3636
// NOTE(review): the enclosing function's signature (source lines 3637-3638;
// presumably SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
// MachineBasicBlock *Last) — confirm) is elided in this rendering. The body
// retargets pending switch-lowering records when a block has been split:
// any jump-table or bit-test header that pointed at `First` must now point
// at `Last`.
3639  // Update JTCases.
3640  for (JumpTableBlock &JTB : SL->JTCases)
3641    if (JTB.first.HeaderBB == First)
3642      JTB.first.HeaderBB = Last;
3643
3644  // Update BitTestCases.
3645  for (BitTestBlock &BTB : SL->BitTestCases)
3646    if (BTB.Parent == First)
3647      BTB.Parent = Last;
3648}
3649
3650void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3651  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3652
3653  // Update machine-CFG edges with unique successors.
  // NOTE(review): source line 3654 (the declaration of `Done`, presumably a
  // SmallPtrSet used for deduplication below) is elided in this rendering.
3655  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3656    BasicBlock *BB = I.getSuccessor(i);
    // An indirectbr may list the same block more than once; add each machine
    // successor only the first time it is seen.
3657    bool Inserted = Done.insert(BB).second;
3658    if (!Inserted)
3659      continue;
3660
3661    MachineBasicBlock *Succ = FuncInfo.getMBB(BB);
3662    addSuccessorWithProb(IndirectBrMBB, Succ);
3663  }
  // Renormalize edge probabilities after adding all successors.
3664  IndirectBrMBB->normalizeSuccProbs();
3665
  // NOTE(review): source line 3666 (the `DAG.setRoot(DAG.getNode(ISD::BRIND,`
  // prefix of this call — confirm) is elided in this rendering.
3667                          MVT::Other, getControlRoot(),
3668                          getValue(I.getAddress())));
3669}
3670
3671void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3672 if (!I.shouldLowerToTrap(DAG.getTarget().Options.TrapUnreachable,
3673 DAG.getTarget().Options.NoTrapAfterNoreturn))
3674 return;
3675
3676 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3677}
3678
3679void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3680 SDNodeFlags Flags;
3681 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3682 Flags.copyFMF(*FPOp);
3683
3684 SDValue Op = getValue(I.getOperand(0));
3685 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3686 Op, Flags);
3687 setValue(&I, UnNodeValue);
3688}
3689
3690void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3691 SDNodeFlags Flags;
3692 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3693 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3694 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3695 }
3696 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3697 Flags.setExact(ExactOp->isExact());
3698 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3699 Flags.setDisjoint(DisjointOp->isDisjoint());
3700 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3701 Flags.copyFMF(*FPOp);
3702
3703 SDValue Op1 = getValue(I.getOperand(0));
3704 SDValue Op2 = getValue(I.getOperand(1));
3705 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3706 Op1, Op2, Flags);
3707 setValue(&I, BinNodeValue);
3708}
3709
3710void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3711  SDValue Op1 = getValue(I.getOperand(0));
3712  SDValue Op2 = getValue(I.getOperand(1));
3713
  // The DAG's shift-amount type may differ from the IR operand type.
3714  EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3715      Op1.getValueType(), DAG.getDataLayout());
3716
3717  // Coerce the shift amount to the right type if we can. This exposes the
3718  // truncate or zext to optimization early.
  // NOTE(review): source line 3720 (the start of the assert whose message
  // continues below) is elided in this rendering.
3719  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3721           "Unexpected shift type");
3722    Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3723  }
3724
3725  bool nuw = false;
3726  bool nsw = false;
3727  bool exact = false;
3728
  // Only true shifts carry wrap/exact flags in IR.
3729  if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3730
  // NOTE(review): source line 3732 (presumably the
  // `dyn_cast<const OverflowingBinaryOperator>(&I)) {` continuation) is
  // elided in this rendering.
3731    if (const OverflowingBinaryOperator *OFBinOp =
3733      nuw = OFBinOp->hasNoUnsignedWrap();
3734      nsw = OFBinOp->hasNoSignedWrap();
3735    }
  // NOTE(review): source line 3737 (the continuation of this dyn_cast) is
  // elided in this rendering.
3736    if (const PossiblyExactOperator *ExactOp =
3738      exact = ExactOp->isExact();
3739    }
  // Transfer the collected flags onto the DAG node.
3740  SDNodeFlags Flags;
3741  Flags.setExact(exact);
3742  Flags.setNoSignedWrap(nsw);
3743  Flags.setNoUnsignedWrap(nuw);
3744  SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3745                            Flags);
3746  setValue(&I, Res);
3747}
3748
3749void SelectionDAGBuilder::visitSDiv(const User &I) {
3750 SDValue Op1 = getValue(I.getOperand(0));
3751 SDValue Op2 = getValue(I.getOperand(1));
3752
3753 SDNodeFlags Flags;
3754 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3755 cast<PossiblyExactOperator>(&I)->isExact());
3756 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3757 Op2, Flags));
3758}
3759
3760void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
3761 ICmpInst::Predicate predicate = I.getPredicate();
3762 SDValue Op1 = getValue(I.getOperand(0));
3763 SDValue Op2 = getValue(I.getOperand(1));
3764 ISD::CondCode Opcode = getICmpCondCode(predicate);
3765
3766 auto &TLI = DAG.getTargetLoweringInfo();
3767 EVT MemVT =
3768 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3769
3770 // If a pointer's DAG type is larger than its memory type then the DAG values
3771 // are zero-extended. This breaks signed comparisons so truncate back to the
3772 // underlying type before doing the compare.
3773 if (Op1.getValueType() != MemVT) {
3774 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3775 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3776 }
3777
3778 SDNodeFlags Flags;
3779 Flags.setSameSign(I.hasSameSign());
3780
3781 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3782 I.getType());
3783 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode,
3784 /*Chain=*/{}, /*IsSignaling=*/false, Flags));
3785}
3786
3787void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
3788 FCmpInst::Predicate predicate = I.getPredicate();
3789 SDValue Op1 = getValue(I.getOperand(0));
3790 SDValue Op2 = getValue(I.getOperand(1));
3791
3792 ISD::CondCode Condition = getFCmpCondCode(predicate);
3793 auto *FPMO = cast<FPMathOperator>(&I);
3794 if (FPMO->hasNoNaNs() ||
3795 (DAG.isKnownNeverNaN(Op1) && DAG.isKnownNeverNaN(Op2)))
3796 Condition = getFCmpCodeWithoutNaN(Condition);
3797
3798 SDNodeFlags Flags;
3799 Flags.copyFMF(*FPMO);
3800
3801 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3802 I.getType());
3803 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition,
3804 /*Chain=*/{}, /*IsSignaling=*/false, Flags));
3805}
3806
3807// Check if the condition of the select has one use or two users that are both
3808// selects with the same condition.
3809static bool hasOnlySelectUsers(const Value *Cond) {
3810 return llvm::all_of(Cond->users(), [](const Value *V) {
3811 return isa<SelectInst>(V);
3812 });
3813}
3814
// Lower an IR 'select' (or vector select), opportunistically matching
// min/max and abs patterns so they become dedicated ISD nodes.
// NOTE(review): several source lines are elided in this rendering (e.g.
// 3827, 3829, 3855, 3862, 3869, 3876, 3880-3882, 3888, 3895, 3899-3901,
// 3919, 3923, 3957); code is kept verbatim — confirm against the full source.
3815void SelectionDAGBuilder::visitSelect(const User &I) {
3816  SmallVector<EVT, 4> ValueVTs;
3817  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3818                  ValueVTs);
3819  unsigned NumValues = ValueVTs.size();
  // A select producing an empty aggregate needs no DAG nodes.
3820  if (NumValues == 0) return;
3821
3822  SmallVector<SDValue, 4> Values(NumValues);
3823  SDValue Cond = getValue(I.getOperand(0));
3824  SDValue LHSVal = getValue(I.getOperand(1));
3825  SDValue RHSVal = getValue(I.getOperand(2));
3826  SmallVector<SDValue, 1> BaseOps(1, Cond);
  // NOTE(review): line 3827 (the declaration initialized by the expression
  // below, presumably the OpCode variable) is elided here.
3828      Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3830  bool IsUnaryAbs = false;
3831  bool Negate = false;
3832
3833  SDNodeFlags Flags;
3834  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3835    Flags.copyFMF(*FPOp);
3836
  // !unpredictable metadata maps onto the DAG's unpredictable flag.
3837  Flags.setUnpredictable(
3838      cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3839
3840  // Min/max matching is only viable if all output VTs are the same.
3841  if (all_equal(ValueVTs)) {
3842    EVT VT = ValueVTs[0];
3843    LLVMContext &Ctx = *DAG.getContext();
3844    auto &TLI = DAG.getTargetLoweringInfo();
3845
3846    // We care about the legality of the operation after it has been type
3847    // legalized.
3848    while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3849      VT = TLI.getTypeToTransformTo(Ctx, VT);
3850
3851    // If the vselect is legal, assume we want to leave this as a vector setcc +
3852    // vselect. Otherwise, if this is going to be scalarized, we want to see if
3853    // min/max is legal on the scalar type.
  // NOTE(review): line 3855 (the remainder of this condition) is elided here.
3854    bool UseScalarMinMax = VT.isVector() &&
3856
3857    // ValueTracking's select pattern matching does not account for -0.0,
3858    // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3859    // -0.0 is less than +0.0.
3860    const Value *LHS, *RHS;
3861    auto SPR = matchSelectPattern(&I, LHS, RHS);
3863    switch (SPR.Flavor) {
3864    case SPF_UMAX:    Opc = ISD::UMAX; break;
3865    case SPF_UMIN:    Opc = ISD::UMIN; break;
3866    case SPF_SMAX:    Opc = ISD::SMAX; break;
3867    case SPF_SMIN:    Opc = ISD::SMIN; break;
3868    case SPF_FMINNUM:
3870      break;
3871
  // FP min: choose the node based on the pattern's NaN behavior.
3872      switch (SPR.NaNBehavior) {
3873      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3874      case SPNB_RETURNS_NAN: break;
3875      case SPNB_RETURNS_OTHER:
3877        Flags.setNoSignedZeros(true);
3878        break;
3879      case SPNB_RETURNS_ANY:
3881            (UseScalarMinMax &&
3883          Opc = ISD::FMINNUM;
3884        break;
3885      }
3886      break;
3887    case SPF_FMAXNUM:
3889      break;
3890
  // FP max: mirror image of the FP-min handling above.
3891      switch (SPR.NaNBehavior) {
3892      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3893      case SPNB_RETURNS_NAN: break;
3894      case SPNB_RETURNS_OTHER:
3896        Flags.setNoSignedZeros(true);
3897        break;
3898      case SPNB_RETURNS_ANY:
3900            (UseScalarMinMax &&
3902          Opc = ISD::FMAXNUM;
3903        break;
3904      }
3905      break;
3906    case SPF_NABS:
3907      Negate = true;
3908      [[fallthrough]];
3909    case SPF_ABS:
3910      IsUnaryAbs = true;
3911      Opc = ISD::ABS;
3912      break;
3913    default: break;
3914    }
3915
  // Commit to the matched min/max only if the node is legal and the compare
  // has no other (non-select) users that would keep it alive anyway.
3916    if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3917        (TLI.isOperationLegalOrCustom(Opc, VT) ||
3918         (UseScalarMinMax &&
3920      // If the underlying comparison instruction is used by any other
3921      // instruction, the consumed instructions won't be destroyed, so it is
3922      // not profitable to convert to a min/max.
3924        OpCode = Opc;
3925        LHSVal = getValue(LHS);
3926        RHSVal = getValue(RHS);
3927        BaseOps.clear();
3928      }
3929
3930      if (IsUnaryAbs) {
3931        OpCode = Opc;
3932        LHSVal = getValue(LHS);
3933        BaseOps.clear();
3934      }
3935  }
3936
3937  if (IsUnaryAbs) {
  // Emit one unary ABS per aggregate element, negating for SPF_NABS.
3938    for (unsigned i = 0; i != NumValues; ++i) {
3939      SDLoc dl = getCurSDLoc();
3940      EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3941      Values[i] =
3942          DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3943      if (Negate)
3944        Values[i] = DAG.getNegative(Values[i], dl, VT);
3945    }
3946  } else {
  // General case: one (V)SELECT or min/max node per aggregate element.
3947    for (unsigned i = 0; i != NumValues; ++i) {
3948      SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3949      Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3950      Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3951      Values[i] = DAG.getNode(
3952          OpCode, getCurSDLoc(),
3953          LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3954    }
3955  }
3956
  // NOTE(review): line 3957 (the `setValue(&I, DAG.getNode(ISD::MERGE_VALUES,`
  // prefix of this call — confirm) is elided in this rendering.
3958                           DAG.getVTList(ValueVTs), Values));
3959}
3960
3961void SelectionDAGBuilder::visitTrunc(const User &I) {
3962 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3963 SDValue N = getValue(I.getOperand(0));
3964 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3965 I.getType());
3966 SDNodeFlags Flags;
3967 if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
3968 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3969 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3970 }
3971
3972 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N, Flags));
3973}
3974
3975void SelectionDAGBuilder::visitZExt(const User &I) {
3976 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3977 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
3978 SDValue N = getValue(I.getOperand(0));
3979 auto &TLI = DAG.getTargetLoweringInfo();
3980 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3981
3982 SDNodeFlags Flags;
3983 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3984 Flags.setNonNeg(PNI->hasNonNeg());
3985
3986 // Eagerly use nonneg information to canonicalize towards sign_extend if
3987 // that is the target's preference.
3988 // TODO: Let the target do this later.
3989 if (Flags.hasNonNeg() &&
3990 TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3991 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3992 return;
3993 }
3994
3995 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3996}
3997
3998void SelectionDAGBuilder::visitSExt(const User &I) {
3999 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
4000 // SExt also can't be a cast to bool for same reason. So, nothing much to do
4001 SDValue N = getValue(I.getOperand(0));
4002 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4003 I.getType());
4004 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
4005}
4006
4007void SelectionDAGBuilder::visitFPTrunc(const User &I) {
4008 // FPTrunc is never a no-op cast, no need to check
4009 SDValue N = getValue(I.getOperand(0));
4010 SDLoc dl = getCurSDLoc();
4011 SDNodeFlags Flags;
4012 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
4013 Flags.copyFMF(*FPOp);
4014 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4015 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4016 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
4017 DAG.getTargetConstant(
4018 0, dl, TLI.getPointerTy(DAG.getDataLayout())),
4019 Flags));
4020}
4021
4022void SelectionDAGBuilder::visitFPExt(const User &I) {
4023 // FPExt is never a no-op cast, no need to check
4024 SDValue N = getValue(I.getOperand(0));
4025 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4026 I.getType());
4027 SDNodeFlags Flags;
4028 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
4029 Flags.copyFMF(*FPOp);
4030 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N, Flags));
4031}
4032
4033void SelectionDAGBuilder::visitFPToUI(const User &I) {
4034 // FPToUI is never a no-op cast, no need to check
4035 SDValue N = getValue(I.getOperand(0));
4036 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4037 I.getType());
4038 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
4039}
4040
4041void SelectionDAGBuilder::visitFPToSI(const User &I) {
4042 // FPToSI is never a no-op cast, no need to check
4043 SDValue N = getValue(I.getOperand(0));
4044 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4045 I.getType());
4046 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
4047}
4048
4049void SelectionDAGBuilder::visitUIToFP(const User &I) {
4050 // UIToFP is never a no-op cast, no need to check
4051 SDValue N = getValue(I.getOperand(0));
4052 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4053 I.getType());
4054 SDNodeFlags Flags;
4055 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
4056 Flags.setNonNeg(PNI->hasNonNeg());
4057
4058 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags));
4059}
4060
4061void SelectionDAGBuilder::visitSIToFP(const User &I) {
4062 // SIToFP is never a no-op cast, no need to check
4063 SDValue N = getValue(I.getOperand(0));
4064 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4065 I.getType());
4066 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
4067}
4068
4069void SelectionDAGBuilder::visitPtrToAddr(const User &I) {
4070 SDValue N = getValue(I.getOperand(0));
4071 // By definition the type of the ptrtoaddr must be equal to the address type.
4072 const auto &TLI = DAG.getTargetLoweringInfo();
4073 EVT AddrVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4074 // The address width must be smaller or equal to the pointer representation
4075 // width, so we lower ptrtoaddr as a truncate (possibly folded to a no-op).
4076 N = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), AddrVT, N);
4077 setValue(&I, N);
4078}
4079
4080void SelectionDAGBuilder::visitPtrToInt(const User &I) {
4081 // What to do depends on the size of the integer and the size of the pointer.
4082 // We can either truncate, zero extend, or no-op, accordingly.
4083 SDValue N = getValue(I.getOperand(0));
4084 auto &TLI = DAG.getTargetLoweringInfo();
4085 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4086 I.getType());
4087 EVT PtrMemVT =
4088 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
4089 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
4090 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
4091 setValue(&I, N);
4092}
4093
4094void SelectionDAGBuilder::visitIntToPtr(const User &I) {
4095 // What to do depends on the size of the integer and the size of the pointer.
4096 // We can either truncate, zero extend, or no-op, accordingly.
4097 SDValue N = getValue(I.getOperand(0));
4098 auto &TLI = DAG.getTargetLoweringInfo();
4099 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4100 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4101 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
4102 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
4103 setValue(&I, N);
4104}
4105
4106void SelectionDAGBuilder::visitBitCast(const User &I) {
4107 SDValue N = getValue(I.getOperand(0));
4108 SDLoc dl = getCurSDLoc();
4109 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4110 I.getType());
4111
4112 // BitCast assures us that source and destination are the same size so this is
4113 // either a BITCAST or a no-op.
4114 if (DestVT != N.getValueType())
4115 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
4116 DestVT, N)); // convert types.
4117 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
4118 // might fold any kind of constant expression to an integer constant and that
4119 // is not what we are looking for. Only recognize a bitcast of a genuine
4120 // constant integer as an opaque constant.
4121 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
4122 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
4123 /*isOpaque*/true));
4124 else
4125 setValue(&I, N); // noop cast.
4126}
4127
4128void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
4129 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4130 const Value *SV = I.getOperand(0);
4131 SDValue N = getValue(SV);
4132 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4133
4134 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
4135 unsigned DestAS = I.getType()->getPointerAddressSpace();
4136
4137 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4138 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
4139
4140 setValue(&I, N);
4141}
4142
// Lower 'insertelement' to an INSERT_VECTOR_ELT-style node.
3875void SelectionDAGBuilder::visitInsertElement(const User &I) {
4153
4154void SelectionDAGBuilder::visitExtractElement(const User &I) {
4155  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4156  SDValue InVec = getValue(I.getOperand(0));
  // Normalize the index operand to the target's preferred vector-index type.
4157  SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
4158                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  // NOTE(review): source line 4159 (presumably the
  // `setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT,` prefix of this call —
  // confirm) is elided in this rendering.
4160                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
4161                           InVec, InIdx));
4162}
4163
// Lower 'shufflevector'. Exact-size shuffles map directly to a vector
// shuffle node; mismatched mask/source lengths are normalized via
// concatenation, subvector extraction, or a scalar extract/build fallback.
4164void SelectionDAGBuilder::visitShuffleVector(const User &I) {
4165  SDValue Src1 = getValue(I.getOperand(0));
4166  SDValue Src2 = getValue(I.getOperand(1));
4167  ArrayRef<int> Mask;
4168  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
4169    Mask = SVI->getShuffleMask();
4170  else
4171    Mask = cast<ConstantExpr>(I).getShuffleMask();
4172  SDLoc DL = getCurSDLoc();
4173  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4174  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4175  EVT SrcVT = Src1.getValueType();
4176
  // An all-zero mask on a scalable result is a splat of element 0 of Src1.
4177  if (all_of(Mask, equal_to(0)) && VT.isScalableVector()) {
4178    // Canonical splat form of first element of first input vector.
4179    SDValue FirstElt =
4180        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
4181                    DAG.getVectorIdxConstant(0, DL));
4182    setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
4183    return;
4184  }
4185
4186  // For now, we only handle splats for scalable vectors.
4187  // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4188  // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4189  assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
4190
4191  unsigned SrcNumElts = SrcVT.getVectorNumElements();
4192  unsigned MaskNumElts = Mask.size();
4193
  // Easy case: mask and source lengths already agree.
4194  if (SrcNumElts == MaskNumElts) {
4195    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
4196    return;
4197  }
4198
4199  // Normalize the shuffle vector since mask and vector length don't match.
4200  if (SrcNumElts < MaskNumElts) {
4201    // Mask is longer than the source vectors. We can use concatenate vector to
4202    // make the mask and vectors lengths match.
4203
4204    if (MaskNumElts % SrcNumElts == 0) {
4205      // Mask length is a multiple of the source vector length.
4206      // Check if the shuffle is some kind of concatenation of the input
4207      // vectors.
4208      unsigned NumConcat = MaskNumElts / SrcNumElts;
4209      bool IsConcat = true;
4210      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4211      for (unsigned i = 0; i != MaskNumElts; ++i) {
4212        int Idx = Mask[i];
4213        if (Idx < 0)
4214          continue;
4215        // Ensure the indices in each SrcVT sized piece are sequential and that
4216        // the same source is used for the whole piece.
4217        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4218            (ConcatSrcs[i / SrcNumElts] >= 0 &&
4219             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
4220          IsConcat = false;
4221          break;
4222        }
4223        // Remember which source this index came from.
4224        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4225      }
4226
4227      // The shuffle is concatenating multiple vectors together. Just emit
4228      // a CONCAT_VECTORS operation.
4229      if (IsConcat) {
4230        SmallVector<SDValue, 8> ConcatOps;
4231        for (auto Src : ConcatSrcs) {
4232          if (Src < 0)
4233            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
4234          else if (Src == 0)
4235            ConcatOps.push_back(Src1);
4236          else
4237            ConcatOps.push_back(Src2);
4238        }
4239        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
4240        return;
4241      }
4242    }
4243
  // Not a pure concat: widen both sources with undef padding to a multiple of
  // SrcNumElts at least as long as the mask, then shuffle at that width.
4244    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
4245    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4246    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
4247                                    PaddedMaskNumElts);
4248
4249    // Pad both vectors with undefs to make them the same length as the mask.
4250    SDValue UndefVal = DAG.getUNDEF(SrcVT);
4251
4252    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
4253    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
4254    MOps1[0] = Src1;
4255    MOps2[0] = Src2;
4256
4257    Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
4258    Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
4259
4260    // Readjust mask for new input vector length.
4261    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4262    for (unsigned i = 0; i != MaskNumElts; ++i) {
4263      int Idx = Mask[i];
4264      if (Idx >= (int)SrcNumElts)
4265        Idx -= SrcNumElts - PaddedMaskNumElts;
4266      MappedOps[i] = Idx;
4267    }
4268
4269    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
4270
4271    // If the concatenated vector was padded, extract a subvector with the
4272    // correct number of elements.
4273    if (MaskNumElts != PaddedMaskNumElts)
4274      Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
4275                           DAG.getVectorIdxConstant(0, DL));
4276
4277    setValue(&I, Result);
4278    return;
4279  }
4280
4281  assert(SrcNumElts > MaskNumElts);
4282
4283  // Analyze the access pattern of the vector to see if we can extract
4284  // two subvectors and do the shuffle.
4285  int StartIdx[2] = {-1, -1}; // StartIdx to extract from
4286  bool CanExtract = true;
4287  for (int Idx : Mask) {
4288    unsigned Input = 0;
4289    if (Idx < 0)
4290      continue;
4291
4292    if (Idx >= (int)SrcNumElts) {
4293      Input = 1;
4294      Idx -= SrcNumElts;
4295    }
4296
4297    // If all the indices come from the same MaskNumElts sized portion of
4298    // the sources we can use extract. Also make sure the extract wouldn't
4299    // extract past the end of the source.
4300    int NewStartIdx = alignDown(Idx, MaskNumElts);
4301    if (NewStartIdx + MaskNumElts > SrcNumElts ||
4302        (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4303      CanExtract = false;
4304    // Make sure we always update StartIdx as we use it to track if all
4305    // elements are undef.
4306    StartIdx[Input] = NewStartIdx;
4307  }
4308
4309  if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4310    setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4311    return;
4312  }
4313  if (CanExtract) {
4314    // Extract appropriate subvector and generate a vector shuffle
4315    for (unsigned Input = 0; Input < 2; ++Input) {
4316      SDValue &Src = Input == 0 ? Src1 : Src2;
4317      if (StartIdx[Input] < 0)
4318        Src = DAG.getUNDEF(VT);
4319      else {
4320        Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4321                          DAG.getVectorIdxConstant(StartIdx[Input], DL));
4322      }
4323    }
4324
4325    // Calculate new mask.
4326    SmallVector<int, 8> MappedOps(Mask);
4327    for (int &Idx : MappedOps) {
4328      if (Idx >= (int)SrcNumElts)
4329        Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4330      else if (Idx >= 0)
4331        Idx -= StartIdx[0];
4332    }
4333
4334    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4335    return;
4336  }
4337
4338  // We can't use either concat vectors or extract subvectors so fall back to
4339  // replacing the shuffle with extract and build vector.
4340  // to insert and build vector.
4341  EVT EltVT = VT.getVectorElementType();
  // NOTE(review): source line 4342 (the declaration of `Ops`, presumably
  // `SmallVector<SDValue, 8> Ops;`) is elided in this rendering.
4343  for (int Idx : Mask) {
4344    SDValue Res;
4345
4346    if (Idx < 0) {
4347      Res = DAG.getUNDEF(EltVT);
4348    } else {
4349      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4350      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4351
4352      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4353                        DAG.getVectorIdxConstant(Idx, DL));
4354    }
4355
4356    Ops.push_back(Res);
4357  }
4358
4359  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4360}
4361
// Lower 'insertvalue': rebuild the aggregate's flattened value list, splicing
// the inserted value's components in at the computed linear index.
4362void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4363  ArrayRef<unsigned> Indices = I.getIndices();
4364  const Value *Op0 = I.getOperand(0);
4365  const Value *Op1 = I.getOperand(1);
4366  Type *AggTy = I.getType();
4367  Type *ValTy = Op1->getType();
4368  bool IntoUndef = isa<UndefValue>(Op0);
4369  bool FromUndef = isa<UndefValue>(Op1);
4370
  // Position of the inserted value within the flattened aggregate.
4371  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4372
4373  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4374  SmallVector<EVT, 4> AggValueVTs;
4375  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4376  SmallVector<EVT, 4> ValValueVTs;
4377  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4378
4379  unsigned NumAggValues = AggValueVTs.size();
4380  unsigned NumValValues = ValValueVTs.size();
4381  SmallVector<SDValue, 4> Values(NumAggValues);
4382
4383  // Ignore an insertvalue that produces an empty object
4384  if (!NumAggValues) {
4385    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4386    return;
4387  }
4388
4389  SDValue Agg = getValue(Op0);
4390  unsigned i = 0;
4391  // Copy the beginning value(s) from the original aggregate.
4392  for (; i != LinearIndex; ++i)
4393    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4394                SDValue(Agg.getNode(), Agg.getResNo() + i);
4395  // Copy values from the inserted value(s).
4396  if (NumValValues) {
4397    SDValue Val = getValue(Op1);
4398    for (; i != LinearIndex + NumValValues; ++i)
4399      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4400                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4401  }
4402  // Copy remaining value(s) from the original aggregate.
4403  for (; i != NumAggValues; ++i)
4404    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4405                SDValue(Agg.getNode(), Agg.getResNo() + i);
4406
  // NOTE(review): source line 4407 (the `setValue(&I,
  // DAG.getNode(ISD::MERGE_VALUES,` prefix of this call — confirm) is elided
  // in this rendering.
4408                           DAG.getVTList(AggValueVTs), Values));
4409}
4410
// Lower 'extractvalue': select the extracted value's components out of the
// aggregate's flattened value list starting at the computed linear index.
4411void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4412  ArrayRef<unsigned> Indices = I.getIndices();
4413  const Value *Op0 = I.getOperand(0);
4414  Type *AggTy = Op0->getType();
4415  Type *ValTy = I.getType();
4416  bool OutOfUndef = isa<UndefValue>(Op0);
4417
  // Position of the extracted value within the flattened aggregate.
4418  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4419
4420  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4421  SmallVector<EVT, 4> ValValueVTs;
4422  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4423
4424  unsigned NumValValues = ValValueVTs.size();
4425
4426  // Ignore a extractvalue that produces an empty object
4427  if (!NumValValues) {
4428    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4429    return;
4430  }
4431
4432  SmallVector<SDValue, 4> Values(NumValValues);
4433
4434  SDValue Agg = getValue(Op0);
4435  // Copy out the selected value(s).
4436  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4437    Values[i - LinearIndex] =
4438      OutOfUndef ?
4439        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4440        SDValue(Agg.getNode(), Agg.getResNo() + i);
4441
  // NOTE(review): source line 4442 (the `setValue(&I,
  // DAG.getNode(ISD::MERGE_VALUES,` prefix of this call — confirm) is elided
  // in this rendering.
4443                           DAG.getVTList(ValValueVTs), Values));
4444}
4445
// Lower a 'getelementptr' into explicit pointer arithmetic: walk each index
// operand, adding constant struct-field offsets and scaled (possibly
// vscale-scaled) array offsets onto the running pointer value N, propagating
// the GEP's no-wrap/inbounds flags onto the generated nodes.
// NOTE(review): this listing is missing several continuation lines from the
// original source (e.g. after the VectorElementCount initializer, the loop
// header at the gep_type iteration, and two Flags/ScaleFlags setters), so a
// few statements below appear truncated — compare against upstream before
// editing.
4446void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4447 Value *Op0 = I.getOperand(0);
4448 // Note that the pointer operand may be a vector of pointers. Take the scalar
4449 // element which holds a pointer.
4450 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4451 SDValue N = getValue(Op0);
4452 SDLoc dl = getCurSDLoc();
4453 auto &TLI = DAG.getTargetLoweringInfo();
4454 GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4455
4456 // For a vector GEP, keep the prefix scalar as long as possible, then
4457 // convert any scalars encountered after the first vector operand to vectors.
4458 bool IsVectorGEP = I.getType()->isVectorTy();
4459 ElementCount VectorElementCount =
4460 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4462
4464 GTI != E; ++GTI) {
4465 const Value *Idx = GTI.getOperand();
4466 if (StructType *StTy = GTI.getStructTypeOrNull()) {
  // Struct index: the offset is a compile-time constant from the layout.
4467 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4468 if (Field) {
4469 // N = N + Offset
4470 uint64_t Offset =
4471 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4472
4473 // In an inbounds GEP with an offset that is nonnegative even when
4474 // interpreted as signed, assume there is no unsigned overflow.
4475 SDNodeFlags Flags;
4476 if (NW.hasNoUnsignedWrap() ||
4477 (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4479 Flags.setInBounds(NW.isInBounds());
4480
4481 N = DAG.getMemBasePlusOffset(
4482 N, DAG.getConstant(Offset, dl, N.getValueType()), dl, Flags);
4483 }
4484 } else {
4485 // IdxSize is the width of the arithmetic according to IR semantics.
4486 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4487 // (and fix up the result later).
4488 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4489 MVT IdxTy = MVT::getIntegerVT(IdxSize);
4490 TypeSize ElementSize =
4491 GTI.getSequentialElementStride(DAG.getDataLayout());
4492 // We intentionally mask away the high bits here; ElementSize may not
4493 // fit in IdxTy.
4494 APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
4495 /*isSigned=*/false, /*implicitTrunc=*/true);
4496 bool ElementScalable = ElementSize.isScalable();
4497
4498 // If this is a scalar constant or a splat vector of constants,
4499 // handle it quickly.
4500 const auto *C = dyn_cast<Constant>(Idx);
4501 if (C && isa<VectorType>(C->getType()))
4502 C = C->getSplatValue();
4503
4504 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4505 if (CI && CI->isZero())
4506 continue;
4507 if (CI && !ElementScalable) {
  // Constant, fixed-size stride: fold Idx * ElementSize into one constant.
4508 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4509 LLVMContext &Context = *DAG.getContext();
4510 SDValue OffsVal;
4511 if (N.getValueType().isVector())
4512 OffsVal = DAG.getConstant(
4513 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4514 else
4515 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4516
4517 // In an inbounds GEP with an offset that is nonnegative even when
4518 // interpreted as signed, assume there is no unsigned overflow.
4519 SDNodeFlags Flags;
4520 if (NW.hasNoUnsignedWrap() ||
4521 (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4522 Flags.setNoUnsignedWrap(true);
4523 Flags.setInBounds(NW.isInBounds());
4524
4525 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4526
4527 N = DAG.getMemBasePlusOffset(N, OffsVal, dl, Flags);
4528 continue;
4529 }
4530
4531 // N = N + Idx * ElementMul;
4532 SDValue IdxN = getValue(Idx);
4533
  // Mixed scalar/vector: splat whichever side is still scalar so both
  // operands agree before doing vector arithmetic.
4534 if (IdxN.getValueType().isVector() != N.getValueType().isVector()) {
4535 if (N.getValueType().isVector()) {
4536 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
4537 VectorElementCount);
4538 IdxN = DAG.getSplat(VT, dl, IdxN);
4539 } else {
4540 EVT VT =
4541 EVT::getVectorVT(*Context, N.getValueType(), VectorElementCount);
4542 N = DAG.getSplat(VT, dl, N);
4543 }
4544 }
4545
4546 // If the index is smaller or larger than intptr_t, truncate or extend
4547 // it.
4548 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4549
4550 SDNodeFlags ScaleFlags;
4551 // The multiplication of an index by the type size does not wrap the
4552 // pointer index type in a signed sense (mul nsw).
4554
4555 // The multiplication of an index by the type size does not wrap the
4556 // pointer index type in an unsigned sense (mul nuw).
4557 ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4558
4559 if (ElementScalable) {
  // Scalable stride: multiply by vscale at runtime.
4560 EVT VScaleTy = N.getValueType().getScalarType();
4561 SDValue VScale = DAG.getNode(
4562 ISD::VSCALE, dl, VScaleTy,
4563 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4564 if (N.getValueType().isVector())
4565 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4566 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4567 ScaleFlags);
4568 } else {
4569 // If this is a multiply by a power of two, turn it into a shl
4570 // immediately. This is a very common case.
4571 if (ElementMul != 1) {
4572 if (ElementMul.isPowerOf2()) {
4573 unsigned Amt = ElementMul.logBase2();
4574 IdxN = DAG.getNode(
4575 ISD::SHL, dl, N.getValueType(), IdxN,
4576 DAG.getShiftAmountConstant(Amt, N.getValueType(), dl),
4577 ScaleFlags);
4578 } else {
4579 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4580 IdxN.getValueType());
4581 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale,
4582 ScaleFlags);
4583 }
4584 }
4585 }
4586
4587 // The successive addition of the current address, truncated to the
4588 // pointer index type and interpreted as an unsigned number, and each
4589 // offset, also interpreted as an unsigned number, does not wrap the
4590 // pointer index type (add nuw).
4591 SDNodeFlags AddFlags;
4592 AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4593 AddFlags.setInBounds(NW.isInBounds());
4594
4595 N = DAG.getMemBasePlusOffset(N, IdxN, dl, AddFlags);
4596 }
4597 }
4598
  // A vector GEP whose indices were all scalar still needs a vector result.
4599 if (IsVectorGEP && !N.getValueType().isVector()) {
4600 EVT VT = EVT::getVectorVT(*Context, N.getValueType(), VectorElementCount);
4601 N = DAG.getSplat(VT, dl, N);
4602 }
4603
4604 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4605 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4606 if (IsVectorGEP) {
4607 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4608 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4609 }
4610
4611 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4612 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4613
4614 setValue(&I, N);
4615}
4616
// Lower a dynamically-sized (or non-entry-block) alloca into a
// DYNAMIC_STACKALLOC node: compute the byte size (element size * array
// count), round it up to the stack alignment, and chain the node into the
// root so stack adjustment ordering is preserved. Fixed-size entry-block
// allocas were already given static frame slots and are skipped here.
// NOTE(review): this listing dropped line 4653, the trailing flags argument
// of the ISD::ADD below — compare against upstream before editing.
4617void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4618 // If this is a fixed sized alloca in the entry block of the function,
4619 // allocate it statically on the stack.
4620 if (FuncInfo.StaticAllocaMap.count(&I))
4621 return; // getValue will auto-populate this.
4622
4623 SDLoc dl = getCurSDLoc();
4624 Type *Ty = I.getAllocatedType();
4625 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4626 auto &DL = DAG.getDataLayout();
4627 TypeSize TySize = DL.getTypeAllocSize(Ty);
4628 MaybeAlign Alignment = I.getAlign();
4629
4630 SDValue AllocSize = getValue(I.getArraySize());
4631
4632 EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4633 if (AllocSize.getValueType() != IntPtr)
4634 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4635
  // Total bytes = array count * allocated type size (vscale-aware).
4636 AllocSize = DAG.getNode(
4637 ISD::MUL, dl, IntPtr, AllocSize,
4638 DAG.getZExtOrTrunc(DAG.getTypeSize(dl, MVT::i64, TySize), dl, IntPtr));
4639
4640 // Handle alignment. If the requested alignment is less than or equal to
4641 // the stack alignment, ignore it. If the size is greater than or equal to
4642 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4643 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4644 if (*Alignment <= StackAlign)
4645 Alignment = std::nullopt;
4646
4647 const uint64_t StackAlignMask = StackAlign.value() - 1U;
4648 // Round the size of the allocation up to the stack alignment size
4649 // by add SA-1 to the size. This doesn't overflow because we're computing
4650 // an address inside an alloca.
4651 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4652 DAG.getConstant(StackAlignMask, dl, IntPtr),
4654
4655 // Mask out the low bits for alignment purposes.
4656 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4657 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4658
  // Operands: chain, size in bytes, requested alignment (0 = stack default).
4659 SDValue Ops[] = {
4660 getRoot(), AllocSize,
4661 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4662 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4663 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4664 setValue(&I, DSA);
4665 DAG.setRoot(DSA.getValue(1));
4666
  // A dynamic alloca implies the frame has variable-sized objects.
4667 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4668}
4669
4670static const MDNode *getRangeMetadata(const Instruction &I) {
4671 return I.getMetadata(LLVMContext::MD_range);
4672}
4673
// Return the known value range of \p I: prefer the range attribute on a
// call's return value, then fall back to !range metadata; std::nullopt if
// neither is present.
// NOTE(review): this listing dropped line 4679, the return statement that
// converts the !range MDNode into a ConstantRange — compare against upstream
// before editing.
4674static std::optional<ConstantRange> getRange(const Instruction &I) {
4675 if (const auto *CB = dyn_cast<CallBase>(&I))
4676 if (std::optional<ConstantRange> CR = CB->getRange())
4677 return CR;
4678 if (const MDNode *Range = getRangeMetadata(I))
4680 return std::nullopt;
4681}
4682
4684 if (const auto *CB = dyn_cast<CallBase>(&I))
4685 return CB->getRetNoFPClass();
4686 return fcNone;
4687}
4688
// Lower a (non-atomic) 'load' instruction. Swifterror loads are diverted to
// virtual-register copies; everything else becomes one ISD load per leaf
// value of the loaded type, with chains batched in groups of
// MaxParallelChains and merged through TokenFactor nodes.
// NOTE(review): this listing dropped line 4713 (the declaration of the
// Offsets vector passed to ComputeValueVTs below) — compare against upstream
// before editing.
4689void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4690 if (I.isAtomic())
4691 return visitAtomicLoad(I);
4692
4693 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4694 const Value *SV = I.getOperand(0);
4695 if (TLI.supportSwiftError()) {
4696 // Swifterror values can come from either a function parameter with
4697 // swifterror attribute or an alloca with swifterror attribute.
4698 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4699 if (Arg->hasSwiftErrorAttr())
4700 return visitLoadFromSwiftError(I);
4701 }
4702
4703 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4704 if (Alloca->isSwiftError())
4705 return visitLoadFromSwiftError(I);
4706 }
4707 }
4708
4709 SDValue Ptr = getValue(SV);
4710
4711 Type *Ty = I.getType();
4712 SmallVector<EVT, 4> ValueVTs, MemVTs;
4714 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4715 unsigned NumValues = ValueVTs.size();
4716 if (NumValues == 0)
4717 return;
4718
4719 Align Alignment = I.getAlign();
4720 AAMDNodes AAInfo = I.getAAMetadata();
4721 const MDNode *Ranges = getRangeMetadata(I);
4722 bool isVolatile = I.isVolatile();
4723 MachineMemOperand::Flags MMOFlags =
4724 TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4725
  // Pick the chain the loads hang off: volatile loads serialize against
  // everything, constant-memory loads against nothing.
4726 SDValue Root;
4727 bool ConstantMemory = false;
4728 if (isVolatile)
4729 // Serialize volatile loads with other side effects.
4730 Root = getRoot();
4731 else if (NumValues > MaxParallelChains)
4732 Root = getMemoryRoot();
4733 else if (BatchAA &&
4734 BatchAA->pointsToConstantMemory(MemoryLocation(
4735 SV,
4736 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4737 AAInfo))) {
4738 // Do not serialize (non-volatile) loads of constant memory with anything.
4739 Root = DAG.getEntryNode();
4740 ConstantMemory = true;
4742 } else {
4743 // Do not serialize non-volatile loads against each other.
4744 Root = DAG.getRoot();
4745 }
4746
4747 SDLoc dl = getCurSDLoc();
4748
4749 if (isVolatile)
4750 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4751
4752 SmallVector<SDValue, 4> Values(NumValues);
4753 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4754
4755 unsigned ChainI = 0;
4756 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4757 // Serializing loads here may result in excessive register pressure, and
4758 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4759 // could recover a bit by hoisting nodes upward in the chain by recognizing
4760 // they are side-effect free or do not alias. The optimizer should really
4761 // avoid this case by converting large object/array copies to llvm.memcpy
4762 // (MaxParallelChains should always remain as failsafe).
4763 if (ChainI == MaxParallelChains) {
4764 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4765 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4766 ArrayRef(Chains.data(), ChainI));
4767 Root = Chain;
4768 ChainI = 0;
4769 }
4770
4771 // TODO: MachinePointerInfo only supports a fixed length offset.
4772 MachinePointerInfo PtrInfo =
4773 !Offsets[i].isScalable() || Offsets[i].isZero()
4774 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4775 : MachinePointerInfo();
4776
4777 SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4778 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4779 MMOFlags, AAInfo, Ranges);
4780 Chains[ChainI] = L.getValue(1);
4781
4782 if (MemVTs[i] != ValueVTs[i])
4783 L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4784
  // Propagate !nofpclass metadata as an AssertNoFPClass node.
4785 if (MDNode *NoFPClassMD = I.getMetadata(LLVMContext::MD_nofpclass)) {
4786 uint64_t FPTestInt =
4787 cast<ConstantInt>(
4788 cast<ConstantAsMetadata>(NoFPClassMD->getOperand(0))->getValue())
4789 ->getZExtValue();
4790 if (FPTestInt != fcNone) {
4791 SDValue FPTestConst =
4792 DAG.getTargetConstant(FPTestInt, SDLoc(), MVT::i32);
4793 L = DAG.getNode(ISD::AssertNoFPClass, dl, L.getValueType(), L,
4794 FPTestConst);
4795 }
4796 }
4797 Values[i] = L;
4798 }
4799
4800 if (!ConstantMemory) {
4801 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4802 ArrayRef(Chains.data(), ChainI));
4803 if (isVolatile)
4804 DAG.setRoot(Chain);
4805 else
4806 PendingLoads.push_back(Chain);
4807 }
4808
4809 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4810 DAG.getVTList(ValueVTs), Values));
4811}
4812
4813void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4814 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4815 "call visitStoreToSwiftError when backend supports swifterror");
4816
4817 SmallVector<EVT, 4> ValueVTs;
4818 SmallVector<uint64_t, 4> Offsets;
4819 const Value *SrcV = I.getOperand(0);
4820 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4821 SrcV->getType(), ValueVTs, /*MemVTs=*/nullptr, &Offsets, 0);
4822 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4823 "expect a single EVT for swifterror");
4824
4825 SDValue Src = getValue(SrcV);
4826 // Create a virtual register, then update the virtual register.
4827 Register VReg =
4828 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4829 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4830 // Chain can be getRoot or getControlRoot.
4831 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4832 SDValue(Src.getNode(), Src.getResNo()));
4833 DAG.setRoot(CopyNode);
4834}
4835
4836void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4837 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4838 "call visitLoadFromSwiftError when backend supports swifterror");
4839
4840 assert(!I.isVolatile() &&
4841 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4842 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4843 "Support volatile, non temporal, invariant for load_from_swift_error");
4844
4845 const Value *SV = I.getOperand(0);
4846 Type *Ty = I.getType();
4847 assert(
4848 (!BatchAA ||
4849 !BatchAA->pointsToConstantMemory(MemoryLocation(
4850 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4851 I.getAAMetadata()))) &&
4852 "load_from_swift_error should not be constant memory");
4853
4854 SmallVector<EVT, 4> ValueVTs;
4855 SmallVector<uint64_t, 4> Offsets;
4856 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4857 ValueVTs, /*MemVTs=*/nullptr, &Offsets, 0);
4858 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4859 "expect a single EVT for swifterror");
4860
4861 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4862 SDValue L = DAG.getCopyFromReg(
4863 getRoot(), getCurSDLoc(),
4864 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4865
4866 setValue(&I, L);
4867}
4868
// Lower a (non-atomic) 'store' instruction. Swifterror stores are diverted
// to virtual-register copies; everything else becomes one ISD store per leaf
// value of the stored type, with chains batched in groups of
// MaxParallelChains and merged through TokenFactor nodes.
// NOTE(review): this listing dropped line 4892 (the declaration of the
// Offsets vector passed to ComputeValueVTs below) — compare against upstream
// before editing.
4869void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4870 if (I.isAtomic())
4871 return visitAtomicStore(I);
4872
4873 const Value *SrcV = I.getOperand(0);
4874 const Value *PtrV = I.getOperand(1);
4875
4876 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4877 if (TLI.supportSwiftError()) {
4878 // Swifterror values can come from either a function parameter with
4879 // swifterror attribute or an alloca with swifterror attribute.
4880 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4881 if (Arg->hasSwiftErrorAttr())
4882 return visitStoreToSwiftError(I);
4883 }
4884
4885 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4886 if (Alloca->isSwiftError())
4887 return visitStoreToSwiftError(I);
4888 }
4889 }
4890
4891 SmallVector<EVT, 4> ValueVTs, MemVTs;
4893 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4894 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4895 unsigned NumValues = ValueVTs.size();
4896 if (NumValues == 0)
4897 return;
4898
4899 // Get the lowered operands. Note that we do this after
4900 // checking if NumResults is zero, because with zero results
4901 // the operands won't have values in the map.
4902 SDValue Src = getValue(SrcV);
4903 SDValue Ptr = getValue(PtrV);
4904
4905 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4906 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4907 SDLoc dl = getCurSDLoc();
4908 Align Alignment = I.getAlign();
4909 AAMDNodes AAInfo = I.getAAMetadata();
4910
4911 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4912
4913 unsigned ChainI = 0;
4914 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4915 // See visitLoad comments.
4916 if (ChainI == MaxParallelChains) {
4917 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4918 ArrayRef(Chains.data(), ChainI));
4919 Root = Chain;
4920 ChainI = 0;
4921 }
4922
4923 // TODO: MachinePointerInfo only supports a fixed length offset.
4924 MachinePointerInfo PtrInfo =
4925 !Offsets[i].isScalable() || Offsets[i].isZero()
4926 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4927 : MachinePointerInfo();
4928
4929 SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4930 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4931 if (MemVTs[i] != ValueVTs[i])
4932 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4933 SDValue St =
4934 DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4935 Chains[ChainI] = St;
4936 }
4937
4938 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4939 ArrayRef(Chains.data(), ChainI));
4940 setValue(&I, StoreNode);
4941 DAG.setRoot(StoreNode);
4942}
4943
// Lower @llvm.masked.store (or @llvm.masked.compressstore when IsCompressing
// is set) to a MaskedStore node; targets with conditional scalar
// load/store support may instead lower through
// TargetLowering::visitMaskedStore.
// NOTE(review): this listing dropped line 4962, the statement that ORs the
// nontemporal flag into MMOFlags — compare against upstream before editing.
4944void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4945 bool IsCompressing) {
4946 SDLoc sdl = getCurSDLoc();
4947
  // llvm.masked.store.*(Src0, Ptr, Mask); alignment comes from the pointer
  // parameter's align attribute (defaulting to 1).
4948 Value *Src0Operand = I.getArgOperand(0);
4949 Value *PtrOperand = I.getArgOperand(1);
4950 Value *MaskOperand = I.getArgOperand(2);
4951 Align Alignment = I.getParamAlign(1).valueOrOne();
4952
4953 SDValue Ptr = getValue(PtrOperand);
4954 SDValue Src0 = getValue(Src0Operand);
4955 SDValue Mask = getValue(MaskOperand);
4956 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4957
4958 EVT VT = Src0.getValueType();
4959
4960 auto MMOFlags = MachineMemOperand::MOStore;
4961 if (I.hasMetadata(LLVMContext::MD_nontemporal))
4963
4964 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4965 MachinePointerInfo(PtrOperand), MMOFlags,
4966 LocationSize::upperBound(VT.getStoreSize()), Alignment,
4967 I.getAAMetadata());
4968
4969 const auto &TLI = DAG.getTargetLoweringInfo();
4970
4971 SDValue StoreNode =
4972 !IsCompressing && TTI->hasConditionalLoadStoreForType(
4973 I.getArgOperand(0)->getType(), /*IsStore=*/true)
4974 ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0,
4975 Mask)
4976 : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask,
4977 VT, MMO, ISD::UNINDEXED, /*Truncating=*/false,
4978 IsCompressing);
4979 DAG.setRoot(StoreNode);
4980 setValue(&I, StoreNode);
4981}
4982
4983// Get a uniform base for the Gather/Scatter intrinsic.
4984// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4985// We try to represent it as a base pointer + vector of indices.
4986// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4987// The first operand of the GEP may be a single pointer or a vector of pointers
4988// Example:
4989// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4990// or
4991// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4992// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4993//
4994// When the first GEP operand is a single pointer, it is the uniform base we
4995// are looking for. If the first operand of the GEP is a splat vector, we
4996// extract the splat value and use it as a uniform base.
4997// In all other cases the function returns 'false'.
// Decompose the vector-of-pointers operand \p Ptr of a gather/scatter into
// (Base, Index, Scale) when a uniform base exists: either a splat-constant
// pointer, or a same-block single-index GEP with a scalar base and vector
// index. Returns false when no such decomposition applies.
// NOTE(review): this listing dropped line 5022, the declaration of GEP (a
// dyn_cast of Ptr to GetElementPtrInst) used below — compare against
// upstream before editing.
4998static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4999 SDValue &Scale, SelectionDAGBuilder *SDB,
5000 const BasicBlock *CurBB, uint64_t ElemSize) {
5001 SelectionDAG& DAG = SDB->DAG;
5002 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5003 const DataLayout &DL = DAG.getDataLayout();
5004
5005 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
5006
5007 // Handle splat constant pointer.
5008 if (auto *C = dyn_cast<Constant>(Ptr)) {
5009 C = C->getSplatValue();
5010 if (!C)
5011 return false;
5012
5013 Base = SDB->getValue(C);
5014
  // Splat base: the index vector is all zeros with unit scale.
5015 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5016 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
5017 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
5018 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
5019 return true;
5020 }
5021
5023 if (!GEP || GEP->getParent() != CurBB)
5024 return false;
5025
5026 if (GEP->getNumOperands() != 2)
5027 return false;
5028
5029 const Value *BasePtr = GEP->getPointerOperand();
5030 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
5031
5032 // Make sure the base is scalar and the index is a vector.
5033 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
5034 return false;
5035
5036 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
5037 if (ScaleVal.isScalable())
5038 return false;
5039
5040 // Target may not support the required addressing mode.
5041 if (ScaleVal != 1 &&
5042 !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
5043 return false;
5044
5045 Base = SDB->getValue(BasePtr);
5046 Index = SDB->getValue(IndexVal);
5047
5048 Scale =
5049 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
5050 return true;
5051}
5052
5053void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
5054 SDLoc sdl = getCurSDLoc();
5055
5056 // llvm.masked.scatter.*(Src0, Ptrs, Mask)
5057 const Value *Ptr = I.getArgOperand(1);
5058 SDValue Src0 = getValue(I.getArgOperand(0));
5059 SDValue Mask = getValue(I.getArgOperand(2));
5060 EVT VT = Src0.getValueType();
5061 Align Alignment = I.getParamAlign(1).valueOrOne();
5062 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5063
5064 SDValue Base;
5065 SDValue Index;
5066 SDValue Scale;
5067 bool UniformBase = getUniformBase(Ptr, Base, Index, Scale, this,
5068 I.getParent(), VT.getScalarStoreSize());
5069
5070 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5071 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5072 MachinePointerInfo(AS), MachineMemOperand::MOStore,
5073 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
5074 if (!UniformBase) {
5075 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5076 Index = getValue(Ptr);
5077 Scale =
5078 DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5079 }
5080
5081 EVT IdxVT = Index.getValueType();
5082 EVT EltTy = IdxVT.getVectorElementType();
5083 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5084 EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
5085 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5086 }
5087
5088 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
5089 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
5090 Ops, MMO, ISD::SIGNED_SCALED, false);
5091 DAG.setRoot(Scatter);
5092 setValue(&I, Scatter);
5093}
5094
// Lower @llvm.masked.load (or @llvm.masked.expandload when IsExpanding is
// set) to a MaskedLoad node; targets with conditional scalar load/store
// support may instead lower through TargetLowering::visitMaskedLoad. Loads
// of provably-constant memory are not chained.
// NOTE(review): this listing dropped lines 5120 and 5122, the statements
// that OR the nontemporal and invariant flags into MMOFlags — compare
// against upstream before editing.
5095void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
5096 SDLoc sdl = getCurSDLoc();
5097
  // llvm.masked.load.*(Ptr, Mask, PassThru); alignment comes from the
  // pointer parameter's align attribute (defaulting to 1).
5098 Value *PtrOperand = I.getArgOperand(0);
5099 Value *MaskOperand = I.getArgOperand(1);
5100 Value *Src0Operand = I.getArgOperand(2);
5101 Align Alignment = I.getParamAlign(0).valueOrOne();
5102
5103 SDValue Ptr = getValue(PtrOperand);
5104 SDValue Src0 = getValue(Src0Operand);
5105 SDValue Mask = getValue(MaskOperand);
5106 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
5107
5108 EVT VT = Src0.getValueType();
5109 AAMDNodes AAInfo = I.getAAMetadata();
5110 const MDNode *Ranges = getRangeMetadata(I);
5111
5112 // Do not serialize masked loads of constant memory with anything.
5113 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
5114 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
5115
5116 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
5117
5118 auto MMOFlags = MachineMemOperand::MOLoad;
5119 if (I.hasMetadata(LLVMContext::MD_nontemporal))
5121 if (I.hasMetadata(LLVMContext::MD_invariant_load))
5123
5124 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5125 MachinePointerInfo(PtrOperand), MMOFlags,
5126 LocationSize::upperBound(VT.getStoreSize()), Alignment, AAInfo, Ranges);
5127
5128 const auto &TLI = DAG.getTargetLoweringInfo();
5129
5130 // The Load/Res may point to different values and both of them are output
5131 // variables.
5132 SDValue Load;
5133 SDValue Res;
5134 if (!IsExpanding &&
5135 TTI->hasConditionalLoadStoreForType(Src0Operand->getType(),
5136 /*IsStore=*/false))
5137 Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask);
5138 else
5139 Res = Load =
5140 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
5141 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
5142 if (AddToChain)
5143 PendingLoads.push_back(Load.getValue(1));
5144 setValue(&I, Res);
5145}
5146
// Lower @llvm.masked.gather: attempt a uniform base + index-vector
// decomposition (see getUniformBase); otherwise gather relative to a zero
// base using the raw pointer vector as the index. The result chain is added
// to PendingLoads.
// NOTE(review): this listing dropped line 5190, the trailing arguments of
// the DAG.getMaskedGather call — compare against upstream before editing.
5147void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
5148 SDLoc sdl = getCurSDLoc();
5149
5150 // @llvm.masked.gather.*(Ptrs, Mask, Src0)
5151 const Value *Ptr = I.getArgOperand(0);
5152 SDValue Src0 = getValue(I.getArgOperand(2));
5153 SDValue Mask = getValue(I.getArgOperand(1));
5154
5155 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5156 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5157 Align Alignment = I.getParamAlign(0).valueOrOne();
5158
5159 const MDNode *Ranges = getRangeMetadata(I);
5160
5161 SDValue Root = DAG.getRoot();
5162 SDValue Base;
5163 SDValue Index;
5164 SDValue Scale;
5165 bool UniformBase = getUniformBase(Ptr, Base, Index, Scale, this,
5166 I.getParent(), VT.getScalarStoreSize());
5167 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5168 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5169 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
5170 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
5171 Ranges);
5172
5173 if (!UniformBase) {
  // No common base: gather from address zero through the pointer vector.
5174 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5175 Index = getValue(Ptr);
5176 Scale =
5177 DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5178 }
5179
  // Widen the index element type if the target requires it.
5180 EVT IdxVT = Index.getValueType();
5181 EVT EltTy = IdxVT.getVectorElementType();
5182 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5183 EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
5184 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5185 }
5186
5187 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
5188 SDValue Gather =
5189 DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl, Ops, MMO,
5191
5192 PendingLoads.push_back(Gather.getValue(1));
5193 setValue(&I, Gather);
5194}
5195
// Lower a 'cmpxchg' instruction to an atomic compare-and-swap node producing
// (old value, success flag, chain); the chain result becomes the new root.
// NOTE(review): this listing dropped line 5216, the head of the
// DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, ...) call whose
// trailing arguments appear below — compare against upstream before editing.
5196void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
5197 SDLoc dl = getCurSDLoc();
5198 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
5199 AtomicOrdering FailureOrdering = I.getFailureOrdering();
5200 SyncScope::ID SSID = I.getSyncScopeID();
5201
5202 SDValue InChain = getRoot();
5203
5204 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
5205 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5206
5207 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5208 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5209
5210 MachineFunction &MF = DAG.getMachineFunction();
5211 MachineMemOperand *MMO = MF.getMachineMemOperand(
5212 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5213 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
5214 FailureOrdering);
5215
5217 dl, MemVT, VTs, InChain,
5218 getValue(I.getPointerOperand()),
5219 getValue(I.getCompareOperand()),
5220 getValue(I.getNewValOperand()), MMO);
5221
5222 SDValue OutChain = L.getValue(2);
5223
5224 setValue(&I, L);
5225 DAG.setRoot(OutChain);
5226}
5227
// Lower an 'atomicrmw' instruction: map the IR operation to the matching
// ISD atomic opcode, then emit a single atomic node carrying a machine
// memory operand with the instruction's ordering and sync scope.
// NOTE(review): this extraction dropped the interior of the opcode-mapping
// switch (original lines 5230-5271) — the declaration of NT and the case
// labels that assign the ISD::ATOMIC_LOAD_* opcodes are missing; only the
// 'break' statements remain. Compare against upstream before editing.
5228void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
5229 SDLoc dl = getCurSDLoc();
5231 switch (I.getOperation()) {
5232 default: llvm_unreachable("Unknown atomicrmw operation");
5250 break;
5253 break;
5256 break;
5259 break;
5262 break;
5265 break;
5268 break;
5271 break;
5272 }
5273 AtomicOrdering Ordering = I.getOrdering();
5274 SyncScope::ID SSID = I.getSyncScopeID();
5275
5276 SDValue InChain = getRoot();
5277
5278 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
5279 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5280 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5281
5282 MachineFunction &MF = DAG.getMachineFunction();
5283 MachineMemOperand *MMO = MF.getMachineMemOperand(
5284 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5285 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
5286
5287 SDValue L =
5288 DAG.getAtomic(NT, dl, MemVT, InChain,
5289 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
5290 MMO);
5291
5292 SDValue OutChain = L.getValue(1);
5293
5294 setValue(&I, L);
5295 DAG.setRoot(OutChain);
5296}
5297
5298void SelectionDAGBuilder::visitFence(const FenceInst &I) {
5299 SDLoc dl = getCurSDLoc();
5300 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5301 SDValue Ops[3];
5302 Ops[0] = getRoot();
5303 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
5304 TLI.getFenceOperandTy(DAG.getDataLayout()));
5305 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
5306 TLI.getFenceOperandTy(DAG.getDataLayout()));
5307 SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
5308 setValue(&I, N);
5309 DAG.setRoot(N);
5310}
5311
5312void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
5313 SDLoc dl = getCurSDLoc();
5314 AtomicOrdering Order = I.getOrdering();
5315 SyncScope::ID SSID = I.getSyncScopeID();
5316
5317 SDValue InChain = getRoot();
5318
5319 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5320 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5321 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
5322
5323 if (!TLI.supportsUnalignedAtomics() &&
5324 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5325 report_fatal_error("Cannot generate unaligned atomic load");
5326
5327 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
5328
5329 const MDNode *Ranges = getRangeMetadata(I);
5330 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5331 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5332 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5333
5334 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
5335
5336 SDValue Ptr = getValue(I.getPointerOperand());
5337 SDValue L =
5338 DAG.getAtomicLoad(ISD::NON_EXTLOAD, dl, MemVT, MemVT, InChain, Ptr, MMO);
5339
5340 SDValue OutChain = L.getValue(1);
5341 if (MemVT != VT)
5342 L = DAG.getPtrExtOrTrunc(L, dl, VT);
5343
5344 setValue(&I, L);
5345 DAG.setRoot(OutChain);
5346}
5347
5348void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
5349 SDLoc dl = getCurSDLoc();
5350
5351 AtomicOrdering Ordering = I.getOrdering();
5352 SyncScope::ID SSID = I.getSyncScopeID();
5353
5354 SDValue InChain = getRoot();
5355
5356 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5357 EVT MemVT =
5358 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5359
5360 if (!TLI.supportsUnalignedAtomics() &&
5361 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5362 report_fatal_error("Cannot generate unaligned atomic store");
5363
5364 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
5365
5366 MachineFunction &MF = DAG.getMachineFunction();
5367 MachineMemOperand *MMO = MF.getMachineMemOperand(
5368 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5369 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
5370
5371 SDValue Val = getValue(I.getValueOperand());
5372 if (Val.getValueType() != MemVT)
5373 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5374 SDValue Ptr = getValue(I.getPointerOperand());
5375
5376 SDValue OutChain =
5377 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5378
5379 setValue(&I, OutChain);
5380 DAG.setRoot(OutChain);
5381}
5382
5383/// Check if this intrinsic call depends on the chain (1st return value)
5384/// and if it only *loads* memory.
5385/// Ignore the callsite's attributes. A specific call site may be marked with
5386/// readnone, but the lowering code will expect the chain based on the
5387/// definition.
5388std::pair<bool, bool>
5389SelectionDAGBuilder::getTargetIntrinsicCallProperties(const CallBase &I) {
5390 const Function *F = I.getCalledFunction();
5391 bool HasChain = !F->doesNotAccessMemory();
5392 bool OnlyLoad =
5393 HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow();
5394
5395 return {HasChain, OnlyLoad};
5396}
5397
// Collect the SDValue operand list for lowering a target intrinsic call:
// optional chain, intrinsic ID, the call's arguments (immargs become target
// constants), and any deactivation-symbol / convergence-control bundle
// operands.
SmallVector<SDValue, 8> SelectionDAGBuilder::getTargetIntrinsicOperands(
    const CallBase &I, bool HasChain, bool OnlyLoad,
    TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Build the operand list.
  // NOTE(review): the declaration of the returned operand vector (presumably
  // `SmallVector<SDValue, 8> Ops;`) appears to have been dropped by
  // extraction here; restore it from the original file.
  if (HasChain) { // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
  if (!TgtMemIntrinsicInfo || TgtMemIntrinsicInfo->opc == ISD::INTRINSIC_VOID ||
      TgtMemIntrinsicInfo->opc == ISD::INTRINSIC_W_CHAIN)
    Ops.push_back(DAG.getTargetConstant(I.getIntrinsicID(), getCurSDLoc(),
                                        TLI.getPointerTy(DAG.getDataLayout())));

  // Add all operands of the call to the operand list.
  for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
    const Value *Arg = I.getArgOperand(i);
    if (!I.paramHasAttr(i, Attribute::ImmArg)) {
      Ops.push_back(getValue(Arg));
      continue;
    }

    // Use TargetConstant instead of a regular constant for immarg.
    EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
    } else {
      // Non-integer immarg: must be a floating-point constant.
      Ops.push_back(
          DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
    }
  }

  if (std::optional<OperandBundleUse> Bundle =
          I.getOperandBundle(LLVMContext::OB_deactivation_symbol)) {
    auto *Sym = Bundle->Inputs[0].get();
    SDValue SDSym = getValue(Sym);
    SDSym = DAG.getDeactivationSymbol(cast<GlobalValue>(Sym));
    Ops.push_back(SDSym);
  }

  if (std::optional<OperandBundleUse> Bundle =
          I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    Value *Token = Bundle->Inputs[0].get();
    SDValue ConvControlToken = getValue(Token);
    assert(Ops.back().getValueType() != MVT::Glue &&
           "Did not expect another glue node here.");
    // Glue the convergence-control token to the intrinsic node.
    ConvControlToken =
        DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
    Ops.push_back(ConvControlToken);
  }

  return Ops;
}
5461
5462SDVTList SelectionDAGBuilder::getTargetIntrinsicVTList(const CallBase &I,
5463 bool HasChain) {
5464 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5465
5466 SmallVector<EVT, 4> ValueVTs;
5467 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5468
5469 if (HasChain)
5470 ValueVTs.push_back(MVT::Other);
5471
5472 return DAG.getVTList(ValueVTs);
5473}
5474
5475/// Get an INTRINSIC node for a target intrinsic which does not touch memory.
5476SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5477 const Type &IntrinsicVT, bool HasChain, ArrayRef<SDValue> Ops,
5478 const SDVTList &VTs) {
5479 if (!HasChain)
5480 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5481 if (!IntrinsicVT.isVoidTy())
5482 return DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5483 return DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5484}
5485
5486/// Set root, convert return type if necessary and check alignment.
5487SDValue SelectionDAGBuilder::handleTargetIntrinsicRet(const CallBase &I,
5488 bool HasChain,
5489 bool OnlyLoad,
5490 SDValue Result) {
5491 if (HasChain) {
5492 SDValue Chain = Result.getValue(Result.getNode()->getNumValues() - 1);
5493 if (OnlyLoad)
5494 PendingLoads.push_back(Chain);
5495 else
5496 DAG.setRoot(Chain);
5497 }
5498
5499 if (I.getType()->isVoidTy())
5500 return Result;
5501
5502 if (MaybeAlign Alignment = I.getRetAlign(); InsertAssertAlign && Alignment) {
5503 // Insert `assertalign` node if there's an alignment.
5504 Result = DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5505 } else if (!isa<VectorType>(I.getType())) {
5506 Result = lowerRangeToAssertZExt(DAG, I, Result);
5507 }
5508
5509 return Result;
5510}
5511
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
                                               unsigned Intrinsic) {
  auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(I);

  // Infos is set by getTgtMemIntrinsic.
  // NOTE(review): the declaration of `Infos` (a SmallVector of
  // TargetLowering::IntrinsicInfo) appears to have been dropped by
  // extraction; restore it from the original file.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TLI.getTgtMemIntrinsic(Infos, I, DAG.getMachineFunction(), Intrinsic);
  // Use the first (primary) info determines the node opcode.
  TargetLowering::IntrinsicInfo *Info = !Infos.empty() ? &Infos[0] : nullptr;

  // NOTE(review): the start of the `Ops` declaration/initialization was
  // dropped by extraction here.
      getTargetIntrinsicOperands(I, HasChain, OnlyLoad, Info);
  SDVTList VTs = getTargetIntrinsicVTList(I, HasChain);

  // Propagate fast-math-flags from IR to node(s).
  SDNodeFlags Flags;
  if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPMO);
  SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);

  // Create the node.
  // NOTE(review): the declaration of `Result` was dropped by extraction here.

  // In some cases, custom collection of operands from CallInst I may be needed.
  if (!Infos.empty()) {
    // This is target intrinsic that touches memory
    // Create MachineMemOperands for each memory access described by the target.
    MachineFunction &MF = DAG.getMachineFunction();
    // NOTE(review): the declaration of the `MMOs` vector was dropped by
    // extraction here.
    for (const auto &Info : Infos) {
      // TODO: We currently just fallback to address space 0 if
      // getTgtMemIntrinsic didn't yield anything useful.
      MachinePointerInfo MPI;
      if (Info.ptrVal)
        MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
      else if (Info.fallbackAddressSpace)
        MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
      EVT MemVT = Info.memVT;
      LocationSize Size = LocationSize::precise(Info.size);
      // NOTE(review): the statement guarded by this condition (taken when
      // the reported size is zero) was dropped by extraction.
      if (Size.hasValue() && !Size.getValue())
      Align Alignment = Info.align.value_or(DAG.getEVTAlign(MemVT));
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MPI, Info.flags, Size, Alignment, I.getAAMetadata(),
          /*Ranges=*/nullptr, Info.ssid, Info.order, Info.failureOrder);
      MMOs.push_back(MMO);
    }

    Result = DAG.getMemIntrinsicNode(Info->opc, getCurSDLoc(), VTs, Ops,
                                     Info->memVT, MMOs);
  } else {
    Result = getTargetNonMemIntrinsicNode(*I.getType(), HasChain, Ops, VTs);
  }

  Result = handleTargetIntrinsicRet(I, HasChain, OnlyLoad, Result);

  setValue(&I, Result);
}
5574
/// GetSignificand - Get the significand and build it into a floating-point
/// number with exponent of 1:
///
/// Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the hexadecimal representation of floating point value.
// NOTE(review): the signature line of this helper (taking the SelectionDAG,
// the i32 bit pattern `Op`, and a debug location `dl`) appears to have been
// dropped by extraction; restore it from the original file.
  // Keep only the 23 mantissa bits of the f32 bit pattern.
  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x007fffff, dl, MVT::i32));
  // OR in a biased exponent of 127, i.e. an exponent of 1.
  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
                           DAG.getConstant(0x3f800000, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}
5588
/// GetExponent - Get the exponent:
///
/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the hexadecimal representation of floating point value.
// NOTE(review): the first line of this helper's signature was dropped by
// extraction; only the trailing parameter line survives. Restore it from the
// original file.
                           const TargetLowering &TLI, const SDLoc &dl) {
  // Isolate the 8 exponent bits of the f32 bit pattern.
  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x7f800000, dl, MVT::i32));
  // Shift the exponent field down to bit 0.
  SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
                           DAG.getShiftAmountConstant(23, MVT::i32, dl));
  // Remove the IEEE-754 exponent bias (127).
  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
                           DAG.getConstant(127, dl, MVT::i32));
  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
5604
5605/// getF32Constant - Get 32-bit floating point constant.
5606static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5607 const SDLoc &dl) {
5608 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5609 MVT::f32);
5610}
5611
// Approximate exp2(t0) in f32 for limited-precision mode: split t0 into
// integer and fractional parts, approximate 2^frac with a minimax polynomial
// (Horner form; coefficients are raw f32 bit patterns), then add the integer
// part into the exponent field in the integer domain.
// NOTE(review): the opening of this definition (its `static SDValue
// getLimitedPrecisionExp2(...)` signature line) was dropped by extraction;
// only the trailing parameter line survives. Restore it from the original
// file.
                                        SelectionDAG &DAG) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  // IntegerPartOfX = (int32_t)t0;
  SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

  // FractionalPartOfX = t0 - (float)IntegerPartOfX;
  SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
  SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

  // IntegerPartOfX <<= 23;
  IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
                               DAG.getShiftAmountConstant(23, MVT::i32, dl));

  SDValue TwoToFractionalPartOfX;
  if (LimitFloatPrecision <= 6) {
    // For floating-point precision of 6:
    //
    //   TwoToFractionalPartOfX =
    //     0.997535578f +
    //       (0.735607626f + 0.252464424f * x) * x;
    //
    // error 0.0144103317, which is 6 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3e814304, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3f3c50c8, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                                         getF32Constant(DAG, 0x3f7f5e7e, dl));
  } else if (LimitFloatPrecision <= 12) {
    // For floating-point precision of 12:
    //
    //   TwoToFractionalPartOfX =
    //     0.999892986f +
    //       (0.696457318f +
    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
    //
    // error 0.000107046256, which is 13 to 14 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3da235e3, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3e65b8f3, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3f324b07, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                                         getF32Constant(DAG, 0x3f7ff8fd, dl));
  } else { // LimitFloatPrecision <= 18
    // For floating-point precision of 18:
    //
    //   TwoToFractionalPartOfX =
    //     0.999999982f +
    //       (0.693148872f +
    //         (0.240227044f +
    //           (0.554906021e-1f +
    //             (0.961591928e-2f +
    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
    // error 2.47208000*10^(-7), which is better than 18 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3924b03e, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3ab24b87, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3c1d8c17, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                             getF32Constant(DAG, 0x3d634a1d, dl));
    SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
    SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                             getF32Constant(DAG, 0x3e75fe14, dl));
    SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
    SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                              getF32Constant(DAG, 0x3f317234, dl));
    SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                         getF32Constant(DAG, 0x3f800000, dl));
  }

  // Add the exponent into the result in integer domain.
  SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}
5699
/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
// NOTE(review): the first line of this function's signature was dropped by
// extraction; only the trailing parameter line survives. Restore it from the
// original file.
                         const TargetLowering &TLI, SDNodeFlags Flags) {
  // NOTE(review): the second line of this condition (presumably the
  // LimitFloatPrecision range check) was dropped by extraction.
  if (Op.getValueType() == MVT::f32 &&

    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    // t0 = Op * log2(e)

    // TODO: What fast-math-flags should be set here?
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
                             DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
}
5721
/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
// NOTE(review): the first line of this function's signature was dropped by
// extraction; only the trailing parameter line survives. Restore it from the
// original file.
                         const TargetLowering &TLI, SDNodeFlags Flags) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  // NOTE(review): the second line of this condition (presumably the
  // LimitFloatPrecision range check) was dropped by extraction.
  if (Op.getValueType() == MVT::f32 &&
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log(2).
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent =
        DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                    DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Minimax polynomial approximations of log(mantissa), evaluated in
    // Horner form; the hex constants are raw f32 coefficient bit patterns.
    SDValue LogOfMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   LogofMantissa =
      //     -1.1609546f +
      //       (1.4034025f - 0.23903021f * x) * x;
      //
      // error 0.0034276066, which is better than 8 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbe74c456, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3fb3a2b1, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                  getF32Constant(DAG, 0x3f949a29, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   LogOfMantissa =
      //     -1.7417939f +
      //       (2.8212026f +
      //         (-1.4699568f +
      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
      //
      // error 0.000061011436, which is 14 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbd67b6d6, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ee4f4b8, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fbc278b, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40348e95, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                  getF32Constant(DAG, 0x3fdef31a, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   LogOfMantissa =
      //     -2.1072184f +
      //       (4.2372794f +
      //         (-3.7029485f +
      //           (2.2781945f +
      //             (-0.87823314f +
      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
      //
      // error 0.0000023660568, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbc91e5ac, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e4350aa, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f60d3e3, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x4011cdf0, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x406cfd1c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x408797cb, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                  getF32Constant(DAG, 0x4006dcab, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
}
5820
/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
// NOTE(review): the first line of this function's signature was dropped by
// extraction; only the trailing parameter line survives. Restore it from the
// original file.
                          const TargetLowering &TLI, SDNodeFlags Flags) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  // NOTE(review): the second line of this condition (presumably the
  // LimitFloatPrecision range check) was dropped by extraction.
  if (Op.getValueType() == MVT::f32 &&
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Get the exponent.
    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Different possible minimax approximations of significand in
    // floating-point for various degrees of accuracy over [1,2].
    SDValue Log2ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
      //
      // error 0.0049451742, which is more than 7 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbeb08fe0, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x40019463, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                   getF32Constant(DAG, 0x3fd6633d, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
      //
      // error 0.0000876136000, which is better than 13 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbda7262e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f25280b, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x4007b923, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40823e2f, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                   getF32Constant(DAG, 0x4020d29c, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
      //
      // error 0.0000018516, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbcd2769e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e8ce0b9, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fa22ae7, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40525723, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x40aaf200, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x40c39dad, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                   getF32Constant(DAG, 0x4042902c, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
}
5917
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
// NOTE(review): the first line of this function's signature was dropped by
// extraction; only the trailing parameter line survives. Restore it from the
// original file.
                           const TargetLowering &TLI, SDNodeFlags Flags) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  // NOTE(review): the second line of this condition (presumably the
  // LimitFloatPrecision range check) was dropped by extraction.
  if (Op.getValueType() == MVT::f32 &&
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Minimax polynomial approximations of log10(mantissa), evaluated in
    // Horner form; the hex constants are raw f32 coefficient bit patterns.
    SDValue Log10ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
      //
      // error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                    getF32Constant(DAG, 0x3f011300, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      // error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                    getF32Constant(DAG, 0x3f25f7c3, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      // error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                    getF32Constant(DAG, 0x3f57ce70, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
}
6007
/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
// NOTE(review): the first line of this function's signature and the second
// line of the condition below (presumably the LimitFloatPrecision range
// check) were dropped by extraction; restore them from the original file.
                          const TargetLowering &TLI, SDNodeFlags Flags) {
  if (Op.getValueType() == MVT::f32 &&
    return getLimitedPrecisionExp2(Op, dl, DAG);

  // No special expansion.
  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
}
6019
/// visitPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
// NOTE(review): the first line of this function's signature (introducing dl,
// LHS and RHS) was dropped by extraction; restore it from the original file.
                         SelectionDAG &DAG, const TargetLowering &TLI,
                         SDNodeFlags Flags) {
  bool IsExp10 = false;
  // NOTE(review): the remainder of this condition (presumably the
  // LimitFloatPrecision range check) and the line binding `LHSC` via
  // `dyn_cast<ConstantFPSDNode>(LHS)` were dropped by extraction.
  if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
      APFloat Ten(10.0f);
      // Only the exact constant 10.0f triggers the special expansion.
      IsExp10 = LHSC->isExactlyValue(Ten);
    }
  }

  // TODO: What fast-math-flags should be set on the FMUL node?
  if (IsExp10) {
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    // #define LOG2OF10 3.3219281f
    // t0 = Op * LOG2OF10;
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
                             getF32Constant(DAG, 0x40549a78, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
}
6049
/// ExpandPowI - Expand a llvm.powi intrinsic.
// NOTE(review): the first line of this function's signature (introducing DL,
// LHS and RHS) was dropped by extraction; restore it from the original file.
                          SelectionDAG &DAG) {
  // If RHS is a constant, we can expand this out to a multiplication tree if
  // it's beneficial on the target, otherwise we end up lowering to a call to
  // __powidf2 (for example).
  // NOTE(review): the line binding `RHSC` via `dyn_cast<ConstantSDNode>(RHS)`
  // was dropped by extraction.
    unsigned Val = RHSC->getSExtValue();

    // powi(x, 0) -> 1.0
    if (Val == 0)
      return DAG.getConstantFP(1.0, DL, LHS.getValueType());

    // NOTE(review): the first line of this condition (presumably the call to
    // TLI's isBeneficialToExpandPowI) was dropped by extraction.
                      Val, DAG.shouldOptForSize())) {
      // Get the exponent as a positive value.
      if ((int)Val < 0)
        Val = -Val;
      // We use the simple binary decomposition method to generate the multiply
      // sequence. There are more optimal ways to do this (for example,
      // powi(x,15) generates one more multiply than it should), but this has
      // the benefit of being both really simple and much better than a libcall.
      SDValue Res; // Logically starts equal to 1.0
      SDValue CurSquare = LHS;
      // TODO: Intrinsics should have fast-math-flags that propagate to these
      // nodes.
      while (Val) {
        // Multiply in CurSquare for each set bit of the exponent.
        if (Val & 1) {
          if (Res.getNode())
            Res =
                DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
          else
            Res = CurSquare; // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }

      // If the original was negative, invert the result, producing 1/(x*x*x).
      if (RHSC->getSExtValue() < 0)
        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
      return Res;
    }
  }

  // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}
6101
// Lower a fixed-point division node (ISD::[SU]DIVFIX[SAT]), promoting the
// operand type by one bit when the operation is not legal so the legalizer
// expands it during type legalization rather than failing later.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
                            SDValue LHS, SDValue RHS, SDValue Scale,
                            SelectionDAG &DAG, const TargetLowering &TLI) {
  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  LLVMContext &Ctx = *DAG.getContext();

  // If the type is legal but the operation isn't, this node might survive all
  // the way to operation legalization. If we end up there and we do not have
  // the ability to widen the type (if VT*2 is not legal), we cannot expand the
  // node.

  // Coax the legalizer into expanding the node during type legalization instead
  // by bumping the size by one bit. This will force it to Promote, enabling the
  // early expansion and avoiding the need to expand later.

  // We don't have to do this if Scale is 0; that can always be expanded, unless
  // it's a saturating signed operation. Those can experience true integer
  // division overflow, a case which we must avoid.

  // FIXME: We wouldn't have to do this (or any of the early
  // expansion/promotion) if it was possible to expand a libcall of an
  // illegal type during operation legalization. But it's not, so things
  // get a bit hacky.
  unsigned ScaleInt = Scale->getAsZExtVal();
  if ((ScaleInt > 0 || (Saturating && Signed)) &&
      (TLI.isTypeLegal(VT) ||
       (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
    // NOTE(review): the first line of this statement (binding `Action`,
    // presumably from TLI's fixed-point operation action query) was dropped
    // by extraction.
        Opcode, VT, ScaleInt);
    if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
      EVT PromVT;
      if (VT.isScalarInteger())
        PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
      else if (VT.isVector()) {
        PromVT = VT.getVectorElementType();
        PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
        PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
      } else
        llvm_unreachable("Wrong VT for DIVFIX?");
      LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
      RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
      EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
      // For saturating operations, we need to shift up the LHS to get the
      // proper saturation width, and then shift down again afterwards.
      if (Saturating)
        LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
                          DAG.getConstant(1, DL, ShiftTy));
      SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
      if (Saturating)
        Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
                          DAG.getConstant(1, DL, ShiftTy));
      return DAG.getZExtOrTrunc(Res, DL, VT);
    }
  }

  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
}
6161
// getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
// appended to \p Regs. Nodes not rooted at a CopyFromReg contribute nothing.
static void
getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
                     const SDValue &N) {
  switch (N.getOpcode()) {
  case ISD::CopyFromReg: {
    // Operand 1 of CopyFromReg is the RegisterSDNode carrying the register.
    SDValue Op = N.getOperand(1);
    Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
                      Op.getValueType().getSizeInBits());
    return;
  }
  case ISD::BITCAST:
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::TRUNCATE:
    // Value-preserving (or narrowing) wrappers: look through to the source.
    getUnderlyingArgRegs(Regs, N.getOperand(0));
    return;
  case ISD::BUILD_PAIR:
  case ISD::BUILD_VECTOR:
    // Argument assembled from multiple pieces: collect regs from each one.
    for (SDValue Op : N->op_values())
      getUnderlyingArgRegs(Regs, Op);
    return;
  default:
    return;
  }
}
6190
/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
/// We don't currently support this for variadic dbg_values, as they shouldn't
/// appear for function arguments or in the prologue.
///
/// \returns true if a DBG_VALUE/DBG_INSTR_REF was emitted (or deliberately
/// suppressed to protect existing debug info); false if the caller should
/// fall back to the normal dbg.value lowering.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
    DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
  // Only IR-level function arguments are handled here.
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

  // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
  // we've been asked to pursue.
  auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
                              bool Indirect) {
    if (Reg.isVirtual() && MF.useDebugInstrRef()) {
      // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
      // pointing at the VReg, which will be patched up later.
      auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
      // NOTE(review): the line declaring the operand list ('MOs') appears to
      // have been lost in extraction here — verify against upstream.
          /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
          /* isKill */ false, /* isDead */ false,
          /* isUndef */ false, /* isEarlyClobber */ false,
          /* SubReg */ 0, /* isDebug */ true)});

      auto *NewDIExpr = FragExpr;
      // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
      // the DIExpression.
      if (Indirect)
        NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
      // NOTE(review): the declaration of 'Ops' appears to have been lost in
      // extraction here — verify against upstream.
      NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
      return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
    } else {
      // Create a completely standard DBG_VALUE.
      auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
      return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
    }
  };

  if (Kind == FuncArgumentDbgValueKind::Value) {
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;

    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that also is a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue. This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method. ArgDbgValues are hoisted to the beginning of the entry block. So
    // we should only emit as ArgDbgValue if the Variable is an argument to the
    // current function, and the dbg.value intrinsic is found in the entry
    // block.
    bool VariableIsFunctionInputArg = Variable->isParameter() &&
        !DL->getInlinedAt();
    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
    if (!IsInPrologue && !VariableIsFunctionInputArg)
      return false;

    // Here we assume that a function argument on IR level only can be used to
    // describe one input parameter on source level. If we for example have
    // source code like this
    //
    //    struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //      b = a.x;
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
    //  entry:
    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
    // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we already has used %a1 to describe a parameter
    // we should not handle that last dbg.value here (that would result in an
    // incorrect hoisting of the DBG_VALUE to the function entry).
    // Notice that we allow one dbg.value per IR level argument, to accommodate
    // for the situation with fragments above.
    // If there is no node for the value being handled, we return true to skip
    // the normal generation of debug info, as it would kill existing debug
    // info for the parameter in case of duplicates.
    if (VariableIsFunctionInputArg) {
      unsigned ArgNo = Arg->getArgNo();
      if (ArgNo >= FuncInfo.DescribedArgs.size())
        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return !NodeMap[V].getNode();
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }

  bool IsIndirect = false;
  std::optional<MachineOperand> Op;
  // Some arguments' frame index is recorded during argument lowering.
  int FI = FuncInfo.getArgumentFrameIndex(Arg);
  if (FI != std::numeric_limits<int>::max())
  // NOTE(review): the body of the 'if' above (presumably creating a
  // frame-index operand in 'Op') appears to have been lost in extraction —
  // verify against upstream.

  // NOTE(review): the declaration of 'ArgRegsAndSizes' appears to have been
  // lost in extraction here — verify against upstream.
  if (!Op && N.getNode()) {
    getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    // Only a value covered by a single register can be described directly.
    if (ArgRegsAndSizes.size() == 1)
      Reg = ArgRegsAndSizes.front().first;

    // Prefer the corresponding live-in physical register, if any.
    if (Reg && Reg.isVirtual()) {
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      // NOTE(review): the line creating the register operand in 'Op' appears
      // to have been lost in extraction here — verify against upstream.
      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
    }
  }

  if (!Op && N.getNode()) {
    // Check if frame index is available.
    SDValue LCandidate = peekThroughBitcasts(N);
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
      if (FrameIndexSDNode *FINode =
              dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
    auto splitMultiRegDbgValue =
        [&](ArrayRef<std::pair<Register, TypeSize>> SplitRegs) -> bool {
      unsigned Offset = 0;
      for (const auto &[Reg, RegSizeInBits] : SplitRegs) {
        // FIXME: Scalable sizes are not supported in fragment expressions.
        if (RegSizeInBits.isScalable())
          return false;

        // If the expression is already a fragment, the current register
        // offset+size might extend beyond the fragment. In this case, only
        // the register bits that are inside the fragment are relevant.
        int RegFragmentSizeInBits = RegSizeInBits.getFixedValue();
        if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
          uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
          // The register is entirely outside the expression fragment,
          // so is irrelevant for debug info.
          if (Offset >= ExprFragmentSizeInBits)
            break;
          // The register is partially outside the expression fragment, only
          // the low bits within the fragment are relevant for debug info.
          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
          }
        }

        auto FragmentExpr = DIExpression::createFragmentExpression(
            Expr, Offset, RegFragmentSizeInBits);
        Offset += RegSizeInBits.getFixedValue();
        // If a valid fragment expression cannot be created, the variable's
        // correct value cannot be determined and so it is set as poison.
        if (!FragmentExpr) {
          SDDbgValue *SDV = DAG.getConstantDbgValue(
              Variable, Expr, PoisonValue::get(V->getType()), DL, SDNodeOrder);
          DAG.AddDbgValue(SDV, false);
          continue;
        }
        MachineInstr *NewMI = MakeVRegDbgValue(
            Reg, *FragmentExpr, Kind != FuncArgumentDbgValueKind::Value);
        FuncInfo.ArgDbgValues.push_back(NewMI);
      }

      return true;
    };

    // Check if ValueMap has reg number.
    // NOTE(review): the type/first half of the 'VMI' declaration appears to
    // have been lost in extraction here — verify against upstream.
        VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      const auto &TLI = DAG.getTargetLoweringInfo();
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
                       V->getType(), std::nullopt);
      // Values spread over several registers are emitted as fragments.
      if (RFV.occupiesMultipleRegs())
        return splitMultiRegDbgValue(RFV.getRegsAndSizes());

      Op = MachineOperand::CreateReg(VMI->second, false);
      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
    } else if (ArgRegsAndSizes.size() > 1) {
      // This was split due to the calling convention, and no virtual register
      // mapping exists for the value.
      return splitMultiRegDbgValue(ArgRegsAndSizes);
    }
  }

  if (!Op)
    return false;

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  MachineInstr *NewMI = nullptr;

  if (Op->isReg())
    NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
  else
    NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
                    Variable, Expr);

  // Otherwise, use ArgDbgValues.
  FuncInfo.ArgDbgValues.push_back(NewMI);
  return true;
}
6420
6421/// Return the appropriate SDDbgValue based on N.
6422SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
6423 DILocalVariable *Variable,
6424 DIExpression *Expr,
6425 const DebugLoc &dl,
6426 unsigned DbgSDNodeOrder) {
6427 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
6428 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
6429 // stack slot locations.
6430 //
6431 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
6432 // debug values here after optimization:
6433 //
6434 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
6435 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
6436 //
6437 // Both describe the direct values of their associated variables.
6438 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6439 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6440 }
6441 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
6442 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6443}
6444
6445static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
6446 switch (Intrinsic) {
6447 case Intrinsic::smul_fix:
6448 return ISD::SMULFIX;
6449 case Intrinsic::umul_fix:
6450 return ISD::UMULFIX;
6451 case Intrinsic::smul_fix_sat:
6452 return ISD::SMULFIXSAT;
6453 case Intrinsic::umul_fix_sat:
6454 return ISD::UMULFIXSAT;
6455 case Intrinsic::sdiv_fix:
6456 return ISD::SDIVFIX;
6457 case Intrinsic::udiv_fix:
6458 return ISD::UDIVFIX;
6459 case Intrinsic::sdiv_fix_sat:
6460 return ISD::SDIVFIXSAT;
6461 case Intrinsic::udiv_fix_sat:
6462 return ISD::UDIVFIXSAT;
6463 default:
6464 llvm_unreachable("Unhandled fixed point intrinsic");
6465 }
6466}
6467
/// Given a @llvm.call.preallocated.setup, return the corresponding
/// preallocated call.
static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
  // NOTE(review): an intermediate '->getCalledFunction()' line appears to
  // have been lost in extraction inside this assert — verify against
  // upstream.
  assert(cast<CallBase>(PreallocatedSetup)
      ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
         "expected call_preallocated_setup Value");
  // The setup token's users are @llvm.call.preallocated.arg intrinsics plus
  // the actual call site; the first user that is not the arg intrinsic is
  // that call site.
  for (const auto *U : PreallocatedSetup->users()) {
    auto *UseCall = cast<CallBase>(U);
    const Function *Fn = UseCall->getCalledFunction();
    if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
      return UseCall;
    }
  }
  llvm_unreachable("expected corresponding call to preallocated setup/arg");
}
6484
6485/// If DI is a debug value with an EntryValue expression, lower it using the
6486/// corresponding physical register of the associated Argument value
6487/// (guaranteed to exist by the verifier).
6488bool SelectionDAGBuilder::visitEntryValueDbgValue(
6489 ArrayRef<const Value *> Values, DILocalVariable *Variable,
6490 DIExpression *Expr, DebugLoc DbgLoc) {
6491 if (!Expr->isEntryValue() || !hasSingleElement(Values))
6492 return false;
6493
6494 // These properties are guaranteed by the verifier.
6495 const Argument *Arg = cast<Argument>(Values[0]);
6496 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6497
6498 auto ArgIt = FuncInfo.ValueMap.find(Arg);
6499 if (ArgIt == FuncInfo.ValueMap.end()) {
6500 LLVM_DEBUG(
6501 dbgs() << "Dropping dbg.value: expression is entry_value but "
6502 "couldn't find an associated register for the Argument\n");
6503 return true;
6504 }
6505 Register ArgVReg = ArgIt->getSecond();
6506
6507 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6508 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6509 SDDbgValue *SDV = DAG.getVRegDbgValue(
6510 Variable, Expr, PhysReg, false /*IsIndidrect*/, DbgLoc, SDNodeOrder);
6511 DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6512 return true;
6513 }
6514 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6515 "couldn't find a physical register\n");
6516 return true;
6517}
6518
6519/// Lower the call to the specified intrinsic function.
6520void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
6521 unsigned Intrinsic) {
6522 SDLoc sdl = getCurSDLoc();
6523 switch (Intrinsic) {
6524 case Intrinsic::experimental_convergence_anchor:
6525 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6526 break;
6527 case Intrinsic::experimental_convergence_entry:
6528 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6529 break;
6530 case Intrinsic::experimental_convergence_loop: {
6531 auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
6532 auto *Token = Bundle->Inputs[0].get();
6533 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6534 getValue(Token)));
6535 break;
6536 }
6537 }
6538}
6539
void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
                                               unsigned IntrinsicID) {
  // For now, we're only lowering an 'add' histogram.
  // We can add others later, e.g. saturating adds, min/max.
  assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
         "Tried to lower unsupported histogram type");
  SDLoc sdl = getCurSDLoc();
  Value *Ptr = I.getOperand(0);             // vector of bucket pointers
  SDValue Inc = getValue(I.getOperand(1));  // per-bucket increment
  SDValue Mask = getValue(I.getOperand(2)); // lane mask

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  DataLayout TargetDL = DAG.getDataLayout();
  EVT VT = Inc.getValueType();
  Align Alignment = DAG.getEVTAlign(VT);

  const MDNode *Ranges = getRangeMetadata(I);

  SDValue Root = DAG.getRoot();
  SDValue Base;
  SDValue Index;
  SDValue Scale;
  // Try to split the pointer vector into a uniform base plus a vector index,
  // the form preferred by gather/scatter-style nodes.
  bool UniformBase = getUniformBase(Ptr, Base, Index, Scale, this,
                                    I.getParent(), VT.getScalarStoreSize());

  unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo(AS),
      // NOTE(review): the memory-operand flags argument appears to have been
      // lost in extraction here — verify against upstream.
      MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);

  if (!UniformBase) {
    // No common base found: use a zero base with the whole pointer vector as
    // the index and a unit scale.
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    Scale =
        DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
  }

  EVT IdxVT = Index.getValueType();

  // Avoid using e.g. i32 as index type when the increment must be performed
  // on i64's.
  bool MustExtendIndex = VT.getScalarSizeInBits() > IdxVT.getScalarSizeInBits();
  EVT EltTy = MustExtendIndex ? VT : IdxVT.getVectorElementType();
  if (MustExtendIndex || TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
    EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
    Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
  }

  SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);

  SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
  SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
                                             Ops, MMO, ISD::SIGNED_SCALED);

  // The histogram node both produces the result and updates the chain.
  setValue(&I, Histogram);
  DAG.setRoot(Histogram);
}
6599
void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
                                                       unsigned Intrinsic) {
  assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
         "Tried lowering invalid vector extract last");
  SDLoc sdl = getCurSDLoc();
  const DataLayout &Layout = DAG.getDataLayout();
  SDValue Data = getValue(I.getOperand(0)); // source vector
  SDValue Mask = getValue(I.getOperand(1)); // lane mask

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT ResVT = TLI.getValueType(Layout, I.getType());

  // Find the index of the last active lane, then extract that element.
  EVT ExtVT = TLI.getVectorIdxTy(Layout);
  SDValue Idx = DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
  SDValue Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl, ResVT, Data, Idx);

  Value *Default = I.getOperand(2);
  // NOTE(review): an 'if' line guarding this braced region (conditional on
  // the default operand) appears to have been lost in extraction — the
  // closing brace below is otherwise unmatched; verify against upstream.
    SDValue PassThru = getValue(Default);
    EVT BoolVT = Mask.getValueType().getScalarType();
    // When no lane is active, select the provided default value instead.
    SDValue AnyActive = DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
    Result = DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
  }

  setValue(&I, Result);
}
6626
6627/// Lower the call to the specified intrinsic function.
6628void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6629 unsigned Intrinsic) {
6630 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6631 SDLoc sdl = getCurSDLoc();
6632 DebugLoc dl = getCurDebugLoc();
6633 SDValue Res;
6634
6635 SDNodeFlags Flags;
6636 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6637 Flags.copyFMF(*FPOp);
6638
6639 switch (Intrinsic) {
6640 default:
6641 // By default, turn this into a target intrinsic node.
6642 visitTargetIntrinsic(I, Intrinsic);
6643 return;
6644 case Intrinsic::vscale: {
6645 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6646 setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6647 return;
6648 }
6649 case Intrinsic::vastart: visitVAStart(I); return;
6650 case Intrinsic::vaend: visitVAEnd(I); return;
6651 case Intrinsic::vacopy: visitVACopy(I); return;
6652 case Intrinsic::returnaddress:
6653 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6654 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6655 getValue(I.getArgOperand(0))));
6656 return;
6657 case Intrinsic::addressofreturnaddress:
6658 setValue(&I,
6659 DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6660 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6661 return;
6662 case Intrinsic::sponentry:
6663 setValue(&I,
6664 DAG.getNode(ISD::SPONENTRY, sdl,
6665 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6666 return;
6667 case Intrinsic::frameaddress:
6668 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6669 TLI.getFrameIndexTy(DAG.getDataLayout()),
6670 getValue(I.getArgOperand(0))));
6671 return;
6672 case Intrinsic::read_volatile_register:
6673 case Intrinsic::read_register: {
6674 Value *Reg = I.getArgOperand(0);
6675 SDValue Chain = getRoot();
6677 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6678 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6679 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6680 DAG.getVTList(VT, MVT::Other), Chain, RegName);
6681 setValue(&I, Res);
6682 DAG.setRoot(Res.getValue(1));
6683 return;
6684 }
6685 case Intrinsic::write_register: {
6686 Value *Reg = I.getArgOperand(0);
6687 Value *RegValue = I.getArgOperand(1);
6688 SDValue Chain = getRoot();
6690 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6691 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6692 RegName, getValue(RegValue)));
6693 return;
6694 }
6695 case Intrinsic::memcpy:
6696 case Intrinsic::memcpy_inline: {
6697 const auto &MCI = cast<MemCpyInst>(I);
6698 SDValue Dst = getValue(I.getArgOperand(0));
6699 SDValue Src = getValue(I.getArgOperand(1));
6700 SDValue Size = getValue(I.getArgOperand(2));
6701 assert((!MCI.isForceInlined() || isa<ConstantSDNode>(Size)) &&
6702 "memcpy_inline needs constant size");
6703 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6704 Align DstAlign = MCI.getDestAlign().valueOrOne();
6705 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6706 Align Alignment = std::min(DstAlign, SrcAlign);
6707 bool isVol = MCI.isVolatile();
6708 // FIXME: Support passing different dest/src alignments to the memcpy DAG
6709 // node.
6710 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6711 SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol,
6712 MCI.isForceInlined(), &I, std::nullopt,
6713 MachinePointerInfo(I.getArgOperand(0)),
6714 MachinePointerInfo(I.getArgOperand(1)),
6715 I.getAAMetadata(), BatchAA);
6716 updateDAGForMaybeTailCall(MC);
6717 return;
6718 }
6719 case Intrinsic::memset:
6720 case Intrinsic::memset_inline: {
6721 const auto &MSII = cast<MemSetInst>(I);
6722 SDValue Dst = getValue(I.getArgOperand(0));
6723 SDValue Value = getValue(I.getArgOperand(1));
6724 SDValue Size = getValue(I.getArgOperand(2));
6725 assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) &&
6726 "memset_inline needs constant size");
6727 // @llvm.memset defines 0 and 1 to both mean no alignment.
6728 Align DstAlign = MSII.getDestAlign().valueOrOne();
6729 bool isVol = MSII.isVolatile();
6730 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6731 SDValue MC = DAG.getMemset(
6732 Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
6733 &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6734 updateDAGForMaybeTailCall(MC);
6735 return;
6736 }
6737 case Intrinsic::memmove: {
6738 const auto &MMI = cast<MemMoveInst>(I);
6739 SDValue Op1 = getValue(I.getArgOperand(0));
6740 SDValue Op2 = getValue(I.getArgOperand(1));
6741 SDValue Op3 = getValue(I.getArgOperand(2));
6742 // @llvm.memmove defines 0 and 1 to both mean no alignment.
6743 Align DstAlign = MMI.getDestAlign().valueOrOne();
6744 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6745 Align Alignment = std::min(DstAlign, SrcAlign);
6746 bool isVol = MMI.isVolatile();
6747 // FIXME: Support passing different dest/src alignments to the memmove DAG
6748 // node.
6749 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6750 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
6751 /* OverrideTailCall */ std::nullopt,
6752 MachinePointerInfo(I.getArgOperand(0)),
6753 MachinePointerInfo(I.getArgOperand(1)),
6754 I.getAAMetadata(), BatchAA);
6755 updateDAGForMaybeTailCall(MM);
6756 return;
6757 }
6758 case Intrinsic::memcpy_element_unordered_atomic: {
6759 auto &MI = cast<AnyMemCpyInst>(I);
6760 SDValue Dst = getValue(MI.getRawDest());
6761 SDValue Src = getValue(MI.getRawSource());
6762 SDValue Length = getValue(MI.getLength());
6763
6764 Type *LengthTy = MI.getLength()->getType();
6765 unsigned ElemSz = MI.getElementSizeInBytes();
6766 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6767 SDValue MC =
6768 DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6769 isTC, MachinePointerInfo(MI.getRawDest()),
6770 MachinePointerInfo(MI.getRawSource()));
6771 updateDAGForMaybeTailCall(MC);
6772 return;
6773 }
6774 case Intrinsic::memmove_element_unordered_atomic: {
6775 auto &MI = cast<AnyMemMoveInst>(I);
6776 SDValue Dst = getValue(MI.getRawDest());
6777 SDValue Src = getValue(MI.getRawSource());
6778 SDValue Length = getValue(MI.getLength());
6779
6780 Type *LengthTy = MI.getLength()->getType();
6781 unsigned ElemSz = MI.getElementSizeInBytes();
6782 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6783 SDValue MC =
6784 DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6785 isTC, MachinePointerInfo(MI.getRawDest()),
6786 MachinePointerInfo(MI.getRawSource()));
6787 updateDAGForMaybeTailCall(MC);
6788 return;
6789 }
6790 case Intrinsic::memset_element_unordered_atomic: {
6791 auto &MI = cast<AnyMemSetInst>(I);
6792 SDValue Dst = getValue(MI.getRawDest());
6793 SDValue Val = getValue(MI.getValue());
6794 SDValue Length = getValue(MI.getLength());
6795
6796 Type *LengthTy = MI.getLength()->getType();
6797 unsigned ElemSz = MI.getElementSizeInBytes();
6798 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6799 SDValue MC =
6800 DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6801 isTC, MachinePointerInfo(MI.getRawDest()));
6802 updateDAGForMaybeTailCall(MC);
6803 return;
6804 }
6805 case Intrinsic::call_preallocated_setup: {
6806 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6807 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6808 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6809 getRoot(), SrcValue);
6810 setValue(&I, Res);
6811 DAG.setRoot(Res);
6812 return;
6813 }
6814 case Intrinsic::call_preallocated_arg: {
6815 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6816 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6817 SDValue Ops[3];
6818 Ops[0] = getRoot();
6819 Ops[1] = SrcValue;
6820 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6821 MVT::i32); // arg index
6822 SDValue Res = DAG.getNode(
6824 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6825 setValue(&I, Res);
6826 DAG.setRoot(Res.getValue(1));
6827 return;
6828 }
6829
6830 case Intrinsic::eh_typeid_for: {
6831 // Find the type id for the given typeinfo.
6832 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6833 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6834 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6835 setValue(&I, Res);
6836 return;
6837 }
6838
6839 case Intrinsic::eh_return_i32:
6840 case Intrinsic::eh_return_i64:
6841 DAG.getMachineFunction().setCallsEHReturn(true);
6842 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6843 MVT::Other,
6845 getValue(I.getArgOperand(0)),
6846 getValue(I.getArgOperand(1))));
6847 return;
6848 case Intrinsic::eh_unwind_init:
6849 DAG.getMachineFunction().setCallsUnwindInit(true);
6850 return;
6851 case Intrinsic::eh_dwarf_cfa:
6852 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6853 TLI.getPointerTy(DAG.getDataLayout()),
6854 getValue(I.getArgOperand(0))));
6855 return;
6856 case Intrinsic::eh_sjlj_callsite: {
6857 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6858 assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
6859
6860 FuncInfo.setCurrentCallSite(CI->getZExtValue());
6861 return;
6862 }
6863 case Intrinsic::eh_sjlj_functioncontext: {
6864 // Get and store the index of the function context.
6865 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6866 AllocaInst *FnCtx =
6867 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6868 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6870 return;
6871 }
6872 case Intrinsic::eh_sjlj_setjmp: {
6873 SDValue Ops[2];
6874 Ops[0] = getRoot();
6875 Ops[1] = getValue(I.getArgOperand(0));
6876 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6877 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6878 setValue(&I, Op.getValue(0));
6879 DAG.setRoot(Op.getValue(1));
6880 return;
6881 }
6882 case Intrinsic::eh_sjlj_longjmp:
6883 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6884 getRoot(), getValue(I.getArgOperand(0))));
6885 return;
6886 case Intrinsic::eh_sjlj_setup_dispatch:
6887 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6888 getRoot()));
6889 return;
6890 case Intrinsic::masked_gather:
6891 visitMaskedGather(I);
6892 return;
6893 case Intrinsic::masked_load:
6894 visitMaskedLoad(I);
6895 return;
6896 case Intrinsic::masked_scatter:
6897 visitMaskedScatter(I);
6898 return;
6899 case Intrinsic::masked_store:
6900 visitMaskedStore(I);
6901 return;
6902 case Intrinsic::masked_expandload:
6903 visitMaskedLoad(I, true /* IsExpanding */);
6904 return;
6905 case Intrinsic::masked_compressstore:
6906 visitMaskedStore(I, true /* IsCompressing */);
6907 return;
6908 case Intrinsic::powi:
6909 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6910 getValue(I.getArgOperand(1)), DAG));
6911 return;
6912 case Intrinsic::log:
6913 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6914 return;
6915 case Intrinsic::log2:
6916 setValue(&I,
6917 expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6918 return;
6919 case Intrinsic::log10:
6920 setValue(&I,
6921 expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6922 return;
6923 case Intrinsic::exp:
6924 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6925 return;
6926 case Intrinsic::exp2:
6927 setValue(&I,
6928 expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6929 return;
6930 case Intrinsic::pow:
6931 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6932 getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6933 return;
6934 case Intrinsic::sqrt:
6935 case Intrinsic::fabs:
6936 case Intrinsic::sin:
6937 case Intrinsic::cos:
6938 case Intrinsic::tan:
6939 case Intrinsic::asin:
6940 case Intrinsic::acos:
6941 case Intrinsic::atan:
6942 case Intrinsic::sinh:
6943 case Intrinsic::cosh:
6944 case Intrinsic::tanh:
6945 case Intrinsic::exp10:
6946 case Intrinsic::floor:
6947 case Intrinsic::ceil:
6948 case Intrinsic::trunc:
6949 case Intrinsic::rint:
6950 case Intrinsic::nearbyint:
6951 case Intrinsic::round:
6952 case Intrinsic::roundeven:
6953 case Intrinsic::canonicalize: {
6954 unsigned Opcode;
6955 // clang-format off
6956 switch (Intrinsic) {
6957 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6958 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6959 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6960 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6961 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6962 case Intrinsic::tan: Opcode = ISD::FTAN; break;
6963 case Intrinsic::asin: Opcode = ISD::FASIN; break;
6964 case Intrinsic::acos: Opcode = ISD::FACOS; break;
6965 case Intrinsic::atan: Opcode = ISD::FATAN; break;
6966 case Intrinsic::sinh: Opcode = ISD::FSINH; break;
6967 case Intrinsic::cosh: Opcode = ISD::FCOSH; break;
6968 case Intrinsic::tanh: Opcode = ISD::FTANH; break;
6969 case Intrinsic::exp10: Opcode = ISD::FEXP10; break;
6970 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6971 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6972 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6973 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6974 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6975 case Intrinsic::round: Opcode = ISD::FROUND; break;
6976 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6977 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6978 }
6979 // clang-format on
6980
6981 setValue(&I, DAG.getNode(Opcode, sdl,
6982 getValue(I.getArgOperand(0)).getValueType(),
6983 getValue(I.getArgOperand(0)), Flags));
6984 return;
6985 }
6986 case Intrinsic::atan2:
6987 setValue(&I, DAG.getNode(ISD::FATAN2, sdl,
6988 getValue(I.getArgOperand(0)).getValueType(),
6989 getValue(I.getArgOperand(0)),
6990 getValue(I.getArgOperand(1)), Flags));
6991 return;
6992 case Intrinsic::lround:
6993 case Intrinsic::llround:
6994 case Intrinsic::lrint:
6995 case Intrinsic::llrint: {
6996 unsigned Opcode;
6997 // clang-format off
6998 switch (Intrinsic) {
6999 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7000 case Intrinsic::lround: Opcode = ISD::LROUND; break;
7001 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
7002 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
7003 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
7004 }
7005 // clang-format on
7006
7007 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7008 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
7009 getValue(I.getArgOperand(0))));
7010 return;
7011 }
7012 case Intrinsic::minnum:
7013 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
7014 getValue(I.getArgOperand(0)).getValueType(),
7015 getValue(I.getArgOperand(0)),
7016 getValue(I.getArgOperand(1)), Flags));
7017 return;
7018 case Intrinsic::maxnum:
7019 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
7020 getValue(I.getArgOperand(0)).getValueType(),
7021 getValue(I.getArgOperand(0)),
7022 getValue(I.getArgOperand(1)), Flags));
7023 return;
7024 case Intrinsic::minimum:
7025 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
7026 getValue(I.getArgOperand(0)).getValueType(),
7027 getValue(I.getArgOperand(0)),
7028 getValue(I.getArgOperand(1)), Flags));
7029 return;
7030 case Intrinsic::maximum:
7031 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
7032 getValue(I.getArgOperand(0)).getValueType(),
7033 getValue(I.getArgOperand(0)),
7034 getValue(I.getArgOperand(1)), Flags));
7035 return;
7036 case Intrinsic::minimumnum:
7037 setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
7038 getValue(I.getArgOperand(0)).getValueType(),
7039 getValue(I.getArgOperand(0)),
7040 getValue(I.getArgOperand(1)), Flags));
7041 return;
7042 case Intrinsic::maximumnum:
7043 setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
7044 getValue(I.getArgOperand(0)).getValueType(),
7045 getValue(I.getArgOperand(0)),
7046 getValue(I.getArgOperand(1)), Flags));
7047 return;
7048 case Intrinsic::copysign:
7049 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
7050 getValue(I.getArgOperand(0)).getValueType(),
7051 getValue(I.getArgOperand(0)),
7052 getValue(I.getArgOperand(1)), Flags));
7053 return;
7054 case Intrinsic::ldexp:
7055 setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
7056 getValue(I.getArgOperand(0)).getValueType(),
7057 getValue(I.getArgOperand(0)),
7058 getValue(I.getArgOperand(1)), Flags));
7059 return;
7060 case Intrinsic::modf:
7061 case Intrinsic::sincos:
7062 case Intrinsic::sincospi:
7063 case Intrinsic::frexp: {
7064 unsigned Opcode;
7065 switch (Intrinsic) {
7066 default:
7067 llvm_unreachable("unexpected intrinsic");
7068 case Intrinsic::sincos:
7069 Opcode = ISD::FSINCOS;
7070 break;
7071 case Intrinsic::sincospi:
7072 Opcode = ISD::FSINCOSPI;
7073 break;
7074 case Intrinsic::modf:
7075 Opcode = ISD::FMODF;
7076 break;
7077 case Intrinsic::frexp:
7078 Opcode = ISD::FFREXP;
7079 break;
7080 }
7081 SmallVector<EVT, 2> ValueVTs;
7082 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
7083 SDVTList VTs = DAG.getVTList(ValueVTs);
7084 setValue(
7085 &I, DAG.getNode(Opcode, sdl, VTs, getValue(I.getArgOperand(0)), Flags));
7086 return;
7087 }
7088 case Intrinsic::arithmetic_fence: {
7089 setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
7090 getValue(I.getArgOperand(0)).getValueType(),
7091 getValue(I.getArgOperand(0)), Flags));
7092 return;
7093 }
7094 case Intrinsic::fma:
7095 setValue(&I, DAG.getNode(
7096 ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
7097 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
7098 getValue(I.getArgOperand(2)), Flags));
7099 return;
7100#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7101 case Intrinsic::INTRINSIC:
7102#include "llvm/IR/ConstrainedOps.def"
7103 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
7104 return;
7105#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7106#include "llvm/IR/VPIntrinsics.def"
7107 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
7108 return;
7109 case Intrinsic::fptrunc_round: {
7110 // Get the last argument, the metadata and convert it to an integer in the
7111 // call
7112 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
7113 std::optional<RoundingMode> RoundMode =
7114 convertStrToRoundingMode(cast<MDString>(MD)->getString());
7115
7116 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7117
7118 // Propagate fast-math-flags from IR to node(s).
7119 SDNodeFlags Flags;
7120 Flags.copyFMF(*cast<FPMathOperator>(&I));
7121 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
7122
7124 Result = DAG.getNode(
7125 ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
7126 DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
7127 setValue(&I, Result);
7128
7129 return;
7130 }
7131 case Intrinsic::fmuladd: {
7132 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7133 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7134 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7135 setValue(&I, DAG.getNode(ISD::FMA, sdl,
7136 getValue(I.getArgOperand(0)).getValueType(),
7137 getValue(I.getArgOperand(0)),
7138 getValue(I.getArgOperand(1)),
7139 getValue(I.getArgOperand(2)), Flags));
7140 } else if (TLI.isOperationLegalOrCustom(ISD::FMULADD, VT)) {
7141 // TODO: Support splitting the vector.
7142 setValue(&I, DAG.getNode(ISD::FMULADD, sdl,
7143 getValue(I.getArgOperand(0)).getValueType(),
7144 getValue(I.getArgOperand(0)),
7145 getValue(I.getArgOperand(1)),
7146 getValue(I.getArgOperand(2)), Flags));
7147 } else {
7148 // TODO: Intrinsic calls should have fast-math-flags.
7149 SDValue Mul = DAG.getNode(
7150 ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
7151 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
7152 SDValue Add = DAG.getNode(ISD::FADD, sdl,
7153 getValue(I.getArgOperand(0)).getValueType(),
7154 Mul, getValue(I.getArgOperand(2)), Flags);
7155 setValue(&I, Add);
7156 }
7157 return;
7158 }
7159 case Intrinsic::fptosi_sat: {
7160 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7161 setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
7162 getValue(I.getArgOperand(0)),
7163 DAG.getValueType(VT.getScalarType())));
7164 return;
7165 }
7166 case Intrinsic::fptoui_sat: {
7167 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7168 setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
7169 getValue(I.getArgOperand(0)),
7170 DAG.getValueType(VT.getScalarType())));
7171 return;
7172 }
7173 case Intrinsic::convert_from_arbitrary_fp: {
7174 // Extract format metadata and convert to semantics enum.
7175 EVT DstVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7176 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
7177 StringRef FormatStr = cast<MDString>(MD)->getString();
7178 const fltSemantics *SrcSem =
7180 if (!SrcSem) {
7181 DAG.getContext()->emitError(
7182 "convert_from_arbitrary_fp: not implemented format '" + FormatStr +
7183 "'");
7184 setValue(&I, DAG.getPOISON(DstVT));
7185 return;
7186 }
7188
7189 SDValue IntVal = getValue(I.getArgOperand(0));
7190
7191 // Emit ISD::CONVERT_FROM_ARBITRARY_FP node.
7192 SDValue SemConst =
7193 DAG.getTargetConstant(static_cast<int>(SemEnum), sdl, MVT::i32);
7194 setValue(&I, DAG.getNode(ISD::CONVERT_FROM_ARBITRARY_FP, sdl, DstVT, IntVal,
7195 SemConst));
7196 return;
7197 }
7198 case Intrinsic::set_rounding:
7199 Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7200 {getRoot(), getValue(I.getArgOperand(0))});
7201 setValue(&I, Res);
7202 DAG.setRoot(Res.getValue(0));
7203 return;
7204 case Intrinsic::is_fpclass: {
7205 const DataLayout DLayout = DAG.getDataLayout();
7206 EVT DestVT = TLI.getValueType(DLayout, I.getType());
7207 EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
7208 FPClassTest Test = static_cast<FPClassTest>(
7209 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
7210 MachineFunction &MF = DAG.getMachineFunction();
7211 const Function &F = MF.getFunction();
7212 SDValue Op = getValue(I.getArgOperand(0));
7213 SDNodeFlags Flags;
7214 Flags.setNoFPExcept(
7215 !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7216 // If ISD::IS_FPCLASS should be expanded, do it right now, because the
7217 // expansion can use illegal types. Making expansion early allows
7218 // legalizing these types prior to selection.
7219 if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
7220 !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
7221 SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7222 setValue(&I, Result);
7223 return;
7224 }
7225
7226 SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7227 SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
7228 setValue(&I, V);
7229 return;
7230 }
7231 case Intrinsic::get_fpenv: {
7232 const DataLayout DLayout = DAG.getDataLayout();
7233 EVT EnvVT = TLI.getValueType(DLayout, I.getType());
7234 Align TempAlign = DAG.getEVTAlign(EnvVT);
7235 SDValue Chain = getRoot();
7236 // Use GET_FPENV if it is legal or custom. Otherwise use memory-based node
7237 // and temporary storage in stack.
7238 if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
7239 Res = DAG.getNode(
7240 ISD::GET_FPENV, sdl,
7241 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7242 MVT::Other),
7243 Chain);
7244 } else {
7245 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7246 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7247 auto MPI =
7248 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7249 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7251 TempAlign);
7252 Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7253 Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7254 }
7255 setValue(&I, Res);
7256 DAG.setRoot(Res.getValue(1));
7257 return;
7258 }
7259 case Intrinsic::set_fpenv: {
7260 const DataLayout DLayout = DAG.getDataLayout();
7261 SDValue Env = getValue(I.getArgOperand(0));
7262 EVT EnvVT = Env.getValueType();
7263 Align TempAlign = DAG.getEVTAlign(EnvVT);
7264 SDValue Chain = getRoot();
7265 // If SET_FPENV is custom or legal, use it. Otherwise use loading
7266 // environment from memory.
7267 if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
7268 Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7269 } else {
7270 // Allocate space in stack, copy environment bits into it and use this
7271 // memory in SET_FPENV_MEM.
7272 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7273 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7274 auto MPI =
7275 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7276 Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7278 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7280 TempAlign);
7281 Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7282 }
7283 DAG.setRoot(Chain);
7284 return;
7285 }
7286 case Intrinsic::reset_fpenv:
7287 DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
7288 return;
7289 case Intrinsic::get_fpmode:
7290 Res = DAG.getNode(
7291 ISD::GET_FPMODE, sdl,
7292 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7293 MVT::Other),
7294 DAG.getRoot());
7295 setValue(&I, Res);
7296 DAG.setRoot(Res.getValue(1));
7297 return;
7298 case Intrinsic::set_fpmode:
7299 Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
7300 getValue(I.getArgOperand(0)));
7301 DAG.setRoot(Res);
7302 return;
7303 case Intrinsic::reset_fpmode: {
7304 Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
7305 DAG.setRoot(Res);
7306 return;
7307 }
7308 case Intrinsic::pcmarker: {
7309 SDValue Tmp = getValue(I.getArgOperand(0));
7310 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
7311 return;
7312 }
7313 case Intrinsic::readcyclecounter: {
7314 SDValue Op = getRoot();
7315 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7316 DAG.getVTList(MVT::i64, MVT::Other), Op);
7317 setValue(&I, Res);
7318 DAG.setRoot(Res.getValue(1));
7319 return;
7320 }
7321 case Intrinsic::readsteadycounter: {
7322 SDValue Op = getRoot();
7323 Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7324 DAG.getVTList(MVT::i64, MVT::Other), Op);
7325 setValue(&I, Res);
7326 DAG.setRoot(Res.getValue(1));
7327 return;
7328 }
7329 case Intrinsic::bitreverse:
7330 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
7331 getValue(I.getArgOperand(0)).getValueType(),
7332 getValue(I.getArgOperand(0))));
7333 return;
7334 case Intrinsic::bswap:
7335 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
7336 getValue(I.getArgOperand(0)).getValueType(),
7337 getValue(I.getArgOperand(0))));
7338 return;
7339 case Intrinsic::cttz: {
7340 SDValue Arg = getValue(I.getArgOperand(0));
7341 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7342 EVT Ty = Arg.getValueType();
7343 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_POISON,
7344 sdl, Ty, Arg));
7345 return;
7346 }
7347 case Intrinsic::ctlz: {
7348 SDValue Arg = getValue(I.getArgOperand(0));
7349 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7350 EVT Ty = Arg.getValueType();
7351 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_POISON,
7352 sdl, Ty, Arg));
7353 return;
7354 }
7355 case Intrinsic::ctpop: {
7356 SDValue Arg = getValue(I.getArgOperand(0));
7357 EVT Ty = Arg.getValueType();
7358 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
7359 return;
7360 }
7361 case Intrinsic::fshl:
7362 case Intrinsic::fshr: {
7363 bool IsFSHL = Intrinsic == Intrinsic::fshl;
7364 SDValue X = getValue(I.getArgOperand(0));
7365 SDValue Y = getValue(I.getArgOperand(1));
7366 SDValue Z = getValue(I.getArgOperand(2));
7367 EVT VT = X.getValueType();
7368
7369 if (X == Y) {
7370 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
7371 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
7372 } else {
7373 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
7374 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
7375 }
7376 return;
7377 }
7378 case Intrinsic::clmul: {
7379 SDValue X = getValue(I.getArgOperand(0));
7380 SDValue Y = getValue(I.getArgOperand(1));
7381 setValue(&I, DAG.getNode(ISD::CLMUL, sdl, X.getValueType(), X, Y));
7382 return;
7383 }
7384 case Intrinsic::sadd_sat: {
7385 SDValue Op1 = getValue(I.getArgOperand(0));
7386 SDValue Op2 = getValue(I.getArgOperand(1));
7387 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7388 return;
7389 }
7390 case Intrinsic::uadd_sat: {
7391 SDValue Op1 = getValue(I.getArgOperand(0));
7392 SDValue Op2 = getValue(I.getArgOperand(1));
7393 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7394 return;
7395 }
7396 case Intrinsic::ssub_sat: {
7397 SDValue Op1 = getValue(I.getArgOperand(0));
7398 SDValue Op2 = getValue(I.getArgOperand(1));
7399 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7400 return;
7401 }
7402 case Intrinsic::usub_sat: {
7403 SDValue Op1 = getValue(I.getArgOperand(0));
7404 SDValue Op2 = getValue(I.getArgOperand(1));
7405 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7406 return;
7407 }
7408 case Intrinsic::sshl_sat:
7409 case Intrinsic::ushl_sat: {
7410 SDValue Op1 = getValue(I.getArgOperand(0));
7411 SDValue Op2 = getValue(I.getArgOperand(1));
7412
7413 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
7414 Op1.getValueType(), DAG.getDataLayout());
7415
7416 // Coerce the shift amount to the right type if we can. This exposes the
7417 // truncate or zext to optimization early.
7418 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
7419 assert(ShiftTy.getSizeInBits() >=
7421 "Unexpected shift type");
7422 Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
7423 }
7424
7425 unsigned Opc =
7426 Intrinsic == Intrinsic::sshl_sat ? ISD::SSHLSAT : ISD::USHLSAT;
7427 setValue(&I, DAG.getNode(Opc, sdl, Op1.getValueType(), Op1, Op2));
7428 return;
7429 }
7430 case Intrinsic::smul_fix:
7431 case Intrinsic::umul_fix:
7432 case Intrinsic::smul_fix_sat:
7433 case Intrinsic::umul_fix_sat: {
7434 SDValue Op1 = getValue(I.getArgOperand(0));
7435 SDValue Op2 = getValue(I.getArgOperand(1));
7436 SDValue Op3 = getValue(I.getArgOperand(2));
7437 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7438 Op1.getValueType(), Op1, Op2, Op3));
7439 return;
7440 }
7441 case Intrinsic::sdiv_fix:
7442 case Intrinsic::udiv_fix:
7443 case Intrinsic::sdiv_fix_sat:
7444 case Intrinsic::udiv_fix_sat: {
7445 SDValue Op1 = getValue(I.getArgOperand(0));
7446 SDValue Op2 = getValue(I.getArgOperand(1));
7447 SDValue Op3 = getValue(I.getArgOperand(2));
7449 Op1, Op2, Op3, DAG, TLI));
7450 return;
7451 }
7452 case Intrinsic::smax: {
7453 SDValue Op1 = getValue(I.getArgOperand(0));
7454 SDValue Op2 = getValue(I.getArgOperand(1));
7455 setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
7456 return;
7457 }
7458 case Intrinsic::smin: {
7459 SDValue Op1 = getValue(I.getArgOperand(0));
7460 SDValue Op2 = getValue(I.getArgOperand(1));
7461 setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
7462 return;
7463 }
7464 case Intrinsic::umax: {
7465 SDValue Op1 = getValue(I.getArgOperand(0));
7466 SDValue Op2 = getValue(I.getArgOperand(1));
7467 setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
7468 return;
7469 }
7470 case Intrinsic::umin: {
7471 SDValue Op1 = getValue(I.getArgOperand(0));
7472 SDValue Op2 = getValue(I.getArgOperand(1));
7473 setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
7474 return;
7475 }
7476 case Intrinsic::abs: {
7477 SDValue Op1 = getValue(I.getArgOperand(0));
7478 bool IntMinIsPoison = cast<ConstantInt>(I.getArgOperand(1))->isOne();
7479 unsigned Opc = IntMinIsPoison ? ISD::ABS_MIN_POISON : ISD::ABS;
7480 setValue(&I, DAG.getNode(Opc, sdl, Op1.getValueType(), Op1));
7481 return;
7482 }
7483 case Intrinsic::scmp: {
7484 SDValue Op1 = getValue(I.getArgOperand(0));
7485 SDValue Op2 = getValue(I.getArgOperand(1));
7486 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7487 setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
7488 break;
7489 }
7490 case Intrinsic::ucmp: {
7491 SDValue Op1 = getValue(I.getArgOperand(0));
7492 SDValue Op2 = getValue(I.getArgOperand(1));
7493 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7494 setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
7495 break;
7496 }
7497 case Intrinsic::stackaddress:
7498 case Intrinsic::stacksave: {
7499 unsigned SDOpcode = Intrinsic == Intrinsic::stackaddress ? ISD::STACKADDRESS
7501 SDValue Op = getRoot();
7502 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7503 Res = DAG.getNode(SDOpcode, sdl, DAG.getVTList(VT, MVT::Other), Op);
7504 setValue(&I, Res);
7505 DAG.setRoot(Res.getValue(1));
7506 return;
7507 }
7508 case Intrinsic::stackrestore:
7509 Res = getValue(I.getArgOperand(0));
7510 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
7511 return;
7512 case Intrinsic::get_dynamic_area_offset: {
7513 SDValue Op = getRoot();
7514 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7515 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
7516 Op);
7517 DAG.setRoot(Op);
7518 setValue(&I, Res);
7519 return;
7520 }
7521 case Intrinsic::stackguard: {
7522 MachineFunction &MF = DAG.getMachineFunction();
7523 const Module &M = *MF.getFunction().getParent();
7524 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7525 SDValue Chain = getRoot();
7526 if (TLI.useLoadStackGuardNode(M)) {
7527 Res = getLoadStackGuard(DAG, sdl, Chain);
7528 Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7529 } else {
7530 const Value *Global = TLI.getSDagStackGuard(M, DAG.getLibcalls());
7531 if (!Global) {
7532 LLVMContext &Ctx = *DAG.getContext();
7533 Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
7534 setValue(&I, DAG.getPOISON(PtrTy));
7535 return;
7536 }
7537
7538 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7539 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
7540 MachinePointerInfo(Global, 0), Align,
7542 }
7543 if (TLI.useStackGuardXorFP())
7544 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
7545 DAG.setRoot(Chain);
7546 setValue(&I, Res);
7547 return;
7548 }
7549 case Intrinsic::stackprotector: {
7550 // Emit code into the DAG to store the stack guard onto the stack.
7551 MachineFunction &MF = DAG.getMachineFunction();
7552 MachineFrameInfo &MFI = MF.getFrameInfo();
7553 const Module &M = *MF.getFunction().getParent();
7554 SDValue Src, Chain = getRoot();
7555
7556 if (TLI.useLoadStackGuardNode(M))
7557 Src = getLoadStackGuard(DAG, sdl, Chain);
7558 else
7559 Src = getValue(I.getArgOperand(0)); // The guard's value.
7560
7561 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7562
7563 int FI = FuncInfo.StaticAllocaMap[Slot];
7564 MFI.setStackProtectorIndex(FI);
7565 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7566
7567 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7568
7569 // Store the stack protector onto the stack.
7570 Res = DAG.getStore(
7571 Chain, sdl, Src, FIN,
7572 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7573 MaybeAlign(), MachineMemOperand::MOVolatile);
7574 setValue(&I, Res);
7575 DAG.setRoot(Res);
7576 return;
7577 }
7578 case Intrinsic::objectsize:
7579 llvm_unreachable("llvm.objectsize.* should have been lowered already");
7580
7581 case Intrinsic::is_constant:
7582 llvm_unreachable("llvm.is.constant.* should have been lowered already");
7583
7584 case Intrinsic::annotation:
7585 case Intrinsic::ptr_annotation:
7586 case Intrinsic::launder_invariant_group:
7587 case Intrinsic::strip_invariant_group:
7588 // Drop the intrinsic, but forward the value
7589 setValue(&I, getValue(I.getOperand(0)));
7590 return;
7591
7592 case Intrinsic::type_test:
7593 case Intrinsic::public_type_test:
7594 reportFatalUsageError("llvm.type.test intrinsic must be lowered by the "
7595 "LowerTypeTests pass before code generation");
7596 return;
7597
7598 case Intrinsic::assume:
7599 case Intrinsic::experimental_noalias_scope_decl:
7600 case Intrinsic::var_annotation:
7601 case Intrinsic::sideeffect:
7602 // Discard annotate attributes, noalias scope declarations, assumptions, and
7603 // artificial side-effects.
7604 return;
7605
7606 case Intrinsic::codeview_annotation: {
7607 // Emit a label associated with this metadata.
7608 MachineFunction &MF = DAG.getMachineFunction();
7609 MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true);
7610 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7611 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7612 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7613 DAG.setRoot(Res);
7614 return;
7615 }
7616
7617 case Intrinsic::init_trampoline: {
7618 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7619
7620 SDValue Ops[6];
7621 Ops[0] = getRoot();
7622 Ops[1] = getValue(I.getArgOperand(0));
7623 Ops[2] = getValue(I.getArgOperand(1));
7624 Ops[3] = getValue(I.getArgOperand(2));
7625 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7626 Ops[5] = DAG.getSrcValue(F);
7627
7628 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7629
7630 DAG.setRoot(Res);
7631 return;
7632 }
7633 case Intrinsic::adjust_trampoline:
7634 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7635 TLI.getPointerTy(DAG.getDataLayout()),
7636 getValue(I.getArgOperand(0))));
7637 return;
7638 case Intrinsic::gcroot: {
7639 assert(DAG.getMachineFunction().getFunction().hasGC() &&
7640 "only valid in functions with gc specified, enforced by Verifier");
7641 assert(GFI && "implied by previous");
7642 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7643 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7644
7645 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7646 GFI->addStackRoot(FI->getIndex(), TypeMap);
7647 return;
7648 }
7649 case Intrinsic::gcread:
7650 case Intrinsic::gcwrite:
7651 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7652 case Intrinsic::get_rounding:
7653 Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7654 setValue(&I, Res);
7655 DAG.setRoot(Res.getValue(1));
7656 return;
7657
7658 case Intrinsic::expect:
7659 case Intrinsic::expect_with_probability:
7660 // Just replace __builtin_expect(exp, c) and
7661 // __builtin_expect_with_probability(exp, c, p) with EXP.
7662 setValue(&I, getValue(I.getArgOperand(0)));
7663 return;
7664
7665 case Intrinsic::ubsantrap:
7666 case Intrinsic::debugtrap:
7667 case Intrinsic::trap: {
7668 StringRef TrapFuncName =
7669 I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7670 if (TrapFuncName.empty()) {
7671 switch (Intrinsic) {
7672 case Intrinsic::trap:
7673 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7674 break;
7675 case Intrinsic::debugtrap:
7676 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7677 break;
7678 case Intrinsic::ubsantrap:
7679 DAG.setRoot(DAG.getNode(
7680 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7681 DAG.getTargetConstant(
7682 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7683 MVT::i32)));
7684 break;
7685 default: llvm_unreachable("unknown trap intrinsic");
7686 }
7687 DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(),
7688 I.hasFnAttr(Attribute::NoMerge));
7689 return;
7690 }
7692 if (Intrinsic == Intrinsic::ubsantrap) {
7693 Value *Arg = I.getArgOperand(0);
7694 Args.emplace_back(Arg, getValue(Arg));
7695 }
7696
7697 TargetLowering::CallLoweringInfo CLI(DAG);
7698 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7699 CallingConv::C, I.getType(),
7700 DAG.getExternalSymbol(TrapFuncName.data(),
7701 TLI.getPointerTy(DAG.getDataLayout())),
7702 std::move(Args));
7703 CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge);
7704 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7705 DAG.setRoot(Result.second);
7706 return;
7707 }
7708
7709 case Intrinsic::allow_runtime_check:
7710 case Intrinsic::allow_ubsan_check:
7711 setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7712 return;
7713
7714 case Intrinsic::uadd_with_overflow:
7715 case Intrinsic::sadd_with_overflow:
7716 case Intrinsic::usub_with_overflow:
7717 case Intrinsic::ssub_with_overflow:
7718 case Intrinsic::umul_with_overflow:
7719 case Intrinsic::smul_with_overflow: {
7721 switch (Intrinsic) {
7722 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7723 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7724 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7725 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7726 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7727 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7728 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7729 }
7730 SDValue Op1 = getValue(I.getArgOperand(0));
7731 SDValue Op2 = getValue(I.getArgOperand(1));
7732
7733 EVT ResultVT = Op1.getValueType();
7734 EVT OverflowVT = ResultVT.changeElementType(*Context, MVT::i1);
7735
7736 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7737 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7738 return;
7739 }
7740 case Intrinsic::prefetch: {
7741 SDValue Ops[5];
7742 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7744 Ops[0] = DAG.getRoot();
7745 Ops[1] = getValue(I.getArgOperand(0));
7746 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7747 MVT::i32);
7748 Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7749 MVT::i32);
7750 Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7751 MVT::i32);
7752 SDValue Result = DAG.getMemIntrinsicNode(
7753 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7754 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7755 /* align */ std::nullopt, Flags);
7756
7757 // Chain the prefetch in parallel with any pending loads, to stay out of
7758 // the way of later optimizations.
7759 PendingLoads.push_back(Result);
7760 Result = getRoot();
7761 DAG.setRoot(Result);
7762 return;
7763 }
7764 case Intrinsic::lifetime_start:
7765 case Intrinsic::lifetime_end: {
7766 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7767 // Stack coloring is not enabled in O0, discard region information.
7768 if (TM.getOptLevel() == CodeGenOptLevel::None)
7769 return;
7770
7771 const AllocaInst *LifetimeObject = dyn_cast<AllocaInst>(I.getArgOperand(0));
7772 if (!LifetimeObject)
7773 return;
7774
7775 // First check that the Alloca is static, otherwise it won't have a
7776 // valid frame index.
7777 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7778 if (SI == FuncInfo.StaticAllocaMap.end())
7779 return;
7780
7781 const int FrameIndex = SI->second;
7782 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex);
7783 DAG.setRoot(Res);
7784 return;
7785 }
7786 case Intrinsic::pseudoprobe: {
7787 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7788 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7789 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7790 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7791 DAG.setRoot(Res);
7792 return;
7793 }
7794 case Intrinsic::invariant_start:
7795 // Discard region information.
7796 setValue(&I,
7797 DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7798 return;
7799 case Intrinsic::invariant_end:
7800 // Discard region information.
7801 return;
7802 case Intrinsic::clear_cache: {
7803 SDValue InputChain = DAG.getRoot();
7804 SDValue StartVal = getValue(I.getArgOperand(0));
7805 SDValue EndVal = getValue(I.getArgOperand(1));
7806 Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other),
7807 {InputChain, StartVal, EndVal});
7808 setValue(&I, Res);
7809 DAG.setRoot(Res);
7810 return;
7811 }
7812 case Intrinsic::donothing:
7813 case Intrinsic::seh_try_begin:
7814 case Intrinsic::seh_scope_begin:
7815 case Intrinsic::seh_try_end:
7816 case Intrinsic::seh_scope_end:
7817 // ignore
7818 return;
7819 case Intrinsic::experimental_stackmap:
7820 visitStackmap(I);
7821 return;
7822 case Intrinsic::experimental_patchpoint_void:
7823 case Intrinsic::experimental_patchpoint:
7824 visitPatchpoint(I);
7825 return;
7826 case Intrinsic::experimental_gc_statepoint:
7828 return;
7829 case Intrinsic::experimental_gc_result:
7830 visitGCResult(cast<GCResultInst>(I));
7831 return;
7832 case Intrinsic::experimental_gc_relocate:
7833 visitGCRelocate(cast<GCRelocateInst>(I));
7834 return;
7835 case Intrinsic::instrprof_cover:
7836 llvm_unreachable("instrprof failed to lower a cover");
7837 case Intrinsic::instrprof_increment:
7838 llvm_unreachable("instrprof failed to lower an increment");
7839 case Intrinsic::instrprof_timestamp:
7840 llvm_unreachable("instrprof failed to lower a timestamp");
7841 case Intrinsic::instrprof_value_profile:
7842 llvm_unreachable("instrprof failed to lower a value profiling call");
7843 case Intrinsic::instrprof_mcdc_parameters:
7844 llvm_unreachable("instrprof failed to lower mcdc parameters");
7845 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7846 llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7847 case Intrinsic::localescape: {
7848 MachineFunction &MF = DAG.getMachineFunction();
7849 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7850
7851 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7852 // is the same on all targets.
7853 for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7854 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7855 if (isa<ConstantPointerNull>(Arg))
7856 continue; // Skip null pointers. They represent a hole in index space.
7857 AllocaInst *Slot = cast<AllocaInst>(Arg);
7858 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7859 "can only escape static allocas");
7860 int FI = FuncInfo.StaticAllocaMap[Slot];
7861 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7863 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7864 TII->get(TargetOpcode::LOCAL_ESCAPE))
7865 .addSym(FrameAllocSym)
7866 .addFrameIndex(FI);
7867 }
7868
7869 return;
7870 }
7871
7872 case Intrinsic::localrecover: {
7873 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7874 MachineFunction &MF = DAG.getMachineFunction();
7875
7876 // Get the symbol that defines the frame offset.
7877 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7878 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7879 unsigned IdxVal =
7880 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7881 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7883
7884 Value *FP = I.getArgOperand(1);
7885 SDValue FPVal = getValue(FP);
7886 EVT PtrVT = FPVal.getValueType();
7887
7888 // Create a MCSymbol for the label to avoid any target lowering
7889 // that would make this PC relative.
7890 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7891 SDValue OffsetVal =
7892 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7893
7894 // Add the offset to the FP.
7895 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7896 setValue(&I, Add);
7897
7898 return;
7899 }
7900
7901 case Intrinsic::fake_use: {
7902 Value *V = I.getArgOperand(0);
7903 SDValue Ops[2];
7904 // For Values not declared or previously used in this basic block, the
7905 // NodeMap will not have an entry, and `getValue` will assert if V has no
7906 // valid register value.
7907 auto FakeUseValue = [&]() -> SDValue {
7908 SDValue &N = NodeMap[V];
7909 if (N.getNode())
7910 return N;
7911
7912 // If there's a virtual register allocated and initialized for this
7913 // value, use it.
7914 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
7915 return copyFromReg;
7916 // FIXME: Do we want to preserve constants? It seems pointless.
7917 if (isa<Constant>(V))
7918 return getValue(V);
7919 return SDValue();
7920 }();
7921 if (!FakeUseValue || FakeUseValue.isUndef())
7922 return;
7923 Ops[0] = getRoot();
7924 Ops[1] = FakeUseValue;
7925 // Also, do not translate a fake use with an undef operand, or any other
7926 // empty SDValues.
7927 if (!Ops[1] || Ops[1].isUndef())
7928 return;
7929 DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops));
7930 return;
7931 }
7932
7933 case Intrinsic::reloc_none: {
7934 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7935 StringRef SymbolName = cast<MDString>(MD)->getString();
7936 SDValue Ops[2] = {
7937 getRoot(),
7938 DAG.getTargetExternalSymbol(
7939 SymbolName.data(), TLI.getProgramPointerTy(DAG.getDataLayout()))};
7940 DAG.setRoot(DAG.getNode(ISD::RELOC_NONE, sdl, MVT::Other, Ops));
7941 return;
7942 }
7943
7944 case Intrinsic::cond_loop: {
7945 SDValue InputChain = DAG.getRoot();
7946 SDValue P = getValue(I.getArgOperand(0));
7947 Res = DAG.getNode(ISD::COND_LOOP, sdl, DAG.getVTList(MVT::Other),
7948 {InputChain, P});
7949 setValue(&I, Res);
7950 DAG.setRoot(Res);
7951 return;
7952 }
7953
7954 case Intrinsic::eh_exceptionpointer:
7955 case Intrinsic::eh_exceptioncode: {
7956 // Get the exception pointer vreg, copy from it, and resize it to fit.
7957 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7958 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7959 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7960 Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7961 SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7962 if (Intrinsic == Intrinsic::eh_exceptioncode)
7963 N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7964 setValue(&I, N);
7965 return;
7966 }
7967 case Intrinsic::xray_customevent: {
7968 // Here we want to make sure that the intrinsic behaves as if it has a
7969 // specific calling convention.
7970 const auto &Triple = DAG.getTarget().getTargetTriple();
7971 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64 &&
7972 Triple.getArch() != Triple::hexagon)
7973 return;
7974
7976
7977 // We want to say that we always want the arguments in registers.
7978 SDValue LogEntryVal = getValue(I.getArgOperand(0));
7979 SDValue StrSizeVal = getValue(I.getArgOperand(1));
7980 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7981 SDValue Chain = getRoot();
7982 Ops.push_back(LogEntryVal);
7983 Ops.push_back(StrSizeVal);
7984 Ops.push_back(Chain);
7985
7986 // We need to enforce the calling convention for the callsite, so that
7987 // argument ordering is enforced correctly, and that register allocation can
7988 // see that some registers may be assumed clobbered and have to preserve
7989 // them across calls to the intrinsic.
7990 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7991 sdl, NodeTys, Ops);
7992 SDValue patchableNode = SDValue(MN, 0);
7993 DAG.setRoot(patchableNode);
7994 setValue(&I, patchableNode);
7995 return;
7996 }
7997 case Intrinsic::xray_typedevent: {
7998 // Here we want to make sure that the intrinsic behaves as if it has a
7999 // specific calling convention.
8000 const auto &Triple = DAG.getTarget().getTargetTriple();
8001 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64 &&
8002 Triple.getArch() != Triple::hexagon)
8003 return;
8004
8006
8007 // We want to say that we always want the arguments in registers.
8008 // It's unclear to me how manipulating the selection DAG here forces callers
8009 // to provide arguments in registers instead of on the stack.
8010 SDValue LogTypeId = getValue(I.getArgOperand(0));
8011 SDValue LogEntryVal = getValue(I.getArgOperand(1));
8012 SDValue StrSizeVal = getValue(I.getArgOperand(2));
8013 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8014 SDValue Chain = getRoot();
8015 Ops.push_back(LogTypeId);
8016 Ops.push_back(LogEntryVal);
8017 Ops.push_back(StrSizeVal);
8018 Ops.push_back(Chain);
8019
8020 // We need to enforce the calling convention for the callsite, so that
8021 // argument ordering is enforced correctly, and that register allocation can
8022 // see that some registers may be assumed clobbered and have to preserve
8023 // them across calls to the intrinsic.
8024 MachineSDNode *MN = DAG.getMachineNode(
8025 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
8026 SDValue patchableNode = SDValue(MN, 0);
8027 DAG.setRoot(patchableNode);
8028 setValue(&I, patchableNode);
8029 return;
8030 }
8031 case Intrinsic::experimental_deoptimize:
8033 return;
8034 case Intrinsic::stepvector:
8035 visitStepVector(I);
8036 return;
8037 case Intrinsic::vector_reduce_fadd:
8038 case Intrinsic::vector_reduce_fmul:
8039 case Intrinsic::vector_reduce_add:
8040 case Intrinsic::vector_reduce_mul:
8041 case Intrinsic::vector_reduce_and:
8042 case Intrinsic::vector_reduce_or:
8043 case Intrinsic::vector_reduce_xor:
8044 case Intrinsic::vector_reduce_smax:
8045 case Intrinsic::vector_reduce_smin:
8046 case Intrinsic::vector_reduce_umax:
8047 case Intrinsic::vector_reduce_umin:
8048 case Intrinsic::vector_reduce_fmax:
8049 case Intrinsic::vector_reduce_fmin:
8050 case Intrinsic::vector_reduce_fmaximum:
8051 case Intrinsic::vector_reduce_fminimum:
8052 visitVectorReduce(I, Intrinsic);
8053 return;
8054
8055 case Intrinsic::icall_branch_funnel: {
8057 Ops.push_back(getValue(I.getArgOperand(0)));
8058
8059 int64_t Offset;
8061 I.getArgOperand(1), Offset, DAG.getDataLayout()));
8062 if (!Base)
8064 "llvm.icall.branch.funnel operand must be a GlobalValue");
8065 Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
8066
8067 struct BranchFunnelTarget {
8068 int64_t Offset;
8070 };
8072
8073 for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
8075 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
8076 if (ElemBase != Base)
8077 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
8078 "to the same GlobalValue");
8079
8080 SDValue Val = getValue(I.getArgOperand(Op + 1));
8081 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
8082 if (!GA)
8084 "llvm.icall.branch.funnel operand must be a GlobalValue");
8085 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
8086 GA->getGlobal(), sdl, Val.getValueType(),
8087 GA->getOffset())});
8088 }
8089 llvm::sort(Targets,
8090 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
8091 return T1.Offset < T2.Offset;
8092 });
8093
8094 for (auto &T : Targets) {
8095 Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
8096 Ops.push_back(T.Target);
8097 }
8098
8099 Ops.push_back(DAG.getRoot()); // Chain
8100 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
8101 MVT::Other, Ops),
8102 0);
8103 DAG.setRoot(N);
8104 setValue(&I, N);
8105 HasTailCall = true;
8106 return;
8107 }
8108
8109 case Intrinsic::wasm_landingpad_index:
8110 // Information this intrinsic contained has been transferred to
8111 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
8112 // delete it now.
8113 return;
8114
8115 case Intrinsic::aarch64_settag:
8116 case Intrinsic::aarch64_settag_zero: {
8117 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8118 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
8120 DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
8121 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
8122 ZeroMemory);
8123 DAG.setRoot(Val);
8124 setValue(&I, Val);
8125 return;
8126 }
8127 case Intrinsic::amdgcn_cs_chain: {
8128 // At this point we don't care if it's amdgpu_cs_chain or
8129 // amdgpu_cs_chain_preserve.
8131
8132 Type *RetTy = I.getType();
8133 assert(RetTy->isVoidTy() && "Should not return");
8134
8135 SDValue Callee = getValue(I.getOperand(0));
8136
8137 // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
8138 // We'll also tack the value of the EXEC mask at the end.
8140 Args.reserve(3);
8141
8142 for (unsigned Idx : {2, 3, 1}) {
8143 TargetLowering::ArgListEntry Arg(getValue(I.getOperand(Idx)),
8144 I.getOperand(Idx)->getType());
8145 Arg.setAttributes(&I, Idx);
8146 Args.push_back(Arg);
8147 }
8148
8149 assert(Args[0].IsInReg && "SGPR args should be marked inreg");
8150 assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
8151 Args[2].IsInReg = true; // EXEC should be inreg
8152
8153 // Forward the flags and any additional arguments.
8154 for (unsigned Idx = 4; Idx < I.arg_size(); ++Idx) {
8155 TargetLowering::ArgListEntry Arg(getValue(I.getOperand(Idx)),
8156 I.getOperand(Idx)->getType());
8157 Arg.setAttributes(&I, Idx);
8158 Args.push_back(Arg);
8159 }
8160
8161 TargetLowering::CallLoweringInfo CLI(DAG);
8162 CLI.setDebugLoc(getCurSDLoc())
8163 .setChain(getRoot())
8164 .setCallee(CC, RetTy, Callee, std::move(Args))
8165 .setNoReturn(true)
8166 .setTailCall(true)
8167 .setConvergent(I.isConvergent());
8168 CLI.CB = &I;
8169 std::pair<SDValue, SDValue> Result =
8170 lowerInvokable(CLI, /*EHPadBB*/ nullptr);
8171 (void)Result;
8172 assert(!Result.first.getNode() && !Result.second.getNode() &&
8173 "Should've lowered as tail call");
8174
8175 HasTailCall = true;
8176 return;
8177 }
8178 case Intrinsic::amdgcn_call_whole_wave: {
8180 bool isTailCall = I.isTailCall();
8181
8182 // The first argument is the callee. Skip it when assembling the call args.
8183 for (unsigned Idx = 1; Idx < I.arg_size(); ++Idx) {
8184 TargetLowering::ArgListEntry Arg(getValue(I.getArgOperand(Idx)),
8185 I.getArgOperand(Idx)->getType());
8186 Arg.setAttributes(&I, Idx);
8187
8188 // If we have an explicit sret argument that is an Instruction, (i.e., it
8189 // might point to function-local memory), we can't meaningfully tail-call.
8190 if (Arg.IsSRet && isa<Instruction>(I.getArgOperand(Idx)))
8191 isTailCall = false;
8192
8193 Args.push_back(Arg);
8194 }
8195
8196 SDValue ConvControlToken;
8197 if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
8198 auto *Token = Bundle->Inputs[0].get();
8199 ConvControlToken = getValue(Token);
8200 }
8201
8202 TargetLowering::CallLoweringInfo CLI(DAG);
8203 CLI.setDebugLoc(getCurSDLoc())
8204 .setChain(getRoot())
8205 .setCallee(CallingConv::AMDGPU_Gfx_WholeWave, I.getType(),
8206 getValue(I.getArgOperand(0)), std::move(Args))
8207 .setTailCall(isTailCall && canTailCall(I))
8208 .setIsPreallocated(
8209 I.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8210 .setConvergent(I.isConvergent())
8211 .setConvergenceControlToken(ConvControlToken);
8212 CLI.CB = &I;
8213
8214 std::pair<SDValue, SDValue> Result =
8215 lowerInvokable(CLI, /*EHPadBB=*/nullptr);
8216
8217 if (Result.first.getNode())
8218 setValue(&I, Result.first);
8219 return;
8220 }
8221 case Intrinsic::ptrmask: {
8222 SDValue Ptr = getValue(I.getOperand(0));
8223 SDValue Mask = getValue(I.getOperand(1));
8224
8225 // On arm64_32, pointers are 32 bits when stored in memory, but
8226 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to
8227 // match the index type, but the pointer is 64 bits, so the mask must be
8228 // zero-extended up to 64 bits to match the pointer.
8229 EVT PtrVT =
8230 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8231 EVT MemVT =
8232 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8233 assert(PtrVT == Ptr.getValueType());
8234 if (Mask.getValueType().getFixedSizeInBits() < MemVT.getFixedSizeInBits()) {
8235 // For AMDGPU buffer descriptors the mask is 48 bits, but the pointer is
8236 // 128-bit, so we have to pad the mask with ones for unused bits.
8237 auto HighOnes = DAG.getNode(
8238 ISD::SHL, sdl, PtrVT, DAG.getAllOnesConstant(sdl, PtrVT),
8239 DAG.getShiftAmountConstant(Mask.getValueType().getFixedSizeInBits(),
8240 PtrVT, sdl));
8241 Mask = DAG.getNode(ISD::OR, sdl, PtrVT,
8242 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8243 } else if (Mask.getValueType() != PtrVT)
8244 Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8245
8246 assert(Mask.getValueType() == PtrVT);
8247 setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
8248 return;
8249 }
8250 case Intrinsic::threadlocal_address: {
8251 setValue(&I, getValue(I.getOperand(0)));
8252 return;
8253 }
8254 case Intrinsic::get_active_lane_mask: {
8255 EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8256 SDValue Index = getValue(I.getOperand(0));
8257 SDValue TripCount = getValue(I.getOperand(1));
8258 EVT ElementVT = Index.getValueType();
8259
8260 if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
8261 setValue(&I, DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8262 TripCount));
8263 return;
8264 }
8265
8266 EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
8267 CCVT.getVectorElementCount());
8268
8269 SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
8270 SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
8271 SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
8272 SDValue VectorInduction = DAG.getNode(
8273 ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
8274 SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
8275 VectorTripCount, ISD::CondCode::SETULT);
8276 setValue(&I, SetCC);
8277 return;
8278 }
8279 case Intrinsic::experimental_get_vector_length: {
8280 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
8281 "Expected positive VF");
8282 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
8283 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8284
8285 SDValue Count = getValue(I.getOperand(0));
8286 EVT CountVT = Count.getValueType();
8287
8288 if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
8289 visitTargetIntrinsic(I, Intrinsic);
8290 return;
8291 }
8292
8293 // Expand to a umin between the trip count and the maximum elements the type
8294 // can hold.
8295 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8296
8297 // Extend the trip count to at least the result VT.
8298 if (CountVT.bitsLT(VT)) {
8299 Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
8300 CountVT = VT;
8301 }
8302
8303 SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
8304 ElementCount::get(VF, IsScalable));
8305
8306 SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
8307 // Clip to the result type if needed.
8308 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
8309
8310 setValue(&I, Trunc);
8311 return;
8312 }
8313 case Intrinsic::vector_partial_reduce_add: {
8314 SDValue Acc = getValue(I.getOperand(0));
8315 SDValue Input = getValue(I.getOperand(1));
8316 setValue(&I,
8317 DAG.getNode(ISD::PARTIAL_REDUCE_UMLA, sdl, Acc.getValueType(), Acc,
8318 Input, DAG.getConstant(1, sdl, Input.getValueType())));
8319 return;
8320 }
8321 case Intrinsic::vector_partial_reduce_fadd: {
8322 SDValue Acc = getValue(I.getOperand(0));
8323 SDValue Input = getValue(I.getOperand(1));
8324 setValue(&I, DAG.getNode(
8325 ISD::PARTIAL_REDUCE_FMLA, sdl, Acc.getValueType(), Acc,
8326 Input, DAG.getConstantFP(1.0, sdl, Input.getValueType())));
8327 return;
8328 }
8329 case Intrinsic::experimental_cttz_elts: {
8330 SDValue Op = getValue(I.getOperand(0));
8331 EVT OpVT = Op.getValueType();
8332 EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
8333 bool ZeroIsPoison =
8334 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8335 if (OpVT.getVectorElementType() != MVT::i1) {
8336 // Compare the input vector elements to zero & use to count trailing
8337 // zeros.
8338 SDValue AllZero = DAG.getConstant(0, sdl, OpVT);
8339 EVT I1OpVT = OpVT.changeVectorElementType(*DAG.getContext(), MVT::i1);
8340 Op = DAG.getSetCC(sdl, I1OpVT, Op, AllZero, ISD::SETNE);
8341 }
8342 setValue(&I, DAG.getNode(ZeroIsPoison ? ISD::CTTZ_ELTS_ZERO_POISON
8344 sdl, RetTy, Op));
8345 return;
8346 }
8347 case Intrinsic::vector_insert: {
8348 SDValue Vec = getValue(I.getOperand(0));
8349 SDValue SubVec = getValue(I.getOperand(1));
8350 SDValue Index = getValue(I.getOperand(2));
8351
8352 // The intrinsic's index type is i64, but the SDNode requires an index type
8353 // suitable for the target. Convert the index as required.
8354 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8355 if (Index.getValueType() != VectorIdxTy)
8356 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8357
8358 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8359 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
8360 Index));
8361 return;
8362 }
8363 case Intrinsic::vector_extract: {
8364 SDValue Vec = getValue(I.getOperand(0));
8365 SDValue Index = getValue(I.getOperand(1));
8366 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8367
8368 // The intrinsic's index type is i64, but the SDNode requires an index type
8369 // suitable for the target. Convert the index as required.
8370 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8371 if (Index.getValueType() != VectorIdxTy)
8372 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8373
8374 setValue(&I,
8375 DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
8376 return;
8377 }
8378 case Intrinsic::experimental_vector_match: {
8379 SDValue Op1 = getValue(I.getOperand(0));
8380 SDValue Op2 = getValue(I.getOperand(1));
8381 SDValue Mask = getValue(I.getOperand(2));
8382 EVT Op1VT = Op1.getValueType();
8383 EVT Op2VT = Op2.getValueType();
8384 EVT ResVT = Mask.getValueType();
8385 unsigned SearchSize = Op2VT.getVectorNumElements();
8386
8387 // If the target has native support for this vector match operation, lower
8388 // the intrinsic untouched; otherwise, expand it below.
8389 if (!TLI.shouldExpandVectorMatch(Op1VT, SearchSize)) {
8390 visitTargetIntrinsic(I, Intrinsic);
8391 return;
8392 }
8393
8394 SDValue Ret = DAG.getConstant(0, sdl, ResVT);
8395
8396 for (unsigned i = 0; i < SearchSize; ++i) {
8397 SDValue Op2Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
8398 Op2VT.getVectorElementType(), Op2,
8399 DAG.getVectorIdxConstant(i, sdl));
8400 SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, sdl, Op1VT, Op2Elem);
8401 SDValue Cmp = DAG.getSetCC(sdl, ResVT, Op1, Splat, ISD::SETEQ);
8402 Ret = DAG.getNode(ISD::OR, sdl, ResVT, Ret, Cmp);
8403 }
8404
8405 setValue(&I, DAG.getNode(ISD::AND, sdl, ResVT, Ret, Mask));
8406 return;
8407 }
8408 case Intrinsic::vector_reverse:
8409 visitVectorReverse(I);
8410 return;
8411 case Intrinsic::vector_splice_left:
8412 case Intrinsic::vector_splice_right:
8413 visitVectorSplice(I);
8414 return;
8415 case Intrinsic::callbr_landingpad:
8416 visitCallBrLandingPad(I);
8417 return;
8418 case Intrinsic::vector_interleave2:
8419 visitVectorInterleave(I, 2);
8420 return;
8421 case Intrinsic::vector_interleave3:
8422 visitVectorInterleave(I, 3);
8423 return;
8424 case Intrinsic::vector_interleave4:
8425 visitVectorInterleave(I, 4);
8426 return;
8427 case Intrinsic::vector_interleave5:
8428 visitVectorInterleave(I, 5);
8429 return;
8430 case Intrinsic::vector_interleave6:
8431 visitVectorInterleave(I, 6);
8432 return;
8433 case Intrinsic::vector_interleave7:
8434 visitVectorInterleave(I, 7);
8435 return;
8436 case Intrinsic::vector_interleave8:
8437 visitVectorInterleave(I, 8);
8438 return;
8439 case Intrinsic::vector_deinterleave2:
8440 visitVectorDeinterleave(I, 2);
8441 return;
8442 case Intrinsic::vector_deinterleave3:
8443 visitVectorDeinterleave(I, 3);
8444 return;
8445 case Intrinsic::vector_deinterleave4:
8446 visitVectorDeinterleave(I, 4);
8447 return;
8448 case Intrinsic::vector_deinterleave5:
8449 visitVectorDeinterleave(I, 5);
8450 return;
8451 case Intrinsic::vector_deinterleave6:
8452 visitVectorDeinterleave(I, 6);
8453 return;
8454 case Intrinsic::vector_deinterleave7:
8455 visitVectorDeinterleave(I, 7);
8456 return;
8457 case Intrinsic::vector_deinterleave8:
8458 visitVectorDeinterleave(I, 8);
8459 return;
8460 case Intrinsic::experimental_vector_compress:
8461 setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl,
8462 getValue(I.getArgOperand(0)).getValueType(),
8463 getValue(I.getArgOperand(0)),
8464 getValue(I.getArgOperand(1)),
8465 getValue(I.getArgOperand(2)), Flags));
8466 return;
8467 case Intrinsic::experimental_convergence_anchor:
8468 case Intrinsic::experimental_convergence_entry:
8469 case Intrinsic::experimental_convergence_loop:
8470 visitConvergenceControl(I, Intrinsic);
8471 return;
8472 case Intrinsic::experimental_vector_histogram_add: {
8473 visitVectorHistogram(I, Intrinsic);
8474 return;
8475 }
8476 case Intrinsic::experimental_vector_extract_last_active: {
8477 visitVectorExtractLastActive(I, Intrinsic);
8478 return;
8479 }
8480 case Intrinsic::loop_dependence_war_mask:
8481 setValue(&I,
8483 EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
8484 getValue(I.getOperand(1)), getValue(I.getOperand(2)),
8485 DAG.getConstant(0, sdl, MVT::i64)));
8486 return;
8487 case Intrinsic::loop_dependence_raw_mask:
8488 setValue(&I,
8490 EVT::getEVT(I.getType()), getValue(I.getOperand(0)),
8491 getValue(I.getOperand(1)), getValue(I.getOperand(2)),
8492 DAG.getConstant(0, sdl, MVT::i64)));
8493 return;
8494 case Intrinsic::masked_udiv:
8495 setValue(&I,
8496 DAG.getNode(ISD::MASKED_UDIV, sdl, EVT::getEVT(I.getType()),
8497 getValue(I.getOperand(0)), getValue(I.getOperand(1)),
8498 getValue(I.getOperand(2))));
8499 return;
8500 case Intrinsic::masked_sdiv:
8501 setValue(&I,
8502 DAG.getNode(ISD::MASKED_SDIV, sdl, EVT::getEVT(I.getType()),
8503 getValue(I.getOperand(0)), getValue(I.getOperand(1)),
8504 getValue(I.getOperand(2))));
8505 return;
8506 case Intrinsic::masked_urem:
8507 setValue(&I,
8508 DAG.getNode(ISD::MASKED_UREM, sdl, EVT::getEVT(I.getType()),
8509 getValue(I.getOperand(0)), getValue(I.getOperand(1)),
8510 getValue(I.getOperand(2))));
8511 return;
8512 case Intrinsic::masked_srem:
8513 setValue(&I,
8514 DAG.getNode(ISD::MASKED_SREM, sdl, EVT::getEVT(I.getType()),
8515 getValue(I.getOperand(0)), getValue(I.getOperand(1)),
8516 getValue(I.getOperand(2))));
8517 return;
8518 }
8519}
8520
// Record the output chain of a constrained-FP node without updating the DAG
// root immediately; the chain is parked on one of the pending lists instead.
// NOTE(review): the extractor dropped the second parameter line here — the
// switch below dispatches on an `EB` selector (an fp exception-behavior
// value) that is declared on the missing line. Confirm against the full file.
void SelectionDAGBuilder::pushFPOpOutChain(SDValue Result,
  // The node must produce exactly two results: the FP value and a chain.
  assert(Result.getNode()->getNumValues() == 2);
  SDValue OutChain = Result.getValue(1);
  assert(OutChain.getValueType() == MVT::Other);

  // Instead of updating the root immediately, push the produced chain to the
  // appropriate list, deferring the update until the root is requested. In this
  // case, the nodes from the lists are chained using TokenFactor, indicating
  // that the operations are independent.
  //
  // In particular, the root is updated before any call that might access the
  // floating-point environment, except for constrained intrinsics.
  switch (EB) {
    // NOTE(review): the case labels for this switch are missing from this
    // view; presumably non-strict exception behaviors select the first list
    // and the strict behavior selects the second — verify in the full file.
    PendingConstrainedFP.push_back(OutChain);
    break;
    PendingConstrainedFPStrict.push_back(OutChain);
    break;
  }
}
8544
// Lower a constrained floating-point intrinsic to the corresponding STRICT_*
// SelectionDAG node. Strict nodes carry a chain so that FP-environment
// effects are ordered; the produced chain is deferred via pushFPOpOutChain.
// NOTE(review): the declarations of `EB` (the call's exception behavior) and
// the `Opers` operand vector were dropped by extraction and are not visible
// in this view of the file.
void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI) {
  SDLoc sdl = getCurSDLoc();

  // We do not need to serialize constrained FP intrinsics against
  // each other or against (nonvolatile) loads, so they can be
  // chained like loads.
  SDValue Chain = getFPOperationRoot(EB);
  Opers.push_back(Chain);
  // Collect the value operands, skipping the trailing metadata operands
  // (getNonMetadataArgCount excludes them).
  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
    Opers.push_back(getValue(FPI.getArgOperand(I)));

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
  // Strict FP nodes produce the value result plus an output chain.
  SDVTList VTs = DAG.getVTList(VT, MVT::Other);

  SDNodeFlags Flags;
    // NOTE(review): the guard condition for this statement (presumably a
    // test of `EB`) is missing from this view of the file.
    Flags.setNoFPExcept(true);

  // Carry over any fast-math flags attached to the intrinsic call.
  if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
    Flags.copyFMF(*FPOp);

  // Map the constrained intrinsic ID to its STRICT_* ISD opcode.
  unsigned Opcode;
  switch (FPI.getIntrinsicID()) {
  default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case Intrinsic::INTRINSIC: \
    Opcode = ISD::STRICT_##DAGN; \
    break;
#include "llvm/IR/ConstrainedOps.def"
  case Intrinsic::experimental_constrained_fmuladd: {
    Opcode = ISD::STRICT_FMA;
    // Break fmuladd into fmul and fadd.
    if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
        !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
      Opers.pop_back();
      SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
      pushFPOpOutChain(Mul, EB);
      Opcode = ISD::STRICT_FADD;
      Opers.clear();
      // Chain the fadd after the fmul (operand 0 is the input chain), then
      // feed it the product and the remaining addend.
      Opers.push_back(Mul.getValue(1));
      Opers.push_back(Mul.getValue(0));
      Opers.push_back(getValue(FPI.getArgOperand(2)));
    }
    break;
  }
  }

  // A few strict DAG nodes carry additional operands that are not
  // set up by the default code above.
  switch (Opcode) {
  default: break;
    // NOTE(review): the case label guarding these two statements was dropped
    // by extraction; confirm against the full file.
    Opers.push_back(
        DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
    break;
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    // Compares additionally carry a condition-code operand derived from the
    // IR predicate; relax NaN handling if both inputs are known non-NaN.
    auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
    ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
    if (DAG.isKnownNeverNaN(Opers[1]) && DAG.isKnownNeverNaN(Opers[2]))
      Condition = getFCmpCodeWithoutNaN(Condition);
    Opers.push_back(DAG.getCondCode(Condition));
    break;
  }
  }

  // Build the node, defer its output chain, and record the FP result as the
  // value of the intrinsic call.
  SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
  pushFPOpOutChain(Result, EB);

  SDValue FPResult = Result.getValue(0);
  setValue(&FPI, FPResult);
}
8621
/// Map a vector-predication intrinsic ID to its ISD::VP_* SDNode opcode.
/// Most VP intrinsics map 1:1 via VPIntrinsics.def; the ctlz/cttz family
/// additionally inspects a constant operand to pick the _ZERO_POISON form,
/// and ordered FP reductions may be relaxed when reassociation is allowed.
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
  std::optional<unsigned> ResOPC;
  switch (VPIntrin.getIntrinsicID()) {
  // vp.ctlz/vp.cttz encode "zero is poison" as constant argument 1.
  case Intrinsic::vp_ctlz: {
    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
    ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_POISON : ISD::VP_CTLZ;
    break;
  }
  case Intrinsic::vp_cttz: {
    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
    ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_POISON : ISD::VP_CTTZ;
    break;
  }
  case Intrinsic::vp_cttz_elts: {
    bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
    ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_POISON : ISD::VP_CTTZ_ELTS;
    break;
  }
  // Everything else maps directly through the generated table.
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
  case Intrinsic::VPID: \
    ResOPC = ISD::VPSD; \
    break;
#include "llvm/IR/VPIntrinsics.def"
  }

  if (!ResOPC)
    // NOTE(review): the first line of the diagnostic call emitted here was
    // dropped by extraction; only its message argument is visible.
        "Inconsistency: no SDNode available for this VPIntrinsic!");

  // Sequential (strictly ordered) FP reductions can be demoted to the
  // unordered form when the call permits reassociation.
  if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
      *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
    if (VPIntrin.getFastMathFlags().allowReassoc())
      return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
                                                : ISD::VP_REDUCE_FMUL;
  }

  return *ResOPC;
}
8660
8661void SelectionDAGBuilder::visitVPLoad(
8662 const VPIntrinsic &VPIntrin, EVT VT,
8663 const SmallVectorImpl<SDValue> &OpValues) {
8664 SDLoc DL = getCurSDLoc();
8665 Value *PtrOperand = VPIntrin.getArgOperand(0);
8666 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8667 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8668 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8669 SDValue LD;
8670 // Do not serialize variable-length loads of constant memory with
8671 // anything.
8672 if (!Alignment)
8673 Alignment = DAG.getEVTAlign(VT);
8674 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8675 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
8676 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8677 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8678 MachineMemOperand::Flags MMOFlags =
8679 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8680 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8681 MachinePointerInfo(PtrOperand), MMOFlags,
8682 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8683 LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8684 MMO, false /*IsExpanding */);
8685 if (AddToChain)
8686 PendingLoads.push_back(LD.getValue(1));
8687 setValue(&VPIntrin, LD);
8688}
8689
8690void SelectionDAGBuilder::visitVPLoadFF(
8691 const VPIntrinsic &VPIntrin, EVT VT, EVT EVLVT,
8692 const SmallVectorImpl<SDValue> &OpValues) {
8693 assert(OpValues.size() == 3 && "Unexpected number of operands");
8694 SDLoc DL = getCurSDLoc();
8695 Value *PtrOperand = VPIntrin.getArgOperand(0);
8696 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8697 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8698 const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
8699 SDValue LD;
8700 // Do not serialize variable-length loads of constant memory with
8701 // anything.
8702 if (!Alignment)
8703 Alignment = DAG.getEVTAlign(VT);
8704 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8705 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
8706 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8707 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8708 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8709 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8710 LD = DAG.getLoadFFVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8711 MMO);
8712 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, EVLVT, LD.getValue(1));
8713 if (AddToChain)
8714 PendingLoads.push_back(LD.getValue(2));
8715 setValue(&VPIntrin, DAG.getMergeValues({LD.getValue(0), Trunc}, DL));
8716}
8717
8718void SelectionDAGBuilder::visitVPGather(
8719 const VPIntrinsic &VPIntrin, EVT VT,
8720 const SmallVectorImpl<SDValue> &OpValues) {
8721 SDLoc DL = getCurSDLoc();
8722 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8723 Value *PtrOperand = VPIntrin.getArgOperand(0);
8724 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8725 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8726 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8727 SDValue LD;
8728 if (!Alignment)
8729 Alignment = DAG.getEVTAlign(VT.getScalarType());
8730 unsigned AS =
8731 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8732 MachineMemOperand::Flags MMOFlags =
8733 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8734 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8735 MachinePointerInfo(AS), MMOFlags, LocationSize::beforeOrAfterPointer(),
8736 *Alignment, AAInfo, Ranges);
8737 SDValue Base, Index, Scale;
8738 bool UniformBase =
8739 getUniformBase(PtrOperand, Base, Index, Scale, this, VPIntrin.getParent(),
8740 VT.getScalarStoreSize());
8741 if (!UniformBase) {
8742 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8743 Index = getValue(PtrOperand);
8744 Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8745 }
8746 EVT IdxVT = Index.getValueType();
8747 EVT EltTy = IdxVT.getVectorElementType();
8748 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8749 EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
8750 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8751 }
8752 LD = DAG.getGatherVP(
8753 DAG.getVTList(VT, MVT::Other), VT, DL,
8754 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8756 PendingLoads.push_back(LD.getValue(1));
8757 setValue(&VPIntrin, LD);
8758}
8759
8760void SelectionDAGBuilder::visitVPStore(
8761 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8762 SDLoc DL = getCurSDLoc();
8763 Value *PtrOperand = VPIntrin.getArgOperand(1);
8764 EVT VT = OpValues[0].getValueType();
8765 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8766 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8767 SDValue ST;
8768 if (!Alignment)
8769 Alignment = DAG.getEVTAlign(VT);
8770 SDValue Ptr = OpValues[1];
8771 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
8772 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8773 MachineMemOperand::Flags MMOFlags =
8774 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8775 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8776 MachinePointerInfo(PtrOperand), MMOFlags,
8777 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8778 ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
8779 OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
8780 /* IsTruncating */ false, /*IsCompressing*/ false);
8781 DAG.setRoot(ST);
8782 setValue(&VPIntrin, ST);
8783}
8784
8785void SelectionDAGBuilder::visitVPScatter(
8786 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8787 SDLoc DL = getCurSDLoc();
8788 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8789 Value *PtrOperand = VPIntrin.getArgOperand(1);
8790 EVT VT = OpValues[0].getValueType();
8791 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8792 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8793 SDValue ST;
8794 if (!Alignment)
8795 Alignment = DAG.getEVTAlign(VT.getScalarType());
8796 unsigned AS =
8797 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8798 MachineMemOperand::Flags MMOFlags =
8799 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8800 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8801 MachinePointerInfo(AS), MMOFlags, LocationSize::beforeOrAfterPointer(),
8802 *Alignment, AAInfo);
8803 SDValue Base, Index, Scale;
8804 bool UniformBase =
8805 getUniformBase(PtrOperand, Base, Index, Scale, this, VPIntrin.getParent(),
8806 VT.getScalarStoreSize());
8807 if (!UniformBase) {
8808 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8809 Index = getValue(PtrOperand);
8810 Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8811 }
8812 EVT IdxVT = Index.getValueType();
8813 EVT EltTy = IdxVT.getVectorElementType();
8814 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8815 EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
8816 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8817 }
8818 ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8819 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8820 OpValues[2], OpValues[3]},
8821 MMO, ISD::SIGNED_SCALED);
8822 DAG.setRoot(ST);
8823 setValue(&VPIntrin, ST);
8824}
8825
8826void SelectionDAGBuilder::visitVPStridedLoad(
8827 const VPIntrinsic &VPIntrin, EVT VT,
8828 const SmallVectorImpl<SDValue> &OpValues) {
8829 SDLoc DL = getCurSDLoc();
8830 Value *PtrOperand = VPIntrin.getArgOperand(0);
8831 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8832 if (!Alignment)
8833 Alignment = DAG.getEVTAlign(VT.getScalarType());
8834 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8835 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8836 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8837 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
8838 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8839 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8840 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8841 MachineMemOperand::Flags MMOFlags =
8842 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8843 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8844 MachinePointerInfo(AS), MMOFlags, LocationSize::beforeOrAfterPointer(),
8845 *Alignment, AAInfo, Ranges);
8846
8847 SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8848 OpValues[2], OpValues[3], MMO,
8849 false /*IsExpanding*/);
8850
8851 if (AddToChain)
8852 PendingLoads.push_back(LD.getValue(1));
8853 setValue(&VPIntrin, LD);
8854}
8855
8856void SelectionDAGBuilder::visitVPStridedStore(
8857 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8858 SDLoc DL = getCurSDLoc();
8859 Value *PtrOperand = VPIntrin.getArgOperand(1);
8860 EVT VT = OpValues[0].getValueType();
8861 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8862 if (!Alignment)
8863 Alignment = DAG.getEVTAlign(VT.getScalarType());
8864 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8865 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8866 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8867 MachineMemOperand::Flags MMOFlags =
8868 TLI.getVPIntrinsicMemOperandFlags(VPIntrin);
8869 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8870 MachinePointerInfo(AS), MMOFlags, LocationSize::beforeOrAfterPointer(),
8871 *Alignment, AAInfo);
8872
8873 SDValue ST = DAG.getStridedStoreVP(
8874 getMemoryRoot(), DL, OpValues[0], OpValues[1],
8875 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8876 OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8877 /*IsCompressing*/ false);
8878
8879 DAG.setRoot(ST);
8880 setValue(&VPIntrin, ST);
8881}
8882
8883void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8884 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8885 SDLoc DL = getCurSDLoc();
8886
8887 ISD::CondCode Condition;
8889
8890 Value *Op1 = VPIntrin.getOperand(0);
8891 Value *Op2 = VPIntrin.getOperand(1);
8892 // #2 is the condition code
8893 SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8894 SDValue EVL = getValue(VPIntrin.getOperand(4));
8895 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8896 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8897 "Unexpected target EVL type");
8898 EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8899
8900 if (VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy()) {
8901 Condition = getFCmpCondCode(CondCode);
8902 SimplifyQuery SQ(DAG.getDataLayout(), &VPIntrin);
8903 if (isKnownNeverNaN(Op2, SQ) && isKnownNeverNaN(Op1, SQ))
8904 Condition = getFCmpCodeWithoutNaN(Condition);
8905 } else {
8906 Condition = getICmpCondCode(CondCode);
8907 }
8908
8909 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8910 VPIntrin.getType());
8911 setValue(&VPIntrin, DAG.getSetCCVP(DL, DestVT, getValue(Op1), getValue(Op2),
8912 Condition, MaskOp, EVL));
8913}
8914
// Top-level lowering for all vector-predication (llvm.vp.*) intrinsics.
// Maps the intrinsic to an ISD opcode, materializes its operands
// (zero-extending the EVL operand to the target's EVL type), then dispatches
// to a memory-specific visitor or builds the generic VP node directly.
void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
    const VPIntrinsic &VPIntrin) {
  SDLoc DL = getCurSDLoc();
  unsigned Opcode = getISDForVPIntrinsic(VPIntrin);

  auto IID = VPIntrin.getIntrinsicID();

  // Comparisons have their own lowering (they need a condition code).
  if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
    return visitVPCmp(*CmpI);

  SmallVector<EVT, 4> ValueVTs;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
  SDVTList VTs = DAG.getVTList(ValueVTs);

  auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);

  MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
  assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
         "Unexpected target EVL type");

  // Request operands.
  SmallVector<SDValue, 7> OpValues;
  for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
    auto Op = getValue(VPIntrin.getArgOperand(I));
    // The EVL operand is widened to the target's preferred EVL type.
    if (I == EVLParamPos)
      Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
    OpValues.push_back(Op);
  }

  switch (Opcode) {
  default: {
    // Generic case: the VP node takes the operands verbatim; carry over any
    // fast-math flags from the IR call.
    SDNodeFlags SDFlags;
    if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
      SDFlags.copyFMF(*FPMO);
    SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
    setValue(&VPIntrin, Result);
    break;
  }
  // Memory operations need MachineMemOperands and chain handling.
  case ISD::VP_LOAD:
    visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
    break;
  case ISD::VP_LOAD_FF:
    visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
    break;
  case ISD::VP_GATHER:
    visitVPGather(VPIntrin, ValueVTs[0], OpValues);
    break;
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
    break;
  case ISD::VP_STORE:
    visitVPStore(VPIntrin, OpValues);
    break;
  case ISD::VP_SCATTER:
    visitVPScatter(VPIntrin, OpValues);
    break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    visitVPStridedStore(VPIntrin, OpValues);
    break;
  case ISD::VP_FMULADD: {
    assert(OpValues.size() == 5 && "Unexpected number of operands");
    SDNodeFlags SDFlags;
    if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
      SDFlags.copyFMF(*FPMO);
    // Emit a fused VP_FMA when fusion is allowed and profitable; otherwise
    // split into VP_FMUL + VP_FADD with the same mask/EVL.
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
      setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
    } else {
      SDValue Mul = DAG.getNode(
          ISD::VP_FMUL, DL, VTs,
          {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
      SDValue Add =
          DAG.getNode(ISD::VP_FADD, DL, VTs,
                      {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
      setValue(&VPIntrin, Add);
    }
    break;
  }
  case ISD::VP_IS_FPCLASS: {
    // The class-test bitmask must be an immediate operand of the node.
    const DataLayout DLayout = DAG.getDataLayout();
    EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
    auto Constant = OpValues[1]->getAsZExtVal();
    SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
    SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
                            {OpValues[0], Check, OpValues[2], OpValues[3]});
    setValue(&VPIntrin, V);
    return;
  }
  case ISD::VP_INTTOPTR: {
    // Extend/truncate to the in-memory pointer width, then to the result
    // type, mirroring the scalar inttoptr lowering.
    SDValue N = OpValues[0];
    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
    N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
                               OpValues[2]);
    N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
                             OpValues[2]);
    setValue(&VPIntrin, N);
    break;
  }
  case ISD::VP_PTRTOINT: {
    // Inverse of the above: pointer-width adjust first, then integer
    // extend/truncate to the destination type.
    SDValue N = OpValues[0];
    EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                          VPIntrin.getType());
    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
                                       VPIntrin.getOperand(0)->getType());
    N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
                               OpValues[2]);
    N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
                             OpValues[2]);
    setValue(&VPIntrin, N);
    break;
  }
  case ISD::VP_ABS:
  case ISD::VP_CTLZ:
  case ISD::VP_CTLZ_ZERO_POISON:
  case ISD::VP_CTTZ:
  case ISD::VP_CTTZ_ZERO_POISON:
  case ISD::VP_CTTZ_ELTS_ZERO_POISON:
  case ISD::VP_CTTZ_ELTS: {
    // These intrinsics carry an extra immediate argument (e.g. the
    // is-zero-poison flag) that is folded into the opcode, so only the value,
    // mask and EVL operands are forwarded.
    SDValue Result =
        DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
    setValue(&VPIntrin, Result);
    break;
  }
  }
}
9042
9044 const BasicBlock *EHPadBB,
9045 MCSymbol *&BeginLabel) {
9046 MachineFunction &MF = DAG.getMachineFunction();
9047
9048 // Insert a label before the invoke call to mark the try range. This can be
9049 // used to detect deletion of the invoke via the MachineModuleInfo.
9050 BeginLabel = MF.getContext().createTempSymbol();
9051
9052 // For SjLj, keep track of which landing pads go with which invokes
9053 // so as to maintain the ordering of pads in the LSDA.
9054 unsigned CallSiteIndex = FuncInfo.getCurrentCallSite();
9055 if (CallSiteIndex) {
9056 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
9057 LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex);
9058
9059 // Now that the call site is handled, stop tracking it.
9060 FuncInfo.setCurrentCallSite(0);
9061 }
9062
9063 return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
9064}
9065
9066SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
9067 const BasicBlock *EHPadBB,
9068 MCSymbol *BeginLabel) {
9069 assert(BeginLabel && "BeginLabel should've been set");
9070
9072
9073 // Insert a label at the end of the invoke call to mark the try range. This
9074 // can be used to detect deletion of the invoke via the MachineModuleInfo.
9075 MCSymbol *EndLabel = MF.getContext().createTempSymbol();
9076 Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
9077
9078 // Inform MachineModuleInfo of range.
9080 // There is a platform (e.g. wasm) that uses funclet style IR but does not
9081 // actually use outlined funclets and their LSDA info style.
9082 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
9083 assert(II && "II should've been set");
9084 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
9085 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
9086 } else if (!isScopedEHPersonality(Pers)) {
9087 assert(EHPadBB);
9088 MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel);
9089 }
9090
9091 return Chain;
9092}
9093
9094std::pair<SDValue, SDValue>
9096 const BasicBlock *EHPadBB) {
9097 MCSymbol *BeginLabel = nullptr;
9098
9099 if (EHPadBB) {
9100 // Both PendingLoads and PendingExports must be flushed here;
9101 // this call might not return.
9102 (void)getRoot();
9103 DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
9104 CLI.setChain(getRoot());
9105 }
9106
9107 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9108 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
9109
9110 assert((CLI.IsTailCall || Result.second.getNode()) &&
9111 "Non-null chain expected with non-tail call!");
9112 assert((Result.second.getNode() || !Result.first.getNode()) &&
9113 "Null value expected with tail call!");
9114
9115 if (!Result.second.getNode()) {
9116 // As a special case, a null chain means that a tail call has been emitted
9117 // and the DAG root is already updated.
9118 HasTailCall = true;
9119
9120 // Since there's no actual continuation from this block, nothing can be
9121 // relying on us setting vregs for them.
9122 PendingExports.clear();
9123 } else {
9124 DAG.setRoot(Result.second);
9125 }
9126
9127 if (EHPadBB) {
9128 DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
9129 BeginLabel));
9130 Result.second = getRoot();
9131 }
9132
9133 return Result;
9134}
9135
  // NOTE(review): the opening line of this definition (presumably
  // `bool SelectionDAGBuilder::canTailCall(const CallBase &CB)`, possibly
  // const-qualified) appears to have been lost in extraction just above this
  // point -- confirm against the original source file.
  //
  // Decides whether the call \p CB may be emitted as a tail call, applying
  // only target-independent constraints; target-specific checks happen later
  // in TLI->LowerCallTo.
  bool isMustTailCall = CB.isMustTailCall();

  // Avoid emitting tail calls in functions with the disable-tail-calls
  // attribute. A musttail call overrides the attribute.
  const Function *Caller = CB.getParent()->getParent();
  if (!isMustTailCall &&
      Caller->getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // We can't tail call inside a function with a swifterror argument. Lowering
  // does not support this yet. It would have to move into the swifterror
  // register before the call.
  if (DAG.getTargetLoweringInfo().supportSwiftError() &&
      Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within TLI->LowerCallTo.
  return isInTailCallPosition(CB, DAG.getTarget());
}
9157
                                      bool isTailCall, bool isMustTailCall,
                                      const BasicBlock *EHPadBB,
                                      const TargetLowering::PtrAuthInfo *PAI) {
  // NOTE(review): the first line of this definition (the LowerCallTo name and
  // leading parameters) and several single lines below appear to have been
  // lost in extraction -- confirm each marked spot against the original file.
  //
  // Lowers an ordinary call site: builds the argument list (handling
  // swifterror, sret, and operand bundles), populates a CallLoweringInfo,
  // delegates to lowerInvokable, and records the result value.
  auto &DL = DAG.getDataLayout();
  FunctionType *FTy = CB.getFunctionType();
  Type *RetTy = CB.getType();

  // NOTE(review): the declaration of `Args`
  // (TargetLowering::ArgListTy Args;) appears elided here.
  Args.reserve(CB.arg_size());

  const Value *SwiftErrorVal = nullptr;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Re-validate the requested tail call against caller-side constraints.
  if (isTailCall)
    isTailCall = canTailCall(CB);

  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    const Value *V = *I;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    SDValue ArgNode = getValue(V);
    TargetLowering::ArgListEntry Entry(ArgNode, V->getType());
    Entry.setAttributes(&CB, I - CB.arg_begin());

    // Use swifterror virtual register as input to the call.
    if (Entry.IsSwiftError && TLI.supportSwiftError()) {
      SwiftErrorVal = V;
      // We find the virtual register for the actual swifterror argument.
      // Instead of using the Value, we use the virtual register instead.
      Entry.Node =
          DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
                          EVT(TLI.getPointerTy(DL)));
    }

    Args.push_back(Entry);

    // If we have an explicit sret argument that is an Instruction, (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (Entry.IsSRet && isa<Instruction>(V))
      isTailCall = false;
  }

  // If call site has a cfguardtarget operand bundle, create and add an
  // additional ArgListEntry.
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
    Value *V = Bundle->Inputs[0];
    // NOTE(review): the construction of `Entry` for the cfguardtarget value
    // appears elided here.
    Entry.IsCFGuardTarget = true;
    Args.push_back(Entry);
  }

  // Disable tail calls if there is an swifterror argument. Targets have not
  // been updated to support tail calls.
  if (TLI.supportSwiftError() && SwiftErrorVal)
    isTailCall = false;

  // An indirect call with a kcfi bundle carries the expected CFI type hash.
  ConstantInt *CFIType = nullptr;
  if (CB.isIndirectCall()) {
    if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
      if (!TLI.supportKCFIBundles())
        // NOTE(review): the report_fatal_error( call opening appears elided
        // here.
        "Target doesn't support calls with kcfi operand bundles.");
      CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
      assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
    }
  }

  // Convergence-control token, if any, flows into the call node.
  SDValue ConvControlToken;
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    auto *Token = Bundle->Inputs[0].get();
    ConvControlToken = getValue(Token);
  }

  GlobalValue *DeactivationSymbol = nullptr;
  // NOTE(review): the operand-bundle lookup guarding this assignment appears
  // elided here.
    DeactivationSymbol = cast<GlobalValue>(Bundle->Inputs[0].get());
  }

  // NOTE(review): the CallLoweringInfo declaration and its first setter
  // lines appear elided here.
      .setChain(getRoot())
      .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
      .setTailCall(isTailCall)
      .setCFIType(CFIType)
      .setConvergenceControlToken(ConvControlToken)
      .setDeactivationSymbol(DeactivationSymbol);

  // Set the pointer authentication info if we have it.
  if (PAI) {
    if (!TLI.supportPtrAuthBundles())
      // NOTE(review): the report_fatal_error( call opening appears elided
      // here.
      "This target doesn't support calls with ptrauth operand bundles.");
    CLI.setPtrAuth(*PAI);
  }

  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  if (Result.first.getNode()) {
    // Attach range / nofpclass assertions derived from call-site attributes
    // to the returned value so later combines can exploit them.
    Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
    Result.first = lowerNoFPClassToAssertNoFPClass(DAG, CB, Result.first);
    setValue(&CB, Result.first);
  }

  // The last element of CLI.InVals has the SDValue for swifterror return.
  // Here we copy it to a virtual register and update SwiftErrorMap for
  // book-keeping.
  if (SwiftErrorVal && TLI.supportSwiftError()) {
    // Get the last element of InVals.
    SDValue Src = CLI.InVals.back();
    Register VReg =
        SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
    SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
    DAG.setRoot(CopyNode);
  }
}
9280
9281static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
9282 SelectionDAGBuilder &Builder) {
9283 // Check to see if this load can be trivially constant folded, e.g. if the
9284 // input is from a string literal.
9285 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
9286 // Cast pointer to the type we really want to load.
9287 Type *LoadTy =
9288 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
9289 if (LoadVT.isVector())
9290 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
9291 if (const Constant *LoadCst =
9292 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
9293 LoadTy, Builder.DAG.getDataLayout()))
9294 return Builder.getValue(LoadCst);
9295 }
9296
9297 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
9298 // still constant memory, the input chain can be the entry node.
9299 SDValue Root;
9300 bool ConstantMemory = false;
9301
9302 // Do not serialize (non-volatile) loads of constant memory with anything.
9303 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9304 Root = Builder.DAG.getEntryNode();
9305 ConstantMemory = true;
9306 } else {
9307 // Do not serialize non-volatile loads against each other.
9308 Root = Builder.DAG.getRoot();
9309 }
9310
9311 SDValue Ptr = Builder.getValue(PtrVal);
9312 SDValue LoadVal =
9313 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9314 MachinePointerInfo(PtrVal), Align(1));
9315
9316 if (!ConstantMemory)
9317 Builder.PendingLoads.push_back(LoadVal.getValue(1));
9318 return LoadVal;
9319}
9320
9321/// Record the value for an instruction that produces an integer result,
9322/// converting the type where necessary.
9323void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
9324 SDValue Value,
9325 bool IsSigned) {
9326 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9327 I.getType(), true);
9328 Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
9329 setValue(&I, Value);
9330}
9331
9332/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
9333/// true and lower it. Otherwise return false, and it will be lowered like a
9334/// normal call.
9335/// The caller already checked that \p I calls the appropriate LibFunc with a
9336/// correct prototype.
9337bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
9338 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
9339 const Value *Size = I.getArgOperand(2);
9340 const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
9341 if (CSize && CSize->getZExtValue() == 0) {
9342 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9343 I.getType(), true);
9344 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
9345 return true;
9346 }
9347
9348 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9349 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
9350 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
9351 getValue(Size), &I);
9352 if (Res.first.getNode()) {
9353 processIntegerCallValue(I, Res.first, true);
9354 PendingLoads.push_back(Res.second);
9355 return true;
9356 }
9357
9358 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
9359 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
9360 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
9361 return false;
9362
9363 // If the target has a fast compare for the given size, it will return a
9364 // preferred load type for that size. Require that the load VT is legal and
9365 // that the target supports unaligned loads of that type. Otherwise, return
9366 // INVALID.
9367 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
9368 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9369 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
9370 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
9371 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
9372 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
9373 // TODO: Check alignment of src and dest ptrs.
9374 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
9375 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
9376 if (!TLI.isTypeLegal(LVT) ||
9377 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
9378 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
9380 }
9381
9382 return LVT;
9383 };
9384
9385 // This turns into unaligned loads. We only do this if the target natively
9386 // supports the MVT we'll be loading or if it is small enough (<= 4) that
9387 // we'll only produce a small number of byte loads.
9388 MVT LoadVT;
9389 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
9390 switch (NumBitsToCompare) {
9391 default:
9392 return false;
9393 case 16:
9394 LoadVT = MVT::i16;
9395 break;
9396 case 32:
9397 LoadVT = MVT::i32;
9398 break;
9399 case 64:
9400 case 128:
9401 case 256:
9402 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9403 break;
9404 }
9405
9406 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
9407 return false;
9408
9409 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
9410 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
9411
9412 // Bitcast to a wide integer type if the loads are vectors.
9413 if (LoadVT.isVector()) {
9414 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
9415 LoadL = DAG.getBitcast(CmpVT, LoadL);
9416 LoadR = DAG.getBitcast(CmpVT, LoadR);
9417 }
9418
9419 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
9420 processIntegerCallValue(I, Cmp, false);
9421 return true;
9422}
9423
9424/// See if we can lower a memchr call into an optimized form. If so, return
9425/// true and lower it. Otherwise return false, and it will be lowered like a
9426/// normal call.
9427/// The caller already checked that \p I calls the appropriate LibFunc with a
9428/// correct prototype.
9429bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
9430 const Value *Src = I.getArgOperand(0);
9431 const Value *Char = I.getArgOperand(1);
9432 const Value *Length = I.getArgOperand(2);
9433
9434 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9435 std::pair<SDValue, SDValue> Res =
9436 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
9437 getValue(Src), getValue(Char), getValue(Length),
9438 MachinePointerInfo(Src));
9439 if (Res.first.getNode()) {
9440 setValue(&I, Res.first);
9441 PendingLoads.push_back(Res.second);
9442 return true;
9443 }
9444
9445 return false;
9446}
9447
9448/// See if we can lower a memccpy call into an optimized form. If so, return
9449/// true and lower it, otherwise return false and it will be lowered like a
9450/// normal call.
9451/// The caller already checked that \p I calls the appropriate LibFunc with a
9452/// correct prototype.
9453bool SelectionDAGBuilder::visitMemCCpyCall(const CallInst &I) {
9454 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9455 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemccpy(
9456 DAG, getCurSDLoc(), DAG.getRoot(), getValue(I.getArgOperand(0)),
9457 getValue(I.getArgOperand(1)), getValue(I.getArgOperand(2)),
9458 getValue(I.getArgOperand(3)), &I);
9459
9460 if (Res.first) {
9461 processIntegerCallValue(I, Res.first, true);
9462 PendingLoads.push_back(Res.second);
9463 return true;
9464 }
9465 return false;
9466}
9467
9468/// See if we can lower a mempcpy call into an optimized form. If so, return
9469/// true and lower it. Otherwise return false, and it will be lowered like a
9470/// normal call.
9471/// The caller already checked that \p I calls the appropriate LibFunc with a
9472/// correct prototype.
9473bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
9474 SDValue Dst = getValue(I.getArgOperand(0));
9475 SDValue Src = getValue(I.getArgOperand(1));
9476 SDValue Size = getValue(I.getArgOperand(2));
9477
9478 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
9479 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
9480 // DAG::getMemcpy needs Alignment to be defined.
9481 Align Alignment = std::min(DstAlign, SrcAlign);
9482
9483 SDLoc sdl = getCurSDLoc();
9484
9485 // In the mempcpy context we need to pass in a false value for isTailCall
9486 // because the return pointer needs to be adjusted by the size of
9487 // the copied memory.
9488 SDValue Root = getMemoryRoot();
9489 SDValue MC = DAG.getMemcpy(
9490 Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr,
9491 std::nullopt, MachinePointerInfo(I.getArgOperand(0)),
9492 MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata());
9493 assert(MC.getNode() != nullptr &&
9494 "** memcpy should not be lowered as TailCall in mempcpy context **");
9495 DAG.setRoot(MC);
9496
9497 // Check if Size needs to be truncated or extended.
9498 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
9499
9500 // Adjust return pointer to point just past the last dst byte.
9501 SDValue DstPlusSize = DAG.getMemBasePlusOffset(Dst, Size, sdl);
9502 setValue(&I, DstPlusSize);
9503 return true;
9504}
9505
9506/// See if we can lower a strcpy call into an optimized form. If so, return
9507/// true and lower it, otherwise return false and it will be lowered like a
9508/// normal call.
9509/// The caller already checked that \p I calls the appropriate LibFunc with a
9510/// correct prototype.
9511bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
9512 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9513
9514 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9515 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrcpy(
9516 DAG, getCurSDLoc(), getRoot(), getValue(Arg0), getValue(Arg1),
9517 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), isStpcpy, &I);
9518 if (Res.first.getNode()) {
9519 setValue(&I, Res.first);
9520 DAG.setRoot(Res.second);
9521 return true;
9522 }
9523
9524 return false;
9525}
9526
9527/// See if we can lower a strcmp call into an optimized form. If so, return
9528/// true and lower it, otherwise return false and it will be lowered like a
9529/// normal call.
9530/// The caller already checked that \p I calls the appropriate LibFunc with a
9531/// correct prototype.
9532bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
9533 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9534
9535 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9536 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrcmp(
9537 DAG, getCurSDLoc(), DAG.getRoot(), getValue(Arg0), getValue(Arg1),
9538 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), &I);
9539 if (Res.first.getNode()) {
9540 processIntegerCallValue(I, Res.first, true);
9541 PendingLoads.push_back(Res.second);
9542 return true;
9543 }
9544
9545 return false;
9546}
9547
9548/// See if we can lower a strlen call into an optimized form. If so, return
9549/// true and lower it, otherwise return false and it will be lowered like a
9550/// normal call.
9551/// The caller already checked that \p I calls the appropriate LibFunc with a
9552/// correct prototype.
9553bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
9554 const Value *Arg0 = I.getArgOperand(0);
9555
9556 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9557 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrlen(
9558 DAG, getCurSDLoc(), DAG.getRoot(), getValue(Arg0), &I);
9559 if (Res.first.getNode()) {
9560 processIntegerCallValue(I, Res.first, false);
9561 PendingLoads.push_back(Res.second);
9562 return true;
9563 }
9564
9565 return false;
9566}
9567
9568/// See if we can lower a strnlen call into an optimized form. If so, return
9569/// true and lower it, otherwise return false and it will be lowered like a
9570/// normal call.
9571/// The caller already checked that \p I calls the appropriate LibFunc with a
9572/// correct prototype.
9573bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
9574 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9575
9576 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9577 std::pair<SDValue, SDValue> Res =
9578 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
9579 getValue(Arg0), getValue(Arg1),
9580 MachinePointerInfo(Arg0));
9581 if (Res.first.getNode()) {
9582 processIntegerCallValue(I, Res.first, false);
9583 PendingLoads.push_back(Res.second);
9584 return true;
9585 }
9586
9587 return false;
9588}
9589
9590/// See if we can lower a Strstr call into an optimized form. If so, return
9591/// true and lower it, otherwise return false and it will be lowered like a
9592/// normal call.
9593/// The caller already checked that \p I calls the appropriate LibFunc with a
9594/// correct prototype.
9595bool SelectionDAGBuilder::visitStrstrCall(const CallInst &I) {
9596 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9597 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9598 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrstr(
9599 DAG, getCurSDLoc(), DAG.getRoot(), getValue(Arg0), getValue(Arg1), &I);
9600 if (Res.first) {
9601 processIntegerCallValue(I, Res.first, false);
9602 PendingLoads.push_back(Res.second);
9603 return true;
9604 }
9605 return false;
9606}
9607
9608/// See if we can lower a unary floating-point operation into an SDNode with
9609/// the specified Opcode. If so, return true and lower it, otherwise return
9610/// false and it will be lowered like a normal call.
9611/// The caller already checked that \p I calls the appropriate LibFunc with a
9612/// correct prototype.
9613bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
9614 unsigned Opcode) {
9615 // We already checked this call's prototype; verify it doesn't modify errno.
9616 // Do not perform optimizations for call sites that require strict
9617 // floating-point semantics.
9618 if (!I.onlyReadsMemory() || I.isStrictFP())
9619 return false;
9620
9621 SDNodeFlags Flags;
9622 Flags.copyFMF(cast<FPMathOperator>(I));
9623
9624 SDValue Tmp = getValue(I.getArgOperand(0));
9625 setValue(&I,
9626 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
9627 return true;
9628}
9629
9630/// See if we can lower a binary floating-point operation into an SDNode with
9631/// the specified Opcode. If so, return true and lower it. Otherwise return
9632/// false, and it will be lowered like a normal call.
9633/// The caller already checked that \p I calls the appropriate LibFunc with a
9634/// correct prototype.
9635bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
9636 unsigned Opcode) {
9637 // We already checked this call's prototype; verify it doesn't modify errno.
9638 // Do not perform optimizations for call sites that require strict
9639 // floating-point semantics.
9640 if (!I.onlyReadsMemory() || I.isStrictFP())
9641 return false;
9642
9643 SDNodeFlags Flags;
9644 Flags.copyFMF(cast<FPMathOperator>(I));
9645
9646 SDValue Tmp0 = getValue(I.getArgOperand(0));
9647 SDValue Tmp1 = getValue(I.getArgOperand(1));
9648 EVT VT = Tmp0.getValueType();
9649 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
9650 return true;
9651}
9652
/// Lower a call instruction: inline asm is dispatched separately, LLVM
/// intrinsics go through visitIntrinsicCall, a set of well-known libc/libm
/// calls may be lowered directly to ISD nodes, ptrauth/deopt operand bundles
/// get dedicated lowering, and everything else becomes a normal call.
void SelectionDAGBuilder::visitCall(const CallInst &I) {
  // Handle inline assembly differently.
  if (I.isInlineAsm()) {
    visitInlineAsm(I);
    return;
  }

  // NOTE(review): a source line appears to be missing from this excerpt here
  // (the doc-extraction skips one inner line number); verify upstream.

  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      // Is this an LLVM intrinsic?
      if (unsigned IID = F->getIntrinsicID()) {
        visitIntrinsicCall(I, IID);
        return;
      }
    }

    // Check for well-known libc/libm calls. If the function is internal, it
    // can't be a library call. Don't do the check if marked as nobuiltin for
    // some reason.
    // This code should not handle libcalls that are already canonicalized to
    // intrinsics by the middle-end.
    LibFunc Func;
    if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(*F, Func) && LibInfo->hasOptimizedCodeGen(Func)) {
      switch (Func) {
      default: break;
      case LibFunc_bcmp:
        if (visitMemCmpBCmpCall(I))
          return;
        break;
      case LibFunc_copysign:
      case LibFunc_copysignf:
      case LibFunc_copysignl:
        // We already checked this call's prototype; verify it doesn't modify
        // errno.
        if (I.onlyReadsMemory()) {
          SDValue LHS = getValue(I.getArgOperand(0));
          SDValue RHS = getValue(I.getArgOperand(1));
          // NOTE(review): a line is missing here in this excerpt —
          // presumably the setValue/DAG.getNode(ISD::FCOPYSIGN, ...) call
          // whose trailing arguments appear on the next line; verify
          // upstream before editing.
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
        break;
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_sinl:
        if (visitUnaryFloatCall(I, ISD::FSIN))
          return;
        break;
      case LibFunc_cos:
      case LibFunc_cosf:
      case LibFunc_cosl:
        if (visitUnaryFloatCall(I, ISD::FCOS))
          return;
        break;
      case LibFunc_tan:
      case LibFunc_tanf:
      case LibFunc_tanl:
        if (visitUnaryFloatCall(I, ISD::FTAN))
          return;
        break;
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_asinl:
        if (visitUnaryFloatCall(I, ISD::FASIN))
          return;
        break;
      case LibFunc_acos:
      case LibFunc_acosf:
      case LibFunc_acosl:
        if (visitUnaryFloatCall(I, ISD::FACOS))
          return;
        break;
      case LibFunc_atan:
      case LibFunc_atanf:
      case LibFunc_atanl:
        if (visitUnaryFloatCall(I, ISD::FATAN))
          return;
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        if (visitBinaryFloatCall(I, ISD::FATAN2))
          return;
        break;
      case LibFunc_sinh:
      case LibFunc_sinhf:
      case LibFunc_sinhl:
        if (visitUnaryFloatCall(I, ISD::FSINH))
          return;
        break;
      case LibFunc_cosh:
      case LibFunc_coshf:
      case LibFunc_coshl:
        if (visitUnaryFloatCall(I, ISD::FCOSH))
          return;
        break;
      case LibFunc_tanh:
      case LibFunc_tanhf:
      case LibFunc_tanhl:
        if (visitUnaryFloatCall(I, ISD::FTANH))
          return;
        break;
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
      case LibFunc_sqrtl:
      case LibFunc_sqrt_finite:
      case LibFunc_sqrtf_finite:
      case LibFunc_sqrtl_finite:
        if (visitUnaryFloatCall(I, ISD::FSQRT))
          return;
        break;
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log2l:
        if (visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_exp10:
      case LibFunc_exp10f:
      case LibFunc_exp10l:
        if (visitUnaryFloatCall(I, ISD::FEXP10))
          return;
        break;
      case LibFunc_ldexp:
      case LibFunc_ldexpf:
      case LibFunc_ldexpl:
        if (visitBinaryFloatCall(I, ISD::FLDEXP))
          return;
        break;
      case LibFunc_strstr:
        if (visitStrstrCall(I))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpBCmpCall(I))
          return;
        break;
      case LibFunc_memccpy:
        if (visitMemCCpyCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  // Pointer-authentication bundles get their own lowering path.
  if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
    LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
    return;
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  // CFGuardTarget bundles are lowered in LowerCallTo.
  // NOTE(review): several lines are missing from this excerpt around the
  // fragment below — presumably an assertion/diagnostic taking (I, "calls",
  // ...) about supported operand bundles; verify upstream before editing.
      I, "calls",

  SDValue Callee = getValue(I.getCalledOperand());

  if (I.hasDeoptState())
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
    LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
}
9860
// Lower a call site carrying a "ptrauth" operand bundle: either strip a
// compatible ConstantPtrAuth callee down to a direct unauthenticated call,
// or emit an authenticated indirect call with the bundle's key/discriminator.
// NOTE(review): the opening signature line (presumably
// "void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(") is missing
// from this excerpt; the body below begins mid-parameter-list.
    const CallBase &CB, const BasicBlock *EHPadBB) {
  auto PAB = CB.getOperandBundle("ptrauth");
  const Value *CalleeV = CB.getCalledOperand();

  // Gather the call ptrauth data from the operand bundle:
  //   [ i32 <key>, i64 <discriminator> ]
  const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
  const Value *Discriminator = PAB->Inputs[1];

  assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
  assert(Discriminator->getType()->isIntegerTy(64) &&
         "Invalid ptrauth discriminator");

  // Look through ptrauth constants to find the raw callee.
  // Do a direct unauthenticated call if we found it and everything matches.
  if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
    if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
                                         DAG.getDataLayout()))
      return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
                         CB.isMustTailCall(), EHPadBB);

  // Functions should never be ptrauth-called directly.
  assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");

  // Otherwise, do an authenticated indirect call.
  TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
                                     getValue(Discriminator)};

  LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
              EHPadBB, &PAI);
}
9893
namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of register corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
    : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
  }

  /// Whether or not this operand accesses memory.
  bool hasMemory(const TargetLowering &TLI) const {
    // Indirect operand accesses access memory.
    if (isIndirect)
      return true;

    // Otherwise check each alternative constraint code.
    for (const auto &Code : Codes)
      // NOTE(review): the condition line governing this 'return true' is
      // missing from this excerpt (presumably a check that
      // TLI.getConstraintType(Code) is a memory constraint); verify upstream.
        return true;

    return false;
  }
};


} // end anonymous namespace
9929
/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  // Already agree on a value type — nothing to patch.
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  // NOTE(review): a declaration line is missing from this excerpt here —
  // presumably the TargetRegisterInfo (TRI) passed to
  // getRegForInlineAsmConstraint below; verify upstream.
  const auto &TLI = DAG.getTargetLoweringInfo();

  // Resolve both constraints to (register, register class) pairs.
  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  // The pair is only compatible if both sides are int/FP (or both are not)
  // and they resolve to the same register class.
  const bool OutOpIsIntOrFP =
      OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
  const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
                             MatchingOpInfo.ConstraintVT.isFloatingPoint();
  if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  // Force the input to use the output's value type.
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}
9960
/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constpool if we can,
  // otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants. Also, this may take
  // an additional register for the computation and we don't want that
  // either.

  // If the operand is a float, integer, or vector constant, spill to a
  // constant pool entry to get its address.
  const Value *OpVal = OpInfo.CallOperandVal;
  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
      // NOTE(review): the continuation of this condition is missing from this
      // excerpt (presumably vector-constant isa<> checks); verify upstream.
    OpInfo.CallOperand = DAG.getConstantPool(
        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
    return Chain;
  }

  // Otherwise, create a stack slot and emit a store to it before the asm.
  Type *Ty = OpVal->getType();
  auto &DL = DAG.getDataLayout();
  TypeSize TySize = DL.getTypeAllocSize(Ty);
  // NOTE(review): declaration lines are missing from this excerpt here —
  // presumably the MachineFunction (MF) and TargetFrameLowering (TFI) used
  // below; verify upstream.
  // Scalable types need the dedicated scalable-vector stack ID.
  int StackID = 0;
  if (TySize.isScalable())
    StackID = TFI->getStackIDForScalableVectors();
  int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(),
                                                 DL.getPrefTypeAlign(Ty), false,
                                                 nullptr, StackID);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
                            // NOTE(review): a MachinePointerInfo argument line
                            // appears to be missing here; verify upstream.
                            TLI.getMemValueType(DL, Ty));
  // The operand is now the address of the spilled value.
  OpInfo.CallOperand = StackSlot;

  return Chain;
}
10006
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process. However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation. This produces generally horrible, but correct, code.
///
/// OpInfo describes the operand
/// RefOpInfo describes the matching operand if any, the operand otherwise
///
/// Returns std::nullopt on success; returns the requested physical register
/// when that register is not contained in the resolved register class
/// (i.e. a register/type-bitwidth mismatch the caller should diagnose).
static std::optional<unsigned>
// NOTE(review): the first parameter-list line of this function (presumably
// taking SelectionDAG &DAG and const SDLoc &DL, both used below) is missing
// from this excerpt; verify upstream.
                     SDISelAsmOperandInfo &OpInfo,
                     SDISelAsmOperandInfo &RefOpInfo) {
  LLVMContext &Context = *DAG.getContext();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // NOTE(review): declaration lines are missing from this excerpt here —
  // presumably the MachineFunction, TargetRegisterInfo (TRI) and
  // MachineRegisterInfo (RegInfo) used below; verify upstream.

  // No work to do for memory/address operands.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
      OpInfo.ConstraintType == TargetLowering::C_Address)
    return std::nullopt;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  unsigned AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return std::nullopt;

  // Get the actual register value type. This is important, because the user
  // may have asked for (e.g.) the AX register in i32 type. We need to
  // remember that AX is actually i16 to get the right extension.
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);

  if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
    // If this is an FP operand in an integer register (or visa versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains. If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types). Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return std::nullopt;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this associated to a specific register, initialize iterator to correct
  // place. If virtual, make sure we have enough registers

  // Initialize iterator if necessary
  // NOTE(review): declaration lines are missing from this excerpt here —
  // presumably the Regs container and the register-class iterator I used
  // below; verify upstream.

  // Do not check for single registers.
  if (AssignedReg) {
    I = std::find(I, RC->end(), AssignedReg);
    if (I == RC->end()) {
      // RC does not contain the selected register, which indicates a
      // mismatch between the register and the required type/bitwidth.
      return {AssignedReg};
    }
  }

  // Collect NumRegs registers: the physical run starting at AssignedReg,
  // or freshly created virtual registers otherwise.
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
  return std::nullopt;
}
10124
// Return the index (into AsmNodeOperands) of the flag operand for asm operand
// number OperandNo, by walking over the already-emitted definitions.
static unsigned
// NOTE(review): the first parameter-list line (presumably
// "findMatchingInlineAsmOperand(unsigned OperandNo,") is missing from this
// excerpt; OperandNo is used as the loop counter below. Verify upstream.
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
    const InlineAsm::Flag F(OpFlag);
    assert(
        (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
        "Skipped past definitions?");
    // Each operand occupies one flag word plus its register operands.
    CurOp += F.getNumOperandRegisters() + 1;
  }
  return CurOp;
}
10141
namespace {

/// Accumulates the InlineAsm::Extra_* flag bits for an inline-asm call site;
/// read back with get().
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &Call) {
    const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
    // NOTE(review): the bodies of the four 'if' statements below (presumably
    // "Flags |= InlineAsm::Extra_*" lines) are missing from this excerpt;
    // verify upstream before editing.
    if (IA->hasSideEffects())
    if (IA->isAlignStack())
    if (IA->canThrow())
    if (Call.isConvergent())
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    // NOTE(review): the enclosing condition and the flag-setting bodies of the
    // branches below are missing from this excerpt; verify upstream.
    if (OpInfo.Type == InlineAsm::isInput)
    else if (OpInfo.Type == InlineAsm::isOutput)
    else if (OpInfo.Type == InlineAsm::isClobber)
    }
  }

  unsigned get() const { return Flags; }
};

} // end anonymous namespace
10181
10182static bool isFunction(SDValue Op) {
10183 if (Op && Op.getOpcode() == ISD::GlobalAddress) {
10184 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10185 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
10186
10187 // In normal "call dllimport func" instruction (non-inlineasm) it force
10188 // indirect access by specifing call opcode. And usually specially print
10189 // asm with indirect symbol (i.g: "*") according to opcode. Inline asm can
10190 // not do in this way now. (In fact, this is similar with "Data Access"
10191 // action). So here we ignore dllimport function.
10192 if (Fn && !Fn->hasDLLImportStorageClass())
10193 return true;
10194 }
10195 }
10196 return false;
10197}
10198
namespace {

/// Book-keeping state threaded through the inline-asm lowering helpers
/// (operand-info construction, constraint selection, DAG operand creation).
struct ConstraintDecisionInfo {
  // One entry per asm constraint, in order.
  SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
  // Operands accumulated for the eventual INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  SDValue Glue, Chain;
  // Set when any operand is found to access memory (see hasMemory).
  bool HasSideEffect = false;
  MCSymbol *BeginLabel = nullptr;

  // Buffer must be declared before ErrorMsg: the stream writes into it and
  // is bound to it in the constructor.
  SmallVector<char> Buffer;
  raw_svector_ostream ErrorMsg;

  ConstraintDecisionInfo() : ErrorMsg(Buffer) {}
};

} // end anonymous namespace
10215
/// Construct operand info objects.
/// Returns true on error (with a message written to Info.ErrorMsg), false on
/// success.
static bool
constructOperandInfo(ConstraintDecisionInfo &Info,
                     TargetLowering::AsmOperandInfoVector &TargetConstraints,
                     SelectionDAGBuilder &Builder, const TargetLowering &TLI,
                     ExtraFlags &ExtraInfo) {
  for (auto &T : TargetConstraints) {
    Info.ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = Info.ConstraintOperands.back();

    // Cache the already-built SDValue for the IR operand, if there is one.
    if (OpInfo.CallOperandVal)
      OpInfo.CallOperand = Builder.getValue(OpInfo.CallOperandVal);

    // Latch: once any operand touches memory, the asm has a side effect.
    if (!Info.HasSideEffect)
      Info.HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    // NOTE(review): a line is missing from this excerpt here — presumably a
    // TLI.ComputeConstraintToUse(T, ...) call that populates T.ConstraintType
    // checked below; verify upstream.

    if (T.ConstraintType == TargetLowering::C_Immediate && OpInfo.CallOperand &&
        !isa<ConstantSDNode>(OpInfo.CallOperand)) {
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could cause an integer showing up.
      Info.ErrorMsg << "constraint '" << T.ConstraintCode
                    << "' expects an integer constant expression";
      return true;
    }

    ExtraInfo.update(T);
  }

  return false;
}
10252
/// Compute which constraint option to use for each operand.
static void
computeConstraintToUse(ConstraintDecisionInfo &Info, const CallBase &Call,
                       TargetLowering::AsmOperandInfoVector &TargetConstraints,
                       SelectionDAGBuilder &Builder, const TargetLowering &TLI,
                       const TargetMachine &TM, SelectionDAG &DAG) {
  const auto *IA = cast<InlineAsm>(Call.getCalledOperand());
  // NOTE(review): a declaration line is missing from this excerpt here —
  // presumably the AsmStrs container filled by collectAsmStrs and consumed by
  // isInlineAsmTargetBranch below; verify upstream.
  IA->collectAsmStrs(AsmStrs);

  // Tracks the asm operand number; only argument-bearing and output
  // constraints advance it.
  int OpNo = -1;
  for (SDISelAsmOperandInfo &OpInfo : Info.ConstraintOperands) {
    if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
      OpNo++;

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input =
          Info.ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    // Memory clobbers and address constraints need no further handling here.
    if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.Type == InlineAsm::isClobber) ||
        OpInfo.ConstraintType == TargetLowering::C_Address)
      continue;

    // In Linux PIC model, there are 4 cases about value/label addressing:
    //
    // 1: Function call or Label jmp inside the module.
    // 2: Data access (such as global variable, static variable) inside module.
    // 3: Function call or Label jmp outside the module.
    // 4: Data access (such as global variable) outside the module.
    //
    // Due to current llvm inline asm architecture designed to not "recognize"
    // the asm code, there are quite troubles for us to treat mem addressing
    // differently for same value/adress used in different instuctions.
    // For example, in pic model, call a func may in plt way or direclty
    // pc-related, but lea/mov a function adress may use got.
    //
    // Here we try to "recognize" function call for the case 1 and case 3 in
    // inline asm. And try to adjust the constraint for them.
    //
    // TODO: Due to current inline asm didn't encourage to jmp to the outsider
    // label, so here we don't handle jmp function label now, but we need to
    // enhance it (especilly in PIC model) if we meet meaningful requirements.
    if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
        TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
        // NOTE(review): the final condition line of this 'if' (likely a
        // target/object-format check using TM) is missing from this excerpt;
        // verify upstream.
      OpInfo.isIndirect = false;
      OpInfo.ConstraintType = TargetLowering::C_Address;
    }

    // If this is a memory input, and if the operand is not indirect, do what we
    // need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Info.Chain = getAddressForMemoryInput(Info.Chain, Builder.getCurSDLoc(),
                                            OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }
}
10332
10333/// Prepare DAG-level operands. As part of this, assign virtual and physical
10334/// registers for inputs and output.
10335static bool prepareDAGLevelOperands(ConstraintDecisionInfo &Info,
10336 const CallBase &Call,
10337 SelectionDAGBuilder &Builder,
10338 const TargetLowering &TLI,
10339 SelectionDAG &DAG) {
10340 SDLoc DL = Builder.getCurSDLoc();
10341 for (SDISelAsmOperandInfo &OpInfo : Info.ConstraintOperands) {
10342 // Assign Registers.
10343 SDISelAsmOperandInfo &RefOpInfo =
10344 OpInfo.isMatchingInputConstraint()
10345 ? Info.ConstraintOperands[OpInfo.getMatchedOperand()]
10346 : OpInfo;
10347 const auto RegError = getRegistersForValue(DAG, DL, OpInfo, RefOpInfo);
10348 if (RegError) {
10349 const MachineFunction &MF = DAG.getMachineFunction();
10351 const char *RegName = TRI.getName(*RegError);
10352 Info.ErrorMsg << "register '" << RegName << "' allocated for constraint '"
10353 << OpInfo.ConstraintCode
10354 << "' does not match required type";
10355 return true;
10356 }
10357
10358 auto DetectWriteToReservedRegister = [&]() {
10359 const MachineFunction &MF = DAG.getMachineFunction();
10361
10362 for (Register Reg : OpInfo.AssignedRegs.Regs) {
10363 if (Reg.isPhysical() && TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
10364 Info.ErrorMsg << "write to reserved register '"
10365 << TRI.getRegAsmName(Reg) << "'";
10366 return true;
10367 }
10368 }
10369
10370 return false;
10371 };
10372 assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
10373 (OpInfo.Type == InlineAsm::isInput &&
10374 !OpInfo.isMatchingInputConstraint())) &&
10375 "Only address as input operand is allowed.");
10376
10377 switch (OpInfo.Type) {
10379 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10380 const InlineAsm::ConstraintCode ConstraintID =
10381 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10383 "Failed to convert memory constraint code to constraint id.");
10384
10385 // Add information to the INLINEASM node to know about this output.
10387 OpFlags.setMemConstraint(ConstraintID);
10388 Info.AsmNodeOperands.push_back(
10389 DAG.getTargetConstant(OpFlags, DL, MVT::i32));
10390 Info.AsmNodeOperands.push_back(OpInfo.CallOperand);
10391 } else {
10392 // Otherwise, this outputs to a register (directly for C_Register /
10393 // C_RegisterClass, and a target-defined fashion for
10394 // C_Immediate/C_Other). Find a register that we can use.
10395 if (OpInfo.AssignedRegs.Regs.empty()) {
10396 Info.ErrorMsg << "could not allocate output register for "
10397 << "constraint '" << OpInfo.ConstraintCode << "'";
10398 return true;
10399 }
10400
10401 if (DetectWriteToReservedRegister())
10402 return true;
10403
10404 // Add information to the INLINEASM node to know that this register is
10405 // set.
10406 OpInfo.AssignedRegs.AddInlineAsmOperands(
10407 OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
10409 false, 0, DL, DAG, Info.AsmNodeOperands);
10410 }
10411 break;
10412
10413 case InlineAsm::isInput:
10414 case InlineAsm::isLabel: {
10415 SDValue InOperandVal = OpInfo.CallOperand;
10416
10417 if (OpInfo.isMatchingInputConstraint()) {
10418 // If this is required to match an output register we have already set,
10419 // just use its register.
10420 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
10421 Info.AsmNodeOperands);
10422 InlineAsm::Flag Flag(Info.AsmNodeOperands[CurOp]->getAsZExtVal());
10423 if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
10424 if (OpInfo.isIndirect) {
10425 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
10426 Info.ErrorMsg << "inline asm not supported yet: cannot handle "
10427 << "tied indirect register inputs";
10428 return true;
10429 }
10430
10433 MachineRegisterInfo &MRI = MF.getRegInfo();
10435 auto *R = cast<RegisterSDNode>(Info.AsmNodeOperands[CurOp + 1]);
10436 Register TiedReg = R->getReg();
10437 MVT RegVT = R->getSimpleValueType(0);
10438 const TargetRegisterClass *RC =
10439 TiedReg.isVirtual() ? MRI.getRegClass(TiedReg)
10440 : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
10441 : TRI.getMinimalPhysRegClass(TiedReg);
10442 for (unsigned I = 0, E = Flag.getNumOperandRegisters(); I != E; ++I)
10443 Regs.push_back(MRI.createVirtualRegister(RC));
10444
10445 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
10446
10447 // Use the produced MatchedRegs object to
10448 MatchedRegs.getCopyToRegs(InOperandVal, DAG, DL, Info.Chain,
10449 &Info.Glue, &Call);
10451 OpInfo.getMatchedOperand(), DL, DAG,
10452 Info.AsmNodeOperands);
10453 break;
10454 }
10455
10456 assert(Flag.isMemKind() && "Unknown matching constraint!");
10457 assert(Flag.getNumOperandRegisters() == 1 &&
10458 "Unexpected number of operands");
10459
10460 // Add information to the INLINEASM node to know about this input.
10461 // See InlineAsm.h isUseOperandTiedToDef.
10462 Flag.clearMemConstraint();
10463 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10464 Info.AsmNodeOperands.push_back(DAG.getTargetConstant(
10465 Flag, DL, TLI.getPointerTy(DAG.getDataLayout())));
10466 Info.AsmNodeOperands.push_back(Info.AsmNodeOperands[CurOp + 1]);
10467 break;
10468 }
10469
10470 // Treat indirect 'X' constraint as memory.
10471 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
10472 OpInfo.isIndirect)
10473 OpInfo.ConstraintType = TargetLowering::C_Memory;
10474
10475 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
10476 OpInfo.ConstraintType == TargetLowering::C_Other) {
10477 std::vector<SDValue> Ops;
10478 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
10479 Ops, DAG);
10480 if (Ops.empty()) {
10481 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
10482 if (isa<ConstantSDNode>(InOperandVal)) {
10483 Info.ErrorMsg << "value out of range for constraint '"
10484 << OpInfo.ConstraintCode << "'";
10485 return true;
10486 }
10487
10488 Info.ErrorMsg << "invalid operand for inline asm constraint '"
10489 << OpInfo.ConstraintCode << "'";
10490 return true;
10491 }
10492
10493 // Add information to the INLINEASM node to know about this input.
10494 InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
10495 Info.AsmNodeOperands.push_back(DAG.getTargetConstant(
10496 ResOpType, DL, TLI.getPointerTy(DAG.getDataLayout())));
10497 llvm::append_range(Info.AsmNodeOperands, Ops);
10498 break;
10499 }
10500
10501 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10502 assert((OpInfo.isIndirect ||
10503 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
10504 "Operand must be indirect to be a mem!");
10505 assert(InOperandVal.getValueType() ==
10506 TLI.getPointerTy(DAG.getDataLayout()) &&
10507 "Memory operands expect pointer values");
10508
10509 const InlineAsm::ConstraintCode ConstraintID =
10510 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10512 "Failed to convert memory constraint code to constraint id.");
10513
10514 // Add information to the INLINEASM node to know about this input.
10516 ResOpType.setMemConstraint(ConstraintID);
10517 Info.AsmNodeOperands.push_back(
10518 DAG.getTargetConstant(ResOpType, DL, MVT::i32));
10519 Info.AsmNodeOperands.push_back(InOperandVal);
10520 break;
10521 }
10522
10523 if (OpInfo.ConstraintType == TargetLowering::C_Address) {
10524 const InlineAsm::ConstraintCode ConstraintID =
10525 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10527 "Failed to convert memory constraint code to constraint id.");
10528
10530
10531 SDValue AsmOp = InOperandVal;
10532 if (isFunction(InOperandVal)) {
10533 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10534 ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
10535 AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
10536 InOperandVal.getValueType(),
10537 GA->getOffset());
10538 }
10539
10540 // Add information to the INLINEASM node to know about this input.
10541 ResOpType.setMemConstraint(ConstraintID);
10542
10543 Info.AsmNodeOperands.push_back(
10544 DAG.getTargetConstant(ResOpType, DL, MVT::i32));
10545 Info.AsmNodeOperands.push_back(AsmOp);
10546 break;
10547 }
10548
10549 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
10550 OpInfo.ConstraintType != TargetLowering::C_Register) {
10551 Info.ErrorMsg << "unknown asm constraint '" << OpInfo.ConstraintCode
10552 << "'";
10553 return true;
10554 }
10555
10556 // TODO: Support this.
10557 if (OpInfo.isIndirect) {
10558 Info.ErrorMsg << "cannot handle indirect register inputs yet for "
10559 << "constraint '" << OpInfo.ConstraintCode << "'";
10560 return true;
10561 }
10562
10563 // Copy the input into the appropriate registers.
10564 if (OpInfo.AssignedRegs.Regs.empty()) {
10565 Info.ErrorMsg << "could not allocate input reg for constraint '"
10566 << OpInfo.ConstraintCode << "'";
10567 return true;
10568 }
10569
10570 if (DetectWriteToReservedRegister())
10571 return true;
10572
10573 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, DL, Info.Chain,
10574 &Info.Glue, &Call);
10575 OpInfo.AssignedRegs.AddInlineAsmOperands(
10576 InlineAsm::Kind::RegUse, false, 0, DL, DAG, Info.AsmNodeOperands);
10577 break;
10578 }
10579
10581 // Add the clobbered value to the operand list, so that the register
10582 // allocator is aware that the physreg got clobbered.
10583 if (!OpInfo.AssignedRegs.Regs.empty())
10584 OpInfo.AssignedRegs.AddInlineAsmOperands(
10585 InlineAsm::Kind::Clobber, false, 0, DL, DAG, Info.AsmNodeOperands);
10586 break;
10587 }
10588 }
10589
10590 return false;
10591}
10592
10593/// DetermineConstraints - Find the constraints to use for inline asm operands.
10594static bool
10595determineConstraints(ConstraintDecisionInfo &Info,
10596 TargetLowering::AsmOperandInfoVector &TargetConstraints,
10597 const CallBase &Call, SelectionDAGBuilder &Builder,
10598 const TargetLowering &TLI, const TargetMachine &TM,
10599 SelectionDAG &DAG, const BasicBlock *EHPadBB) {
10600 const auto *IA = cast<InlineAsm>(Call.getCalledOperand());
10601 ExtraFlags ExtraInfo(Call);
10602
10603 // First pass: Construct operand info objects.
10604 Info.HasSideEffect = IA->hasSideEffects();
10605 if (constructOperandInfo(Info, TargetConstraints, Builder, TLI, ExtraInfo))
10606 return true;
10607
10608 // We won't need to flush pending loads if this asm doesn't touch
10609 // memory and is nonvolatile.
10610 Info.Chain = Info.HasSideEffect ? Builder.getRoot() : DAG.getRoot();
10611
10612 bool IsCallBr = isa<CallBrInst>(Call);
10613 bool EmitEHLabels = isa<InvokeInst>(Call);
10614 if (IsCallBr || EmitEHLabels)
10615 // If this is a callbr or invoke we need to flush pending exports since
10616 // inlineasm_br and invoke are terminators.
10617 // We need to do this before nodes are glued to the inlineasm_br node.
10618 Info.Chain = Builder.getControlRoot();
10619
10620 if (EmitEHLabels)
10621 Info.Chain = Builder.lowerStartEH(Info.Chain, EHPadBB, Info.BeginLabel);
10622
10623 // Second pass: Compute which constraint option to use.
10624 computeConstraintToUse(Info, Call, TargetConstraints, Builder, TLI, TM, DAG);
10625
10626 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
10627 Info.AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
10628 Info.AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
10629 IA->getAsmString().data(), TLI.getProgramPointerTy(DAG.getDataLayout())));
10630
10631 // If we have a !srcloc metadata node associated with it, we want to attach
10632 // this to the ultimately generated inline asm machineinstr. To do this, we
10633 // pass in the third operand as this (potentially null) inline asm MDNode.
10634 const MDNode *SrcLoc = Call.getMetadata("srcloc");
10635 Info.AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
10636
10637 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
10638 // bits as operand 3.
10639 Info.AsmNodeOperands.push_back(
10640 DAG.getTargetConstant(ExtraInfo.get(), Builder.getCurSDLoc(),
10641 TLI.getPointerTy(DAG.getDataLayout())));
10642
10643 // Third pass: Prepare DAG-level operands
10644 return prepareDAGLevelOperands(Info, Call, Builder, TLI, DAG);
10645}
10646
10647/// visitInlineAsm - Handle a call to an InlineAsm object.
/// Lowers the call site to an ISD::INLINEASM / ISD::INLINEASM_BR node,
/// copies results out of the assigned registers (or stores them for
/// indirect outputs), and updates the DAG root when the asm has a memory
/// effect.  \p EHPadBB is the landing-pad block when the asm is an invoke.
10648void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
10649                                         const BasicBlock *EHPadBB) {
10650  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // NOTE(review): the line declaring TargetConstraints (presumably
  // TLI.ParseConstraints(...)) was lost in this extraction; the next line
  // is its continuation.
10652      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
10653
10654  assert((!isa<InvokeInst>(Call) || EHPadBB) &&
10655         "InvokeInst must have an EHPadBB");
10656
  // determineConstraints performs the three lowering passes; on failure the
  // diagnostic text accumulated in Info.ErrorMsg is reported and lowering
  // of this call is abandoned.
10657  ConstraintDecisionInfo Info;
10658  if (determineConstraints(Info, TargetConstraints, Call, *this, TLI, TM, DAG,
10659                           EHPadBB))
10660    return emitInlineAsmError(Call, Info.ErrorMsg.str());
10661
10662  SDValue Glue = Info.Glue;
10663  SDValue Chain = Info.Chain;
10664
10665  // Finish up input operands.  Set the input chain and add the flag last.
10666  Info.AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
10667  if (Glue.getNode())
10668    Info.AsmNodeOperands.push_back(Glue);
10669
10670  bool IsCallBr = isa<CallBrInst>(Call);
10671  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10672  Chain =
10673      DAG.getNode(ISDOpc, getCurSDLoc(), DAG.getVTList(MVT::Other, MVT::Glue),
10674                  Info.AsmNodeOperands);
10675  Glue = Chain.getValue(1);
10676
10677  // Do additional work to generate outputs.
10678
10679  SmallVector<EVT, 1> ResultVTs;
10680  SmallVector<SDValue, 1> ResultValues;
10681  SmallVector<SDValue, 8> OutChains;
10682
  // A struct return is flattened: one result type per struct element.
10683  llvm::Type *CallResultType = Call.getType();
10684  ArrayRef<Type *> ResultTypes;
10685  if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
10686    ResultTypes = StructResult->elements();
10687  else if (!CallResultType->isVoidTy())
10688    ResultTypes = ArrayRef(CallResultType);
10689
  // Consumes one expected result type per register-assigned output value,
  // coercing the produced value to that type where legal.
10690  auto CurResultType = ResultTypes.begin();
10691  auto handleRegAssign = [&](SDValue V) {
10692    assert(CurResultType != ResultTypes.end() && "Unexpected value");
10693    assert((*CurResultType)->isSized() && "Unexpected unsized type");
10694    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
10695    ++CurResultType;
10696    // If the type of the inline asm call site return value is different but has
10697    // same size as the type of the asm output bitcast it.  One example of this
10698    // is for vectors with different width / number of elements. This can
10699    // happen for register classes that can contain multiple different value
10700    // types. The preg or vreg allocated may not have the same VT as was
10701    // expected.
10702    //
10703    // This can also happen for a return value that disagrees with the register
10704    // class it is put in, eg. a double in a general-purpose register on a
10705    // 32-bit machine.
10706    if (ResultVT != V.getValueType() &&
10707        ResultVT.getSizeInBits() == V.getValueSizeInBits())
10708      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
10709    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
10710             V.getValueType().isInteger()) {
10711      // If a result value was tied to an input value, the computed result
10712      // may have a wider width than the expected result.  Extract the
10713      // relevant portion.
10714      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
10715    }
10716    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
10717    ResultVTs.push_back(ResultVT);
10718    ResultValues.push_back(V);
10719  };
10720
10721  // Deal with output operands.
10722  for (SDISelAsmOperandInfo &OpInfo : Info.ConstraintOperands) {
10723    if (OpInfo.Type == InlineAsm::isOutput) {
10724      SDValue Val;
10725      // Skip trivial output operands.
10726      if (OpInfo.AssignedRegs.Regs.empty())
10727        continue;
10728
10729      switch (OpInfo.ConstraintType) {
      // NOTE(review): the case labels of this switch (presumably
      // C_Register/C_RegisterClass for the first body, C_Immediate/C_Other
      // for the second, then the remaining constraint kinds and a default)
      // were dropped by this extraction; only the case bodies survive.
10732        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
10733                                                  Chain, &Glue, &Call);
10734        break;
10737        Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
10738                                              OpInfo, DAG);
10739        break;
10741        break; // Already handled.
10743        break; // Silence warning.
10745        assert(false && "Unexpected unknown constraint");
10746      }
10747
10748      // Indirect output manifest as stores. Record output chains.
10749      if (OpInfo.isIndirect) {
10750        const Value *Ptr = OpInfo.CallOperandVal;
10751        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
10752        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
10753                                     MachinePointerInfo(Ptr));
10754        OutChains.push_back(Store);
10755      } else {
10756        // generate CopyFromRegs to associated registers.
10757        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
10758        if (Val.getOpcode() == ISD::MERGE_VALUES) {
10759          for (const SDValue &V : Val->op_values())
10760            handleRegAssign(V);
10761        } else
10762          handleRegAssign(Val);
10763      }
10764    }
10765  }
10766
10767  // Set results.
10768  if (!ResultValues.empty()) {
10769    assert(CurResultType == ResultTypes.end() &&
10770           "Mismatch in number of ResultTypes");
10771    assert(ResultValues.size() == ResultTypes.size() &&
10772           "Mismatch in number of output operands in asm result");
10773
    // NOTE(review): the start of this statement (presumably
    // `SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), ...` )
    // was lost in this extraction.
10775                      DAG.getVTList(ResultVTs), ResultValues);
10776    setValue(&Call, V);
10777  }
10778
10779  // Collect store chains.
10780  if (!OutChains.empty())
10781    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
10782
  // Invokes need the matching EH end-label after the asm node.
10783  if (const auto *II = dyn_cast<InvokeInst>(&Call))
10784    Chain = lowerEndEH(Chain, II, EHPadBB, Info.BeginLabel);
10785
10786  // Only Update Root if inline assembly has a memory effect.
10787  if (ResultValues.empty() || Info.HasSideEffect || !OutChains.empty() ||
10788      IsCallBr || isa<InvokeInst>(Call))
10789    DAG.setRoot(Chain);
10790}
10791
/// Report an inline-asm diagnostic for \p Call and keep the DAG in a valid
/// state by giving the call UNDEF result values (if it produces any).
10792void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
10793                                             const Twine &Message) {
10794  LLVMContext &Ctx = *DAG.getContext();
10795  Ctx.diagnose(DiagnosticInfoInlineAsm(Call, Message));
10796
10797  // Make sure we leave the DAG in a valid state
10798  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10799  SmallVector<EVT, 1> ValueVTs;
10800  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
10801
  // Void calls need no replacement value.
10802  if (ValueVTs.empty())
10803    return;
10804
  // NOTE(review): the declaration of Ops (an SDValue SmallVector, original
  // line 10805) was lost in this extraction.
10806  for (const EVT &VT : ValueVTs)
10807    Ops.push_back(DAG.getUNDEF(VT));
10808
10809  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
10810}
10811
10812void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
10813 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
10814 MVT::Other, getRoot(),
10815 getValue(I.getArgOperand(0)),
10816 DAG.getSrcValue(I.getArgOperand(0))));
10817}
10818
10819void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
10820 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10821 const DataLayout &DL = DAG.getDataLayout();
10822 SDValue V = DAG.getVAArg(
10823 TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
10824 getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
10825 DL.getABITypeAlign(I.getType()).value());
10826 DAG.setRoot(V.getValue(1));
10827
10828 if (I.getType()->isPointerTy())
10829 V = DAG.getPtrExtOrTrunc(
10830 V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
10831 setValue(&I, V);
10832}
10833
10834void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
10835 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
10836 MVT::Other, getRoot(),
10837 getValue(I.getArgOperand(0)),
10838 DAG.getSrcValue(I.getArgOperand(0))));
10839}
10840
10841void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
10842 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
10843 MVT::Other, getRoot(),
10844 getValue(I.getArgOperand(0)),
10845 getValue(I.getArgOperand(1)),
10846 DAG.getSrcValue(I.getArgOperand(0)),
10847 DAG.getSrcValue(I.getArgOperand(1))));
10848}
10849
// NOTE(review): the opening of this definition (its return type and name —
// presumably SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
// ...) — was lost in this extraction; confirm against the full source.
// The body narrows Op with an AssertZext based on !range metadata on I.
10851                                                    const Instruction &I,
10852                                                    SDValue Op) {
  // Without a usable, non-wrapping range there is nothing to assert.
10853  std::optional<ConstantRange> CR = getRange(I);
10854
10855  if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10856    return Op;
10857
  // Bits needed to represent the largest value in the range, clamped up to
  // the minimum legal integer width.
10858  APInt Hi = CR->getUnsignedMax();
10859  unsigned Bits = std::max(Hi.getActiveBits(),
10860                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));
10861
10862  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
10863
10864  SDLoc SL = getCurSDLoc();
10865
  // Assert that all bits above SmallVT's width are zero.
10866  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
10867                             DAG.getValueType(SmallVT));
10868  unsigned NumVals = Op.getNode()->getNumValues();
10869  if (NumVals == 1)
10870    return ZExt;
10871
  // Multi-result node: merge the asserted first result with the remaining
  // (untouched) results.
  // NOTE(review): the declaration of Ops (an SDValue SmallVector, original
  // line 10872) was lost in this extraction.
10873
10874  Ops.push_back(ZExt);
10875  for (unsigned I = 1; I != NumVals; ++I)
10876    Ops.push_back(Op.getValue(I));
10877
10878  return DAG.getMergeValues(Ops, SL);
10880
// NOTE(review): the first line of this definition (its return type and
// name) was lost in this extraction; the body attaches the call's
// nofpclass information to \p Op via ISD::AssertNoFPClass nodes.
10882    SelectionDAG &DAG, const Instruction &I, SDValue Op) {
10883  FPClassTest Classes = getNoFPClass(I);
  // No excluded FP classes: nothing to assert.
10884  if (Classes == fcNone)
10885    return Op;
10886
10887  SDLoc SL = getCurSDLoc();
  // NOTE(review): TestConst is built with a default SDLoc() rather than SL,
  // unlike the surrounding nodes — confirm upstream before changing.
10888  SDValue TestConst = DAG.getTargetConstant(Classes, SDLoc(), MVT::i32);
10889
  // Single value: wrap it directly.
10890  if (Op.getOpcode() != ISD::MERGE_VALUES) {
10891    return DAG.getNode(ISD::AssertNoFPClass, SL, Op.getValueType(), Op,
10892                       TestConst);
10893  }
10894
  // MERGE_VALUES: assert each merged operand individually and re-merge.
10895  SmallVector<SDValue, 8> Ops(Op.getNumOperands());
10896  for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
10897    SDValue MergeOp = Op.getOperand(I);
10898    Ops[I] = DAG.getNode(ISD::AssertNoFPClass, SL, MergeOp.getValueType(),
10899                         MergeOp, TestConst);
10900  }
10901
10902  return DAG.getMergeValues(Ops, SL);
10903}
10904
10905/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
10906/// the call being lowered.
10907///
10908/// This is a helper for lowering intrinsics that follow a target calling
10909/// convention or require stack pointer adjustment. Only a subset of the
10910/// intrinsic's operands need to participate in the calling convention.
// NOTE(review): the function's signature lines (original 10911-10912,
// taking TargetLowering::CallLoweringInfo &CLI and const CallBase *Call)
// were lost in this extraction.
10913    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
10914    AttributeSet RetAttrs, bool IsPatchPoint) {
  // NOTE(review): the declaration of Args (original line 10915, presumably
  // TargetLowering::ArgListTy) was lost in this extraction.
10916  Args.reserve(NumArgs);
10917
10918  // Populate the argument list.
10919  // Attributes for args start at offset 1, after the return attribute.
10920  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10921       ArgI != ArgE; ++ArgI) {
10922    const Value *V = Call->getOperand(ArgI);
10923
10924    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
10925
10926    TargetLowering::ArgListEntry Entry(getValue(V), V->getType());
10927    Entry.setAttributes(Call, ArgI);
10928    Args.push_back(Entry);
10929  }
10930
  // NOTE(review): the start of this builder chain (original line 10931,
  // presumably CLI.setDebugLoc(getCurSDLoc())) was lost in this extraction.
10932      .setChain(getRoot())
10933      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10934                 RetAttrs)
10935      .setDiscardResult(Call->use_empty())
10936      .setIsPatchPoint(IsPatchPoint)
10938          Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
10939}
10940
10941/// Add a stack map intrinsic call's live variable operands to a stackmap
10942/// or patchpoint target node's operand list.
10943///
10944/// Constants are converted to TargetConstants purely as an optimization to
10945/// avoid constant materialization and register allocation.
10946///
10947/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
10948/// generate address computation nodes, and so FinalizeISel can convert the
10949/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
10950/// address materialization and register allocation, but may also be required
10951/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
10952/// alloca in the entry block, then the runtime may assume that the alloca's
10953/// StackMap location can be read immediately after compilation and that the
10954/// location is valid at any point during execution (this is similar to the
10955/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
10956/// only available in a register, then the runtime would need to trap when
10957/// execution reaches the StackMap in order to read the alloca's location.
10958static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
  // NOTE(review): the parameter line declaring the SDLoc and the Ops output
  // vector (original line 10959) was lost in this extraction.
10960                                SelectionDAGBuilder &Builder) {
10961  SelectionDAG &DAG = Builder.DAG;
  // Arguments from StartIdx onward are the "live variables" of the
  // stackmap/patchpoint.
10962  for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
10963    SDValue Op = Builder.getValue(Call.getArgOperand(I));
10964
10965    // Things on the stack are pointer-typed, meaning that they are already
10966    // legal and can be emitted directly to target nodes.
    // NOTE(review): the enclosing `if (...dyn_cast<FrameIndexSDNode>(Op))`
    // header (original line 10967) was lost in this extraction.
10968      Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
10969    } else {
10970      // Otherwise emit a target independent node to be legalised.
10971      Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
10972    }
10973  }
10974}
10975
10976/// Lower llvm.experimental.stackmap.
10977void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
10978  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
10979  //                                  [live variables...])
10980
10981  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10982
10983  SDValue Chain, InGlue, Callee;
  // NOTE(review): the declaration of Ops (original line 10984, presumably a
  // SmallVector<SDValue>) was lost in this extraction.
10985
10986  SDLoc DL = getCurSDLoc();
  // NOTE(review): original line 10987 (presumably initializing Callee from
  // the called operand) was lost in this extraction.
10988
10989  // The stackmap intrinsic only records the live variables (the arguments
10990  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
10991  // intrinsic, this won't be lowered to a function call. This means we don't
10992  // have to worry about calling conventions and target specific lowering code.
10993  // Instead we perform the call lowering right here.
10994  //
10995  // chain, flag = CALLSEQ_START(chain, 0, 0)
10996  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
10997  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
10998  //
10999  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
11000  InGlue = Chain.getValue(1);
11001
11002  // Add the STACKMAP operands, starting with DAG house-keeping.
11003  Ops.push_back(Chain);
11004  Ops.push_back(InGlue);
11005
11006  // Add the <id>, <numShadowBytes> operands.
11007  //
11008  // These do not require legalisation, and can be emitted directly to target
11009  // constant nodes.
  // NOTE(review): original line 11010 (presumably `SDValue ID =
  // getValue(CI.getArgOperand(0));`) was lost in this extraction.
11011  assert(ID.getValueType() == MVT::i64);
11012  SDValue IDConst =
11013      DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
11014  Ops.push_back(IDConst);
11015
11016  SDValue Shad = getValue(CI.getArgOperand(1));
11017  assert(Shad.getValueType() == MVT::i32);
11018  SDValue ShadConst =
11019      DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
11020  Ops.push_back(ShadConst);
11021
11022  // Add the live variables.
11023  addStackMapLiveVars(CI, 2, DL, Ops, *this);
11024
11025  // Create the STACKMAP node.
11026  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
11027  Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
11028  InGlue = Chain.getValue(1);
11029
11030  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
11031
11032  // Stackmaps don't generate values, so nothing goes into the NodeMap.
11033
11034  // Set the root to the target-lowered call chain.
11035  DAG.setRoot(Chain);
11036
11037  // Inform the Frame Information that we have a stackmap in this function.
11038  FuncInfo.MF->getFrameInfo().setHasStackMap();
11039}
11040
11041/// Lower llvm.experimental.patchpoint directly to its target opcode.
/// The intrinsic is first lowered as an ordinary (possibly invokable) call,
/// then the target-specific call node is rewritten into a PATCHPOINT node
/// carrying the id/numBytes/callee/CC metadata and the live variables.
11042void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
11043                                          const BasicBlock *EHPadBB) {
11044  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
11045  //                                         i32 <numBytes>,
11046  //                                         i8* <target>,
11047  //                                         i32 <numArgs>,
11048  //                                         [Args...],
11049  //                                         [live variables...])
11050
  // NOTE(review): original line 11051 (presumably `CallingConv::ID CC =
  // CB.getCallingConv();`) was lost in this extraction.
11052  bool IsAnyRegCC = CC == CallingConv::AnyReg;
11053  bool HasDef = !CB.getType()->isVoidTy();
11054  SDLoc dl = getCurSDLoc();
  // NOTE(review): original line 11055 (presumably initializing Callee from
  // the <target> operand) was lost in this extraction.
11056
11057  // Handle immediate and symbolic callees.
11058  if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
11059    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
11060                                   /*isTarget=*/true);
11061  else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
11062    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
11063                                        SDLoc(SymbolicCallee),
11064                                        SymbolicCallee->getValueType(0));
11065
11066  // Get the real number of arguments participating in the call <numArgs>
  // NOTE(review): original line 11067 (presumably initializing NArgVal from
  // the <numArgs> operand) was lost in this extraction.
11068  unsigned NumArgs = NArgVal->getAsZExtVal();
11069
11070  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
11071  // Intrinsics include all meta-operands up to but not including CC.
11072  unsigned NumMetaOpers = PatchPointOpers::CCPos;
11073  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
11074         "Not enough arguments provided to the patchpoint intrinsic");
11075
11076  // For AnyRegCC the arguments are lowered later on manually.
11077  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
11078  Type *ReturnTy =
11079      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
11080
11081  TargetLowering::CallLoweringInfo CLI(DAG);
11082  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
11083                           ReturnTy, CB.getAttributes().getRetAttrs(), true);
11084  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
11085
  // Walk back up the chain from the lowered call to its CALLSEQ_END,
  // skipping over any EH label and result-copy nodes.
11086  SDNode *CallEnd = Result.second.getNode();
11087  if (CallEnd->getOpcode() == ISD::EH_LABEL)
11088    CallEnd = CallEnd->getOperand(0).getNode();
11089  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
11090    CallEnd = CallEnd->getOperand(0).getNode();
11091
11092  /// Get a call instruction from the call sequence chain.
11093  /// Tail calls are not allowed.
11094  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
11095         "Expected a callseq node.");
11096  SDNode *Call = CallEnd->getOperand(0).getNode();
11097  bool HasGlue = Call->getGluedNode();
11098
11099  // Replace the target specific call node with the patchable intrinsic.
  // NOTE(review): the declaration of Ops (original line 11100, presumably a
  // SmallVector<SDValue>) was lost in this extraction.
11101
11102  // Push the chain.
11103  Ops.push_back(*(Call->op_begin()));
11104
11105  // Optionally, push the glue (if any).
11106  if (HasGlue)
11107    Ops.push_back(*(Call->op_end() - 1));
11108
11109  // Push the register mask info.
11110  if (HasGlue)
11111    Ops.push_back(*(Call->op_end() - 2));
11112  else
11113    Ops.push_back(*(Call->op_end() - 1));
11114
11115  // Add the <id> and <numBytes> constants.
  // NOTE(review): original lines 11116 and 11118 (presumably initializing
  // IDVal and NBytesVal from the intrinsic's operands) were lost in this
  // extraction.
11117  Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
11119  Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
11120
11121  // Add the callee.
11122  Ops.push_back(Callee);
11123
11124  // Adjust <numArgs> to account for any arguments that have been passed on the
11125  // stack instead.
11126  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
11127  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
11128  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
11129  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
11130
11131  // Add the calling convention
11132  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
11133
11134  // Add the arguments we omitted previously. The register allocator should
11135  // place these in any free register.
11136  if (IsAnyRegCC)
11137    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
11138      Ops.push_back(getValue(CB.getArgOperand(i)));
11139
11140  // Push the arguments from the call instruction.
11141  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
11142  Ops.append(Call->op_begin() + 2, e);
11143
11144  // Push live variables for the stack map.
11145  addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
11146
11147  SDVTList NodeTys;
11148  if (IsAnyRegCC && HasDef) {
11149    // Create the return types based on the intrinsic definition
11150    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11151    SmallVector<EVT, 3> ValueVTs;
11152    ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
11153    assert(ValueVTs.size() == 1 && "Expected only one return value type.");
11154
11155    // There is always a chain and a glue type at the end
11156    ValueVTs.push_back(MVT::Other);
11157    ValueVTs.push_back(MVT::Glue);
11158    NodeTys = DAG.getVTList(ValueVTs);
11159  } else
11160    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
11161
11162  // Replace the target specific call node with a PATCHPOINT node.
11163  SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
11164
11165  // Update the NodeMap.
11166  if (HasDef) {
11167    if (IsAnyRegCC)
11168      setValue(&CB, SDValue(PPV.getNode(), 0));
11169    else
11170      setValue(&CB, Result.first);
11171  }
11172
11173  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
11174  // call sequence. Furthermore the location of the chain and glue can change
11175  // when the AnyReg calling convention is used and the intrinsic returns a
11176  // value.
11177  if (IsAnyRegCC && HasDef) {
11178    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
11179    SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
11180    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11181  } else
11182    DAG.ReplaceAllUsesWith(Call, PPV.getNode());
11183  DAG.DeleteNode(Call);
11184
11185  // Inform the Frame Information that we have a patchpoint in this function.
11186  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11187}
11188
11189void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
11190 unsigned Intrinsic) {
11191 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11192 SDValue Op1 = getValue(I.getArgOperand(0));
11193 SDValue Op2;
11194 if (I.arg_size() > 1)
11195 Op2 = getValue(I.getArgOperand(1));
11196 SDLoc dl = getCurSDLoc();
11197 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11198 SDValue Res;
11199 SDNodeFlags SDFlags;
11200 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
11201 SDFlags.copyFMF(*FPMO);
11202
11203 switch (Intrinsic) {
11204 case Intrinsic::vector_reduce_fadd:
11205 if (SDFlags.hasAllowReassociation())
11206 Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
11207 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
11208 SDFlags);
11209 else
11210 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
11211 break;
11212 case Intrinsic::vector_reduce_fmul:
11213 if (SDFlags.hasAllowReassociation())
11214 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
11215 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
11216 SDFlags);
11217 else
11218 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
11219 break;
11220 case Intrinsic::vector_reduce_add:
11221 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
11222 break;
11223 case Intrinsic::vector_reduce_mul:
11224 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
11225 break;
11226 case Intrinsic::vector_reduce_and:
11227 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
11228 break;
11229 case Intrinsic::vector_reduce_or:
11230 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
11231 break;
11232 case Intrinsic::vector_reduce_xor:
11233 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
11234 break;
11235 case Intrinsic::vector_reduce_smax:
11236 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
11237 break;
11238 case Intrinsic::vector_reduce_smin:
11239 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
11240 break;
11241 case Intrinsic::vector_reduce_umax:
11242 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
11243 break;
11244 case Intrinsic::vector_reduce_umin:
11245 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
11246 break;
11247 case Intrinsic::vector_reduce_fmax:
11248 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
11249 break;
11250 case Intrinsic::vector_reduce_fmin:
11251 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
11252 break;
11253 case Intrinsic::vector_reduce_fmaximum:
11254 Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
11255 break;
11256 case Intrinsic::vector_reduce_fminimum:
11257 Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11258 break;
11259 default:
11260 llvm_unreachable("Unhandled vector reduce intrinsic");
11261 }
11262 setValue(&I, Res);
11263}
11264
11265 /// Returns an AttributeList representing the attributes applied to the return
11266 /// value of the given call.
// NOTE(review): the function signature and the declaration of `Attrs` (source
// lines 11267-11268) were dropped when this listing was extracted; presumably
// `static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI)`
// with a small Attribute::AttrKind vector — confirm against the upstream file.
// Translate the sext/zext/inreg bits recorded on the CallLoweringInfo back
// into IR-level return attributes.
11269 if (CLI.RetSExt)
11270 Attrs.push_back(Attribute::SExt);
11271 if (CLI.RetZExt)
11272 Attrs.push_back(Attribute::ZExt);
11273 if (CLI.IsInReg)
11274 Attrs.push_back(Attribute::InReg);
11275
// Attach the collected attributes at the return-value index.
11276 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
11277 Attrs);
11278}
11279
11280 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
11281 /// implementation, which just calls LowerCall.
11282 /// FIXME: When all targets are
11283 /// migrated to using LowerCall, this hook should be integrated into SDISel.
11284 std::pair<SDValue, SDValue>
// NOTE(review): several link-bearing lines were dropped by the documentation
// extractor in this function (the `TargetLowering::LowerCallTo(...)` signature,
// the declarations of `Offsets`, `Outs` and `InVals`, and a few call-expression
// prefixes). Only comments are added in this pass; every remaining token is
// untouched — confirm the dropped lines against the upstream file.
11286 LLVMContext &Context = CLI.RetTy->getContext();
11287
11288 // Handle the incoming return values from the call.
11289 CLI.Ins.clear();
11290 SmallVector<Type *, 4> RetOrigTys;
11292 auto &DL = CLI.DAG.getDataLayout();
11293 ComputeValueTypes(DL, CLI.OrigRetTy, RetOrigTys, &Offsets);
11294
// Compute the EVTs of the return values. When the lowered return type differs
// from the original IR type, only a single non-aggregate value is supported
// (see the assert below).
11295 SmallVector<EVT, 4> RetVTs;
11296 if (CLI.RetTy != CLI.OrigRetTy) {
11297 assert(RetOrigTys.size() == 1 &&
11298 "Only supported for non-aggregate returns");
11299 RetVTs.push_back(getValueType(DL, CLI.RetTy));
11300 } else {
11301 for (Type *Ty : RetOrigTys)
11302 RetVTs.push_back(getValueType(DL, Ty));
11303 }
11304
11305 if (CLI.IsPostTypeLegalization) {
11306 // If we are lowering a libcall after legalization, split the return type.
11307 SmallVector<Type *, 4> OldRetOrigTys;
11308 SmallVector<EVT, 4> OldRetVTs;
11309 SmallVector<TypeSize, 4> OldOffsets;
11310 RetOrigTys.swap(OldRetOrigTys);
11311 RetVTs.swap(OldRetVTs);
11312 Offsets.swap(OldOffsets);
11313
11314 for (size_t i = 0, e = OldRetVTs.size(); i != e; ++i) {
11315 EVT RetVT = OldRetVTs[i];
11316 uint64_t Offset = OldOffsets[i];
11317 MVT RegisterVT = getRegisterType(Context, RetVT);
11318 unsigned NumRegs = getNumRegisters(Context, RetVT);
11319 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
11320 RetOrigTys.append(NumRegs, OldRetOrigTys[i]);
11321 RetVTs.append(NumRegs, RegisterVT);
11322 for (unsigned j = 0; j != NumRegs; ++j)
11323 Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
11324 }
11325 }
11326
11328 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
11329
11330 bool CanLowerReturn =
11332 CLI.IsVarArg, Outs, Context, CLI.RetTy);
11333
// If the target cannot return this value in registers, demote the return:
// allocate a stack slot and pass its address as a hidden sret argument that is
// prepended to the argument list below.
11334 SDValue DemoteStackSlot;
11335 int DemoteStackIdx = -100;
11336 if (!CanLowerReturn) {
11337 // FIXME: equivalent assert?
11338 // assert(!CS.hasInAllocaArgument() &&
11339 // "sret demotion is incompatible with inalloca");
11340 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
11341 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
11343 DemoteStackIdx =
11344 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
11345 Type *StackSlotPtrType = PointerType::get(Context, DL.getAllocaAddrSpace());
11346
11347 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
11348 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11349 Entry.IsSRet = true;
11350 Entry.Alignment = Alignment;
11351 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
11352 CLI.NumFixedArgs += 1;
11353 CLI.getArgs()[0].IndirectType = CLI.RetTy;
11354 CLI.RetTy = CLI.OrigRetTy = Type::getVoidTy(Context);
11355
11356 // sret demotion isn't compatible with tail-calls, since the sret argument
11357 // points into the callers stack frame.
11358 CLI.IsTailCall = false;
11359 } else {
11360 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11361 CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
11362 for (unsigned I = 0, E = RetVTs.size(); I != E; ++I) {
11363 ISD::ArgFlagsTy Flags;
11364 if (NeedsRegBlock) {
11365 Flags.setInConsecutiveRegs();
11366 if (I == RetVTs.size() - 1)
11367 Flags.setInConsecutiveRegsLast();
11368 }
11369 EVT VT = RetVTs[I];
11370 MVT RegisterVT = getRegisterTypeForCallingConv(Context, CLI.CallConv, VT);
11371 unsigned NumRegs =
11372 getNumRegistersForCallingConv(Context, CLI.CallConv, VT);
11373 for (unsigned i = 0; i != NumRegs; ++i) {
11374 ISD::InputArg Ret(Flags, RegisterVT, VT, RetOrigTys[I],
11376 if (CLI.RetTy->isPointerTy()) {
11377 Ret.Flags.setPointer();
11379 cast<PointerType>(CLI.RetTy)->getAddressSpace());
11380 }
11381 if (CLI.RetSExt)
11382 Ret.Flags.setSExt();
11383 if (CLI.RetZExt)
11384 Ret.Flags.setZExt();
11385 if (CLI.IsInReg)
11386 Ret.Flags.setInReg();
11387 CLI.Ins.push_back(Ret);
11388 }
11389 }
11390 }
11391
11392 // We push in swifterror return as the last element of CLI.Ins.
11393 ArgListTy &Args = CLI.getArgs();
11394 if (supportSwiftError()) {
11395 for (const ArgListEntry &Arg : Args) {
11396 if (Arg.IsSwiftError) {
11397 ISD::ArgFlagsTy Flags;
11398 Flags.setSwiftError();
11400 PointerType::getUnqual(Context),
11401 /*Used=*/true, ISD::InputArg::NoArgIndex, 0);
11402 CLI.Ins.push_back(Ret);
11403 }
11404 }
11405 }
11406
11407 // Handle all of the outgoing arguments.
11408 CLI.Outs.clear();
11409 CLI.OutVals.clear();
11410 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
11411 SmallVector<Type *, 4> OrigArgTys;
11412 ComputeValueTypes(DL, Args[i].OrigTy, OrigArgTys);
11413 // FIXME: Split arguments if CLI.IsPostTypeLegalization
11414 Type *FinalType = Args[i].Ty;
11415 if (Args[i].IsByVal)
11416 FinalType = Args[i].IndirectType;
11417 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11418 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
11419 for (unsigned Value = 0, NumValues = OrigArgTys.size(); Value != NumValues;
11420 ++Value) {
11421 Type *OrigArgTy = OrigArgTys[Value];
11422 Type *ArgTy = OrigArgTy;
11423 if (Args[i].Ty != Args[i].OrigTy) {
11424 assert(Value == 0 && "Only supported for non-aggregate arguments");
11425 ArgTy = Args[i].Ty;
11426 }
11427
11428 EVT VT = getValueType(DL, ArgTy);
11429 SDValue Op = SDValue(Args[i].Node.getNode(),
11430 Args[i].Node.getResNo() + Value);
11431 ISD::ArgFlagsTy Flags;
11432
11433 // Certain targets (such as MIPS), may have a different ABI alignment
11434 // for a type depending on the context. Give the target a chance to
11435 // specify the alignment it wants.
11436 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
11437 Flags.setOrigAlign(OriginalAlignment);
11438
11439 if (i >= CLI.NumFixedArgs)
11440 Flags.setVarArg();
11441 if (ArgTy->isPointerTy()) {
11442 Flags.setPointer();
11443 Flags.setPointerAddrSpace(cast<PointerType>(ArgTy)->getAddressSpace());
11444 }
11445 if (Args[i].IsZExt)
11446 Flags.setZExt();
11447 if (Args[i].IsSExt)
11448 Flags.setSExt();
11449 if (Args[i].IsNoExt)
11450 Flags.setNoExt();
11451 if (Args[i].IsInReg) {
11452 // If we are using vectorcall calling convention, a structure that is
11453 // passed InReg - is surely an HVA
11455 isa<StructType>(FinalType)) {
11456 // The first value of a structure is marked
11457 if (0 == Value)
11458 Flags.setHvaStart();
11459 Flags.setHva();
11460 }
11461 // Set InReg Flag
11462 Flags.setInReg();
11463 }
11464 if (Args[i].IsSRet)
11465 Flags.setSRet();
11466 if (Args[i].IsSwiftSelf)
11467 Flags.setSwiftSelf();
11468 if (Args[i].IsSwiftAsync)
11469 Flags.setSwiftAsync();
11470 if (Args[i].IsSwiftError)
11471 Flags.setSwiftError();
11472 if (Args[i].IsCFGuardTarget)
11473 Flags.setCFGuardTarget();
11474 if (Args[i].IsByVal)
11475 Flags.setByVal();
11476 if (Args[i].IsByRef)
11477 Flags.setByRef();
11478 if (Args[i].IsPreallocated) {
11479 Flags.setPreallocated();
11480 // Set the byval flag for CCAssignFn callbacks that don't know about
11481 // preallocated. This way we can know how many bytes we should've
11482 // allocated and how many bytes a callee cleanup function will pop. If
11483 // we port preallocated to more targets, we'll have to add custom
11484 // preallocated handling in the various CC lowering callbacks.
11485 Flags.setByVal();
11486 }
11487 if (Args[i].IsInAlloca) {
11488 Flags.setInAlloca();
11489 // Set the byval flag for CCAssignFn callbacks that don't know about
11490 // inalloca. This way we can know how many bytes we should've allocated
11491 // and how many bytes a callee cleanup function will pop. If we port
11492 // inalloca to more targets, we'll have to add custom inalloca handling
11493 // in the various CC lowering callbacks.
11494 Flags.setByVal();
11495 }
11496 Align MemAlign;
11497 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11498 unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
11499 Flags.setByValSize(FrameSize);
11500
// For ByVal, size and alignment should be passed from FE. BE will guess if this
11501 // info is not there but there are cases it cannot get right.
11502 if (auto MA = Args[i].Alignment)
11503 MemAlign = *MA;
11504 else
11505 MemAlign = getByValTypeAlignment(Args[i].IndirectType, DL);
11506 } else if (auto MA = Args[i].Alignment) {
11507 MemAlign = *MA;
11508 } else {
11509 MemAlign = OriginalAlignment;
11510 }
11511 Flags.setMemAlign(MemAlign);
11512 if (Args[i].IsNest)
11513 Flags.setNest();
11514 if (NeedsRegBlock)
11515 Flags.setInConsecutiveRegs();
11516
11517 MVT PartVT = getRegisterTypeForCallingConv(Context, CLI.CallConv, VT);
11518 unsigned NumParts =
11519 getNumRegistersForCallingConv(Context, CLI.CallConv, VT);
11520 SmallVector<SDValue, 4> Parts(NumParts);
11521 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
11522
11523 if (Args[i].IsSExt)
11524 ExtendKind = ISD::SIGN_EXTEND;
11525 else if (Args[i].IsZExt)
11526 ExtendKind = ISD::ZERO_EXTEND;
11527
11528 // Conservatively only handle 'returned' on non-vectors that can be lowered,
11529 // for now.
11530 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
11532 assert((CLI.RetTy == Args[i].Ty ||
11533 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
11535 Args[i].Ty->getPointerAddressSpace())) &&
11536 RetVTs.size() == NumValues && "unexpected use of 'returned'");
11537 // Before passing 'returned' to the target lowering code, ensure that
11538 // either the register MVT and the actual EVT are the same size or that
11539 // the return value and argument are extended in the same way; in these
11540 // cases it's safe to pass the argument register value unchanged as the
11541 // return register value (although it's at the target's option whether
11542 // to do so)
11543 // TODO: allow code generation to take advantage of partially preserved
11544 // registers rather than clobbering the entire register when the
11545 // parameter extension method is not compatible with the return
11546 // extension method
11547 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
11548 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
11549 CLI.RetZExt == Args[i].IsZExt))
11550 Flags.setReturned();
11551 }
11552
11553 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
11554 CLI.CallConv, ExtendKind);
11555
11556 for (unsigned j = 0; j != NumParts; ++j) {
11557 // if it isn't first piece, alignment must be 1
11558 // For scalable vectors the scalable part is currently handled
11559 // by individual targets, so we just use the known minimum size here.
11560 ISD::OutputArg MyFlags(
11561 Flags, Parts[j].getValueType().getSimpleVT(), VT, OrigArgTy, i,
11562 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11563 if (NumParts > 1 && j == 0)
11564 MyFlags.Flags.setSplit();
11565 else if (j != 0) {
11566 MyFlags.Flags.setOrigAlign(Align(1));
11567 if (j == NumParts - 1)
11568 MyFlags.Flags.setSplitEnd();
11569 }
11570
11571 CLI.Outs.push_back(MyFlags);
11572 CLI.OutVals.push_back(Parts[j]);
11573 }
11574
11575 if (NeedsRegBlock && Value == NumValues - 1)
11576 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11577 }
11578 }
11579
// Let the target emit the actual call sequence and fill InVals with the
// call's live-out result values.
11581 CLI.Chain = LowerCall(CLI, InVals);
11582
11583 // Update CLI.InVals to use outside of this function.
11584 CLI.InVals = InVals;
11585
11586 // Verify that the target's LowerCall behaved as expected.
11587 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
11588 "LowerCall didn't return a valid chain!");
11589 assert((!CLI.IsTailCall || InVals.empty()) &&
11590 "LowerCall emitted a return value for a tail call!");
11591 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
11592 "LowerCall didn't emit the correct number of values!");
11593
11594 // For a tail call, the return value is merely live-out and there aren't
11595 // any nodes in the DAG representing it. Return a special value to
11596 // indicate that a tail call has been emitted and no more Instructions
11597 // should be processed in the current block.
11598 if (CLI.IsTailCall) {
11599 CLI.DAG.setRoot(CLI.Chain);
11600 return std::make_pair(SDValue(), SDValue());
11601 }
11602
11603#ifndef NDEBUG
11604 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
11605 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
11606 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
11607 "LowerCall emitted a value with the wrong type!");
11608 }
11609#endif
11610
11611 SmallVector<SDValue, 4> ReturnValues;
11612 if (!CanLowerReturn) {
11613 // The instruction result is the result of loading from the
11614 // hidden sret parameter.
11615 MVT PtrVT = getPointerTy(DL, DL.getAllocaAddrSpace());
11616
11617 unsigned NumValues = RetVTs.size();
11618 ReturnValues.resize(NumValues);
11619 SmallVector<SDValue, 4> Chains(NumValues);
11620
11621 // An aggregate return value cannot wrap around the address space, so
11622 // offsets to its parts don't wrap either.
11624 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
11625 for (unsigned i = 0; i < NumValues; ++i) {
11627 DemoteStackSlot, CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
11629 SDValue L = CLI.DAG.getLoad(
11630 RetVTs[i], CLI.DL, CLI.Chain, Add,
11632 DemoteStackIdx, Offsets[i]),
11633 HiddenSRetAlign);
11634 ReturnValues[i] = L;
11635 Chains[i] = L.getValue(1);
11636 }
11637
11638 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
11639 } else {
11640 // Collect the legal value parts into potentially illegal values
11641 // that correspond to the original function's return values.
11642 std::optional<ISD::NodeType> AssertOp;
11643 if (CLI.RetSExt)
11644 AssertOp = ISD::AssertSext;
11645 else if (CLI.RetZExt)
11646 AssertOp = ISD::AssertZext;
11647 unsigned CurReg = 0;
11648 for (EVT VT : RetVTs) {
11649 MVT RegisterVT = getRegisterTypeForCallingConv(Context, CLI.CallConv, VT);
11650 unsigned NumRegs =
11651 getNumRegistersForCallingConv(Context, CLI.CallConv, VT);
11652
11653 ReturnValues.push_back(getCopyFromParts(
11654 CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
11655 CLI.Chain, CLI.CallConv, AssertOp));
11656 CurReg += NumRegs;
11657 }
11658
11659 // For a function returning void, there is no return value. We can't create
11660 // such a node, so we just return a null return value in that case. In
11661 // that case, nothing will actually look at the value.
11662 if (ReturnValues.empty())
11663 return std::make_pair(SDValue(), CLI.Chain);
11664 }
11665
11666 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
11667 CLI.DAG.getVTList(RetVTs), ReturnValues);
11668 return std::make_pair(Res, CLI.Chain);
11669}
11670
11671 /// Places new result values for the node in Results (their number
11672 /// and types must exactly match those of the original return values of
11673 /// the node), or leaves Results empty, which indicates that the node is not
11674 /// to be custom lowered after all.
// NOTE(review): the leading signature lines (11675-11676, presumably
// `void TargetLowering::LowerOperationWrapper(SDNode *N,
//  SmallVectorImpl<SDValue> &Results,`) were dropped by the extractor —
// confirm against the upstream file.
11677 SelectionDAG &DAG) const {
11678 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
11679
// A null result means the target declined to custom-lower; leave Results
// empty so the caller falls back to the default expansion.
11680 if (!Res.getNode())
11681 return;
11682
11683 // If the original node has one result, take the return value from
11684 // LowerOperation as is. It might not be result number 0.
11685 if (N->getNumValues() == 1) {
11686 Results.push_back(Res);
11687 return;
11688 }
11689
11690 // If the original node has multiple results, then the return node should
11691 // have the same number of results.
11692 assert((N->getNumValues() == Res->getNumValues()) &&
11693 "Lowering returned the wrong number of results!");
11694
11695 // Places new result values based on N's result numbering.
11696 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
11697 Results.push_back(Res.getValue(I));
11698}
11699
// Default TargetLowering::LowerOperation implementation: any target that marks
// an operation as Custom must override this hook, so reaching it is a bug.
// NOTE(review): the signature line (11700) was dropped by the extractor.
11701 llvm_unreachable("LowerOperation not implemented for this target!");
11702}
11703
// Copy the SDValue computed for V into its assigned virtual register so that
// other basic blocks can read it.
// NOTE(review): the first signature line (11704) and the line defining `Op`
// (11707, presumably `SDValue Op = getValue(V);`) were dropped by the
// extractor — confirm against the upstream file.
11705 Register Reg,
11706 ISD::NodeType ExtendType) {
11708 assert((Op.getOpcode() != ISD::CopyFromReg ||
11709 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11710 "Copy from a reg to the same reg!");
11711 assert(!Reg.isPhysical() && "Is a physreg");
11712
11713 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11714 // If this is an InlineAsm we have to match the registers required, not the
11715 // notional registers required by the type.
11716
11717 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11718 std::nullopt); // This is not an ABI copy.
11719 SDValue Chain = DAG.getEntryNode();
11720
// An ANY_EXTEND request may be refined to the extension kind recorded for V
// by earlier analysis (FuncInfo.PreferredExtendType).
11721 if (ExtendType == ISD::ANY_EXTEND) {
11722 auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
11723 if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
11724 ExtendType = PreferredExtendIt->second;
11725 }
11726 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
// Defer the copy by recording it as a pending export.
11727 PendingExports.push_back(Chain);
11728}
11729
11731
11732/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11733/// entry block, return true. This includes arguments used by switches, since
11734/// the switch may expand into multiple basic blocks.
11735static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
11736 // With FastISel active, we may be splitting blocks, so force creation
11737 // of virtual registers for all non-dead arguments.
11738 if (FastISel)
11739 return A->use_empty();
11740
11741 const BasicBlock &Entry = A->getParent()->front();
11742 for (const User *U : A->users())
11743 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11744 return false; // Use not in entry block.
11745
11746 return true;
11747}
11748
11750 DenseMap<const Argument *,
11751 std::pair<const AllocaInst *, const StoreInst *>>;
11752
11753/// Scan the entry block of the function in FuncInfo for arguments that look
11754/// like copies into a local alloca. Record any copied arguments in
11755/// ArgCopyElisionCandidates.
11756 static void
// NOTE(review): the rest of the signature (line 11757, presumably
// `findArgumentCopyElisionCandidates(const DataLayout &DL,`) and the
// declaration of the `StaticAllocas` map (line 11764, presumably a
// SmallDenseMap<const AllocaInst *, StaticAllocaInfo>) were dropped by the
// extractor — confirm against the upstream file.
11758 FunctionLoweringInfo *FuncInfo,
11759 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
11760 // Record the state of every static alloca used in the entry block. Argument
11761 // allocas are all used in the entry block, so we need approximately as many
11762 // entries as we have arguments.
11763 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
11765 unsigned NumArgs = FuncInfo->Fn->arg_size();
11766 StaticAllocas.reserve(NumArgs * 2);
11767
// Return a pointer to the tracking state for V if V is (a cast of) a static
// alloca that codegen knows about, creating an Unknown entry on first use.
11768 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11769 if (!V)
11770 return nullptr;
11771 V = V->stripPointerCasts();
11772 const auto *AI = dyn_cast<AllocaInst>(V);
11773 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11774 return nullptr;
11775 auto Iter = StaticAllocas.insert({AI, Unknown});
11776 return &Iter.first->second;
11777 };
11778
11779 // Look for stores of arguments to static allocas. Look through bitcasts and
11780 // GEPs to handle type coercions, as long as the alloca is fully initialized
11781 // by the store. Any non-store use of an alloca escapes it and any subsequent
11782 // unanalyzed store might write it.
11783 // FIXME: Handle structs initialized with multiple stores.
11784 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11785 // Look for stores, and handle non-store uses conservatively.
11786 const auto *SI = dyn_cast<StoreInst>(&I);
11787 if (!SI) {
11788 // We will look through cast uses, so ignore them completely.
11789 if (I.isCast())
11790 continue;
11791 // Ignore debug info and pseudo op intrinsics, they don't escape or store
11792 // to allocas.
11793 if (I.isDebugOrPseudoInst())
11794 continue;
11795 // This is an unknown instruction. Assume it escapes or writes to all
11796 // static alloca operands.
11797 for (const Use &U : I.operands()) {
11798 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11799 *Info = StaticAllocaInfo::Clobbered;
11800 }
11801 continue;
11802 }
11803
11804 // If the stored value is a static alloca, mark it as escaped.
11805 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11806 *Info = StaticAllocaInfo::Clobbered;
11807
11808 // Check if the destination is a static alloca.
11809 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11810 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11811 if (!Info)
11812 continue;
11813 const AllocaInst *AI = cast<AllocaInst>(Dst);
11814
11815 // Skip allocas that have been initialized or clobbered.
11816 if (*Info != StaticAllocaInfo::Unknown)
11817 continue;
11818
11819 // Check if the stored value is an argument, and that this store fully
11820 // initializes the alloca.
11821 // If the argument type has padding bits we can't directly forward a pointer
11822 // as the upper bits may contain garbage.
11823 // Don't elide copies from the same argument twice.
11824 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11825 const auto *Arg = dyn_cast<Argument>(Val);
11826 std::optional<TypeSize> AllocaSize = AI->getAllocationSize(DL);
11827 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11828 Arg->getType()->isEmptyTy() || !AllocaSize ||
11829 DL.getTypeStoreSize(Arg->getType()) != *AllocaSize ||
11830 !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11831 ArgCopyElisionCandidates.count(Arg)) {
11832 *Info = StaticAllocaInfo::Clobbered;
11833 continue;
11834 }
11835
11836 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
11837 << '\n');
11838
11839 // Mark this alloca and store for argument copy elision.
11840 *Info = StaticAllocaInfo::Elidable;
11841 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
11842
11843 // Stop scanning if we've seen all arguments. This will happen early in -O0
11844 // builds, which is useful, because -O0 builds have large entry blocks and
11845 // many allocas.
11846 if (ArgCopyElisionCandidates.size() == NumArgs)
11847 break;
11848 }
11849}
11850
11851/// Try to elide argument copies from memory into a local alloca. Succeeds if
11852/// ArgVal is a load from a suitable fixed stack object.
// NOTE(review): the leading signature lines (11853-11854, presumably
// `static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo,
//  SmallVectorImpl<SDValue> &Chains,`) were dropped by the extractor —
// confirm against the upstream file.
11855 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
11856 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
11857 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
11858 ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
11859 // Check if this is a load from a fixed stack object.
11860 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11861 if (!LNode)
11862 return;
11863 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11864 if (!FINode)
11865 return;
11866
11867 // Check that the fixed stack object is the right size and alignment.
11868 // Look at the alignment that the user wrote on the alloca instead of looking
11869 // at the stack object.
11870 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11871 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11872 const AllocaInst *AI = ArgCopyIter->second.first;
11873 int FixedIndex = FINode->getIndex();
11874 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
11875 int OldIndex = AllocaIndex;
11876 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11877 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
11878 LLVM_DEBUG(
11879 dbgs() << "  argument copy elision failed due to bad fixed stack "
11880 "object size\n");
11881 return;
11882 }
11883 Align RequiredAlignment = AI->getAlign();
11884 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
11885 LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
11886 "greater than stack argument alignment ("
11887 << DebugStr(RequiredAlignment) << " vs "
11888 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
11889 return;
11890 }
11891
11892 // Perform the elision. Delete the old stack object and replace its only use
11893 // in the variable info map. Mark the stack object as mutable and aliased.
11894 LLVM_DEBUG({
11895 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
11896 << "  Replacing frame index " << OldIndex << " with " << FixedIndex
11897 << '\n';
11898 });
11899 MFI.RemoveStackObject(OldIndex);
11900 MFI.setIsImmutableObjectIndex(FixedIndex, false);
11901 MFI.setIsAliasedObjectIndex(FixedIndex, true);
11902 AllocaIndex = FixedIndex;
11903 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
// Keep the loads' chains alive so the argument values stay anchored in the DAG.
11904 for (SDValue ArgVal : ArgVals)
11905 Chains.push_back(ArgVal.getValue(1));
11906
11907 // Avoid emitting code for the store implementing the copy.
11908 const StoreInst *SI = ArgCopyIter->second.second;
11909 ElidedArgCopyInstrs.insert(SI);
11910
11911 // Check for uses of the argument again so that we can avoid exporting ArgVal
11912 // if it isn't used by anything other than the store.
11913 for (const Value *U : Arg.users()) {
11914 if (U != SI) {
11915 ArgHasUses = true;
11916 break;
11917 }
11918 }
11919}
11920
11921void SelectionDAGISel::LowerArguments(const Function &F) {
11922 SelectionDAG &DAG = SDB->DAG;
11923 SDLoc dl = SDB->getCurSDLoc();
11924 const DataLayout &DL = DAG.getDataLayout();
11926
11927 // In Naked functions we aren't going to save any registers.
11928 if (F.hasFnAttribute(Attribute::Naked))
11929 return;
11930
11931 if (!FuncInfo->CanLowerReturn) {
11932 // Put in an sret pointer parameter before all the other parameters.
11933 MVT ValueVT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
11934
11935 ISD::ArgFlagsTy Flags;
11936 Flags.setSRet();
11937 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
11938 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, F.getReturnType(), true,
11940 Ins.push_back(RetArg);
11941 }
11942
11943 // Look for stores of arguments to static allocas. Mark such arguments with a
11944 // flag to ask the target to give us the memory location of that argument if
11945 // available.
11946 ArgCopyElisionMapTy ArgCopyElisionCandidates;
11948 ArgCopyElisionCandidates);
11949
11950 // Set up the incoming argument description vector.
11951 for (const Argument &Arg : F.args()) {
11952 unsigned ArgNo = Arg.getArgNo();
11954 ComputeValueTypes(DAG.getDataLayout(), Arg.getType(), Types);
11955 bool isArgValueUsed = !Arg.use_empty();
11956 Type *FinalType = Arg.getType();
11957 if (Arg.hasAttribute(Attribute::ByVal))
11958 FinalType = Arg.getParamByValType();
11959 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11960 FinalType, F.getCallingConv(), F.isVarArg(), DL);
11961 for (unsigned Value = 0, NumValues = Types.size(); Value != NumValues;
11962 ++Value) {
11963 Type *ArgTy = Types[Value];
11964 EVT VT = TLI->getValueType(DL, ArgTy);
11965 ISD::ArgFlagsTy Flags;
11966
11967 if (ArgTy->isPointerTy()) {
11968 Flags.setPointer();
11969 Flags.setPointerAddrSpace(cast<PointerType>(ArgTy)->getAddressSpace());
11970 }
11971 if (Arg.hasAttribute(Attribute::ZExt))
11972 Flags.setZExt();
11973 if (Arg.hasAttribute(Attribute::SExt))
11974 Flags.setSExt();
11975 if (Arg.hasAttribute(Attribute::InReg)) {
11976 // If we are using vectorcall calling convention, a structure that is
11977 // passed InReg - is surely an HVA
11978 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11979 isa<StructType>(Arg.getType())) {
11980 // The first value of a structure is marked
11981 if (0 == Value)
11982 Flags.setHvaStart();
11983 Flags.setHva();
11984 }
11985 // Set InReg Flag
11986 Flags.setInReg();
11987 }
11988 if (Arg.hasAttribute(Attribute::StructRet))
11989 Flags.setSRet();
11990 if (Arg.hasAttribute(Attribute::SwiftSelf))
11991 Flags.setSwiftSelf();
11992 if (Arg.hasAttribute(Attribute::SwiftAsync))
11993 Flags.setSwiftAsync();
11994 if (Arg.hasAttribute(Attribute::SwiftError))
11995 Flags.setSwiftError();
11996 if (Arg.hasAttribute(Attribute::ByVal))
11997 Flags.setByVal();
11998 if (Arg.hasAttribute(Attribute::ByRef))
11999 Flags.setByRef();
12000 if (Arg.hasAttribute(Attribute::InAlloca)) {
12001 Flags.setInAlloca();
12002 // Set the byval flag for CCAssignFn callbacks that don't know about
12003 // inalloca. This way we can know how many bytes we should've allocated
12004 // and how many bytes a callee cleanup function will pop. If we port
12005 // inalloca to more targets, we'll have to add custom inalloca handling
12006 // in the various CC lowering callbacks.
12007 Flags.setByVal();
12008 }
12009 if (Arg.hasAttribute(Attribute::Preallocated)) {
12010 Flags.setPreallocated();
12011 // Set the byval flag for CCAssignFn callbacks that don't know about
12012 // preallocated. This way we can know how many bytes we should've
12013 // allocated and how many bytes a callee cleanup function will pop. If
12014 // we port preallocated to more targets, we'll have to add custom
12015 // preallocated handling in the various CC lowering callbacks.
12016 Flags.setByVal();
12017 }
12018
12019 // Certain targets (such as MIPS), may have a different ABI alignment
12020 // for a type depending on the context. Give the target a chance to
12021 // specify the alignment it wants.
12022 const Align OriginalAlignment(
12023 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
12024 Flags.setOrigAlign(OriginalAlignment);
12025
12026 Align MemAlign;
12027 Type *ArgMemTy = nullptr;
12028 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
12029 Flags.isByRef()) {
12030 if (!ArgMemTy)
12031 ArgMemTy = Arg.getPointeeInMemoryValueType();
12032
12033 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
12034
12035 // For in-memory arguments, size and alignment should be passed from FE.
12036 // BE will guess if this info is not there but there are cases it cannot
12037 // get right.
12038 if (auto ParamAlign = Arg.getParamStackAlign())
12039 MemAlign = *ParamAlign;
12040 else if ((ParamAlign = Arg.getParamAlign()))
12041 MemAlign = *ParamAlign;
12042 else
12043 MemAlign = TLI->getByValTypeAlignment(ArgMemTy, DL);
12044 if (Flags.isByRef())
12045 Flags.setByRefSize(MemSize);
12046 else
12047 Flags.setByValSize(MemSize);
12048 } else if (auto ParamAlign = Arg.getParamStackAlign()) {
12049 MemAlign = *ParamAlign;
12050 } else {
12051 MemAlign = OriginalAlignment;
12052 }
12053 Flags.setMemAlign(MemAlign);
12054
12055 if (Arg.hasAttribute(Attribute::Nest))
12056 Flags.setNest();
12057 if (NeedsRegBlock)
12058 Flags.setInConsecutiveRegs();
12059 if (ArgCopyElisionCandidates.count(&Arg))
12060 Flags.setCopyElisionCandidate();
12061 if (Arg.hasAttribute(Attribute::Returned))
12062 Flags.setReturned();
12063
12064 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
12065 *CurDAG->getContext(), F.getCallingConv(), VT);
12066 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
12067 *CurDAG->getContext(), F.getCallingConv(), VT);
12068 for (unsigned i = 0; i != NumRegs; ++i) {
12069 // For scalable vectors, use the minimum size; individual targets
12070 // are responsible for handling scalable vector arguments and
12071 // return values.
12072 ISD::InputArg MyFlags(
12073 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
12074 i * RegisterVT.getStoreSize().getKnownMinValue());
12075 if (NumRegs > 1 && i == 0)
12076 MyFlags.Flags.setSplit();
12077 // if it isn't first piece, alignment must be 1
12078 else if (i > 0) {
12079 MyFlags.Flags.setOrigAlign(Align(1));
12080 if (i == NumRegs - 1)
12081 MyFlags.Flags.setSplitEnd();
12082 }
12083 Ins.push_back(MyFlags);
12084 }
12085 if (NeedsRegBlock && Value == NumValues - 1)
12086 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
12087 }
12088 }
12089
12090 // Call the target to set up the argument values.
12092 SDValue NewRoot = TLI->LowerFormalArguments(
12093 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
12094
12095 // Verify that the target's LowerFormalArguments behaved as expected.
12096 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
12097 "LowerFormalArguments didn't return a valid chain!");
12098 assert(InVals.size() == Ins.size() &&
12099 "LowerFormalArguments didn't emit the correct number of values!");
12100 LLVM_DEBUG({
12101 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
12102 assert(InVals[i].getNode() &&
12103 "LowerFormalArguments emitted a null value!");
12104 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
12105 "LowerFormalArguments emitted a value with the wrong type!");
12106 }
12107 });
12108
12109 // Update the DAG with the new chain value resulting from argument lowering.
12110 DAG.setRoot(NewRoot);
12111
12112 // Set up the argument values.
12113 unsigned i = 0;
12114 if (!FuncInfo->CanLowerReturn) {
12115 // Create a virtual register for the sret pointer, and put in a copy
12116 // from the sret argument into it.
12117 MVT VT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
12118 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
12119 std::optional<ISD::NodeType> AssertOp;
12120 SDValue ArgValue =
12121 getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
12122 F.getCallingConv(), AssertOp);
12123
12124 MachineFunction& MF = SDB->DAG.getMachineFunction();
12125 MachineRegisterInfo& RegInfo = MF.getRegInfo();
12126 Register SRetReg =
12127 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
12128 FuncInfo->DemoteRegister = SRetReg;
12129 NewRoot =
12130 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
12131 DAG.setRoot(NewRoot);
12132
12133 // i indexes lowered arguments. Bump it past the hidden sret argument.
12134 ++i;
12135 }
12136
12138 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
12139 for (const Argument &Arg : F.args()) {
12140 SmallVector<SDValue, 4> ArgValues;
12141 SmallVector<EVT, 4> ValueVTs;
12142 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
12143 unsigned NumValues = ValueVTs.size();
12144 if (NumValues == 0)
12145 continue;
12146
12147 bool ArgHasUses = !Arg.use_empty();
12148
12149 // Elide the copying store if the target loaded this argument from a
12150 // suitable fixed stack object.
12151 if (Ins[i].Flags.isCopyElisionCandidate()) {
12152 unsigned NumParts = 0;
12153 for (EVT VT : ValueVTs)
12154 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
12155 F.getCallingConv(), VT);
12156
12157 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
12158 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
12159 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12160 }
12161
12162 // If this argument is unused then remember its value. It is used to generate
12163 // debugging information.
12164 bool isSwiftErrorArg =
12165 TLI->supportSwiftError() &&
12166 Arg.hasAttribute(Attribute::SwiftError);
12167 if (!ArgHasUses && !isSwiftErrorArg) {
12168 SDB->setUnusedArgValue(&Arg, InVals[i]);
12169
12170 // Also remember any frame index for use in FastISel.
12171 if (FrameIndexSDNode *FI =
12173 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12174 }
12175
12176 for (unsigned Val = 0; Val != NumValues; ++Val) {
12177 EVT VT = ValueVTs[Val];
12178 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
12179 F.getCallingConv(), VT);
12180 unsigned NumParts = TLI->getNumRegistersForCallingConv(
12181 *CurDAG->getContext(), F.getCallingConv(), VT);
12182
12183 // Even an apparent 'unused' swifterror argument needs to be returned. So
12184 // we do generate a copy for it that can be used on return from the
12185 // function.
12186 if (ArgHasUses || isSwiftErrorArg) {
12187 std::optional<ISD::NodeType> AssertOp;
12188 if (Arg.hasAttribute(Attribute::SExt))
12189 AssertOp = ISD::AssertSext;
12190 else if (Arg.hasAttribute(Attribute::ZExt))
12191 AssertOp = ISD::AssertZext;
12192
12193 SDValue OutVal =
12194 getCopyFromParts(DAG, dl, &InVals[i], NumParts, PartVT, VT, nullptr,
12195 NewRoot, F.getCallingConv(), AssertOp);
12196
12197 FPClassTest NoFPClass = Arg.getNoFPClass();
12198 if (NoFPClass != fcNone) {
12199 SDValue SDNoFPClass = DAG.getTargetConstant(
12200 static_cast<uint64_t>(NoFPClass), dl, MVT::i32);
12201 OutVal = DAG.getNode(ISD::AssertNoFPClass, dl, OutVal.getValueType(),
12202 OutVal, SDNoFPClass);
12203 }
12204 ArgValues.push_back(OutVal);
12205 }
12206
12207 i += NumParts;
12208 }
12209
12210 // We don't need to do anything else for unused arguments.
12211 if (ArgValues.empty())
12212 continue;
12213
12214 // Note down frame index.
12215 if (FrameIndexSDNode *FI =
12216 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
12217 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12218
12219 SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
12220 SDB->getCurSDLoc());
12221
12222 SDB->setValue(&Arg, Res);
12223 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
12224 // We want to associate the argument with the frame index, among
12225 // involved operands, that correspond to the lowest address. The
12226 // getCopyFromParts function, called earlier, is swapping the order of
12227 // the operands to BUILD_PAIR depending on endianness. The result of
12228 // that swapping is that the least significant bits of the argument will
12229 // be in the first operand of the BUILD_PAIR node, and the most
12230 // significant bits will be in the second operand.
12231 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
12232 if (LoadSDNode *LNode =
12233 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
12234 if (FrameIndexSDNode *FI =
12235 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
12236 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12237 }
12238
12239 // Analyses past this point are naive and don't expect an assertion.
12240 if (Res.getOpcode() == ISD::AssertZext)
12241 Res = Res.getOperand(0);
12242
12243 // Update the SwiftErrorVRegDefMap.
12244 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
12245 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
12246 if (Reg.isVirtual())
12247 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
12248 Reg);
12249 }
12250
12251 // If this argument is live outside of the entry block, insert a copy from
12252 // wherever we got it to the vreg that other BB's will reference it as.
12253 if (Res.getOpcode() == ISD::CopyFromReg) {
12254 // If we can, though, try to skip creating an unnecessary vreg.
12255 // FIXME: This isn't very clean... it would be nice to make this more
12256 // general.
12257 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
12258 if (Reg.isVirtual()) {
12259 FuncInfo->ValueMap[&Arg] = Reg;
12260 continue;
12261 }
12262 }
12263 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
12264 FuncInfo->InitializeRegForValue(&Arg);
12265 SDB->CopyToExportRegsIfNeeded(&Arg);
12266 }
12267 }
12268
12269 if (!Chains.empty()) {
12270 Chains.push_back(NewRoot);
12271 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
12272 }
12273
12274 DAG.setRoot(NewRoot);
12275
12276 assert(i == InVals.size() && "Argument register count mismatch!");
12277
12278 // If any argument copy elisions occurred and we have debug info, update the
12279 // stale frame indices used in the dbg.declare variable info table.
12280 if (!ArgCopyElisionFrameIndexMap.empty()) {
12281 for (MachineFunction::VariableDbgInfo &VI :
12282 MF->getInStackSlotVariableDbgInfo()) {
12283 auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
12284 if (I != ArgCopyElisionFrameIndexMap.end())
12285 VI.updateStackSlot(I->second);
12286 }
12287 }
12288
12289 // Finally, if the target has anything special to do, allow it to do so.
12291}
12292
12293/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
12294/// ensure constants are generated when needed. Remember the virtual registers
12295/// that need to be added to the Machine PHI nodes as input. We cannot just
12296/// directly add them, because expansion might result in multiple MBB's for one
12297/// BB. As such, the start of the BB might correspond to a different MBB than
12298/// the end.
12299void
12300SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
12301 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12302
12303 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12304
12305 // Check PHI nodes in successors that expect a value to be available from this
12306 // block.
12307 for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
12308 if (!isa<PHINode>(SuccBB->begin())) continue;
12309 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
12310
12311 // If this terminator has multiple identical successors (common for
12312 // switches), only handle each succ once.
12313 if (!SuccsHandled.insert(SuccMBB).second)
12314 continue;
12315
12317
12318 // At this point we know that there is a 1-1 correspondence between LLVM PHI
12319 // nodes and Machine PHI nodes, but the incoming operands have not been
12320 // emitted yet.
12321 for (const PHINode &PN : SuccBB->phis()) {
12322 // Ignore dead phi's.
12323 if (PN.use_empty())
12324 continue;
12325
12326 // Skip empty types
12327 if (PN.getType()->isEmptyTy())
12328 continue;
12329
12330 Register Reg;
12331 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12332
12333 if (const auto *C = dyn_cast<Constant>(PHIOp)) {
12334 Register &RegOut = ConstantsOut[C];
12335 if (!RegOut) {
12336 RegOut = FuncInfo.CreateRegs(&PN);
12337 // We need to zero/sign extend ConstantInt phi operands to match
12338 // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
12339 ISD::NodeType ExtendType = ISD::ANY_EXTEND;
12340 if (auto *CI = dyn_cast<ConstantInt>(C))
12341 ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
12343 CopyValueToVirtualRegister(C, RegOut, ExtendType);
12344 }
12345 Reg = RegOut;
12346 } else {
12347 auto I = FuncInfo.ValueMap.find(PHIOp);
12348 if (I != FuncInfo.ValueMap.end())
12349 Reg = I->second;
12350 else {
12351 assert(isa<AllocaInst>(PHIOp) &&
12352 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
12353 "Didn't codegen value into a register!??");
12354 Reg = FuncInfo.CreateRegs(&PN);
12356 }
12357 }
12358
12359 // Remember that this register needs to added to the machine PHI node as
12360 // the input for this MBB.
12361 SmallVector<EVT, 4> ValueVTs;
12362 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
12363 for (EVT VT : ValueVTs) {
12364 const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
12365 for (unsigned i = 0; i != NumRegisters; ++i)
12366 FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg + i);
12367 Reg += NumRegisters;
12368 }
12369 }
12370 }
12371
12372 ConstantsOut.clear();
12373}
12374
12375MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
12377 if (++I == FuncInfo.MF->end())
12378 return nullptr;
12379 return &*I;
12380}
12381
12382/// During lowering new call nodes can be created (such as memset, etc.).
12383/// Those will become new roots of the current DAG, but complications arise
12384/// when they are tail calls. In such cases, the call lowering will update
12385/// the root, but the builder still needs to know that a tail call has been
12386/// lowered in order to avoid generating an additional return.
12387void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
12388 // If the node is null, we do have a tail call.
12389 if (MaybeTC.getNode() != nullptr)
12390 DAG.setRoot(MaybeTC);
12391 else
12392 HasTailCall = true;
12393}
12394
12395void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
12396 MachineBasicBlock *SwitchMBB,
12397 MachineBasicBlock *DefaultMBB) {
12398 MachineFunction *CurMF = FuncInfo.MF;
12399 MachineBasicBlock *NextMBB = nullptr;
12401 if (++BBI != FuncInfo.MF->end())
12402 NextMBB = &*BBI;
12403
12404 unsigned Size = W.LastCluster - W.FirstCluster + 1;
12405
12406 BranchProbabilityInfo *BPI = FuncInfo.BPI;
12407
12408 if (Size == 2 && W.MBB == SwitchMBB) {
12409 // If any two of the cases has the same destination, and if one value
12410 // is the same as the other, but has one bit unset that the other has set,
12411 // use bit manipulation to do two compares at once. For example:
12412 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12413 // TODO: This could be extended to merge any 2 cases in switches with 3
12414 // cases.
12415 // TODO: Handle cases where W.CaseBB != SwitchBB.
12416 CaseCluster &Small = *W.FirstCluster;
12417 CaseCluster &Big = *W.LastCluster;
12418
12419 if (Small.Low == Small.High && Big.Low == Big.High &&
12420 Small.MBB == Big.MBB) {
12421 const APInt &SmallValue = Small.Low->getValue();
12422 const APInt &BigValue = Big.Low->getValue();
12423
12424 // Check that there is only one bit different.
12425 APInt CommonBit = BigValue ^ SmallValue;
12426 if (CommonBit.isPowerOf2()) {
12427 SDValue CondLHS = getValue(Cond);
12428 EVT VT = CondLHS.getValueType();
12429 SDLoc DL = getCurSDLoc();
12430
12431 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12432 DAG.getConstant(CommonBit, DL, VT));
12433 SDValue Cond = DAG.getSetCC(
12434 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12435 ISD::SETEQ);
12436
12437 // Update successor info.
12438 // Both Small and Big will jump to Small.BB, so we sum up the
12439 // probabilities.
12440 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12441 if (BPI)
12442 addSuccessorWithProb(
12443 SwitchMBB, DefaultMBB,
12444 // The default destination is the first successor in IR.
12445 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12446 else
12447 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12448
12449 // Insert the true branch.
12450 SDValue BrCond =
12451 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
12452 DAG.getBasicBlock(Small.MBB));
12453 // Insert the false branch.
12454 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
12455 DAG.getBasicBlock(DefaultMBB));
12456
12457 DAG.setRoot(BrCond);
12458 return;
12459 }
12460 }
12461 }
12462
12463 if (TM.getOptLevel() != CodeGenOptLevel::None) {
12464 // Here, we order cases by probability so the most likely case will be
12465 // checked first. However, two clusters can have the same probability in
12466 // which case their relative ordering is non-deterministic. So we use Low
12467 // as a tie-breaker as clusters are guaranteed to never overlap.
12468 llvm::sort(W.FirstCluster, W.LastCluster + 1,
12469 [](const CaseCluster &a, const CaseCluster &b) {
12470 return a.Prob != b.Prob ?
12471 a.Prob > b.Prob :
12472 a.Low->getValue().slt(b.Low->getValue());
12473 });
12474
12475 // Rearrange the case blocks so that the last one falls through if possible
12476 // without changing the order of probabilities.
12477 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
12478 --I;
12479 if (I->Prob > W.LastCluster->Prob)
12480 break;
12481 if (I->Kind == CC_Range && I->MBB == NextMBB) {
12482 std::swap(*I, *W.LastCluster);
12483 break;
12484 }
12485 }
12486 }
12487
12488 // Compute total probability.
12489 BranchProbability DefaultProb = W.DefaultProb;
12490 BranchProbability UnhandledProbs = DefaultProb;
12491 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
12492 UnhandledProbs += I->Prob;
12493
12494 MachineBasicBlock *CurMBB = W.MBB;
12495 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
12496 bool FallthroughUnreachable = false;
12497 MachineBasicBlock *Fallthrough;
12498 if (I == W.LastCluster) {
12499 // For the last cluster, fall through to the default destination.
12500 Fallthrough = DefaultMBB;
12501 FallthroughUnreachable = isa<UnreachableInst>(
12502 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12503 } else {
12504 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12505 CurMF->insert(BBI, Fallthrough);
12506 // Put Cond in a virtual register to make it available from the new blocks.
12508 }
12509 UnhandledProbs -= I->Prob;
12510
12511 switch (I->Kind) {
12512 case CC_JumpTable: {
12513 // FIXME: Optimize away range check based on pivot comparisons.
12514 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12515 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12516
12517 // The jump block hasn't been inserted yet; insert it here.
12518 MachineBasicBlock *JumpMBB = JT->MBB;
12519 CurMF->insert(BBI, JumpMBB);
12520
12521 auto JumpProb = I->Prob;
12522 auto FallthroughProb = UnhandledProbs;
12523
12524 // If the default statement is a target of the jump table, we evenly
12525 // distribute the default probability to successors of CurMBB. Also
12526 // update the probability on the edge from JumpMBB to Fallthrough.
12527 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12528 SE = JumpMBB->succ_end();
12529 SI != SE; ++SI) {
12530 if (*SI == DefaultMBB) {
12531 JumpProb += DefaultProb / 2;
12532 FallthroughProb -= DefaultProb / 2;
12533 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12534 JumpMBB->normalizeSuccProbs();
12535 break;
12536 }
12537 }
12538
12539 // If the default clause is unreachable, propagate that knowledge into
12540 // JTH->FallthroughUnreachable which will use it to suppress the range
12541 // check.
12542 //
12543 // However, don't do this if we're doing branch target enforcement,
12544 // because a table branch _without_ a range check can be a tempting JOP
12545 // gadget - out-of-bounds inputs that are impossible in correct
12546 // execution become possible again if an attacker can influence the
12547 // control flow. So if an attacker doesn't already have a BTI bypass
12548 // available, we don't want them to be able to get one out of this
12549 // table branch.
12550 if (FallthroughUnreachable) {
12551 Function &CurFunc = CurMF->getFunction();
12552 if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12553 JTH->FallthroughUnreachable = true;
12554 }
12555
12556 if (!JTH->FallthroughUnreachable)
12557 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12558 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12559 CurMBB->normalizeSuccProbs();
12560
12561 // The jump table header will be inserted in our current block, do the
12562 // range check, and fall through to our fallthrough block.
12563 JTH->HeaderBB = CurMBB;
12564 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12565
12566 // If we're in the right place, emit the jump table header right now.
12567 if (CurMBB == SwitchMBB) {
12568 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12569 JTH->Emitted = true;
12570 }
12571 break;
12572 }
12573 case CC_BitTests: {
12574 // FIXME: Optimize away range check based on pivot comparisons.
12575 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12576
12577 // The bit test blocks haven't been inserted yet; insert them here.
12578 for (BitTestCase &BTC : BTB->Cases)
12579 CurMF->insert(BBI, BTC.ThisBB);
12580
12581 // Fill in fields of the BitTestBlock.
12582 BTB->Parent = CurMBB;
12583 BTB->Default = Fallthrough;
12584
12585 BTB->DefaultProb = UnhandledProbs;
12586 // If the cases in bit test don't form a contiguous range, we evenly
12587 // distribute the probability on the edge to Fallthrough to two
12588 // successors of CurMBB.
12589 if (!BTB->ContiguousRange) {
12590 BTB->Prob += DefaultProb / 2;
12591 BTB->DefaultProb -= DefaultProb / 2;
12592 }
12593
12594 if (FallthroughUnreachable)
12595 BTB->FallthroughUnreachable = true;
12596
12597 // If we're in the right place, emit the bit test header right now.
12598 if (CurMBB == SwitchMBB) {
12599 visitBitTestHeader(*BTB, SwitchMBB);
12600 BTB->Emitted = true;
12601 }
12602 break;
12603 }
12604 case CC_Range: {
12605 const Value *RHS, *LHS, *MHS;
12606 ISD::CondCode CC;
12607 if (I->Low == I->High) {
12608 // Check Cond == I->Low.
12609 CC = ISD::SETEQ;
12610 LHS = Cond;
12611 RHS=I->Low;
12612 MHS = nullptr;
12613 } else {
12614 // Check I->Low <= Cond <= I->High.
12615 CC = ISD::SETLE;
12616 LHS = I->Low;
12617 MHS = Cond;
12618 RHS = I->High;
12619 }
12620
12621 // If Fallthrough is unreachable, fold away the comparison.
12622 if (FallthroughUnreachable)
12623 CC = ISD::SETTRUE;
12624
12625 // The false probability is the sum of all unhandled cases.
12626 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12627 getCurSDLoc(), I->Prob, UnhandledProbs);
12628
12629 if (CurMBB == SwitchMBB)
12630 visitSwitchCase(CB, SwitchMBB);
12631 else
12632 SL->SwitchCases.push_back(CB);
12633
12634 break;
12635 }
12636 }
12637 CurMBB = Fallthrough;
12638 }
12639}
12640
12641void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12642 const SwitchWorkListItem &W,
12643 Value *Cond,
12644 MachineBasicBlock *SwitchMBB) {
12645 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12646 "Clusters not sorted?");
12647 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12648
12649 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12650 SL->computeSplitWorkItemInfo(W);
12651
12652 // Use the first element on the right as pivot since we will make less-than
12653 // comparisons against it.
12654 CaseClusterIt PivotCluster = FirstRight;
12655 assert(PivotCluster > W.FirstCluster);
12656 assert(PivotCluster <= W.LastCluster);
12657
12658 CaseClusterIt FirstLeft = W.FirstCluster;
12659 CaseClusterIt LastRight = W.LastCluster;
12660
12661 const ConstantInt *Pivot = PivotCluster->Low;
12662
12663 // New blocks will be inserted immediately after the current one.
12665 ++BBI;
12666
12667 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12668 // we can branch to its destination directly if it's squeezed exactly in
12669 // between the known lower bound and Pivot - 1.
12670 MachineBasicBlock *LeftMBB;
12671 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12672 FirstLeft->Low == W.GE &&
12673 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12674 LeftMBB = FirstLeft->MBB;
12675 } else {
12676 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12677 FuncInfo.MF->insert(BBI, LeftMBB);
12678 WorkList.push_back(
12679 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12680 // Put Cond in a virtual register to make it available from the new blocks.
12682 }
12683
12684 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12685 // single cluster, RHS.Low == Pivot, and we can branch to its destination
12686 // directly if RHS.High equals the current upper bound.
12687 MachineBasicBlock *RightMBB;
12688 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12689 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12690 RightMBB = FirstRight->MBB;
12691 } else {
12692 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12693 FuncInfo.MF->insert(BBI, RightMBB);
12694 WorkList.push_back(
12695 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12696 // Put Cond in a virtual register to make it available from the new blocks.
12698 }
12699
12700 // Create the CaseBlock record that will be used to lower the branch.
12701 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12702 getCurSDLoc(), LeftProb, RightProb);
12703
12704 if (W.MBB == SwitchMBB)
12705 visitSwitchCase(CB, SwitchMBB);
12706 else
12707 SL->SwitchCases.push_back(CB);
12708}
12709
12710// Scale CaseProb after peeling a case with the probablity of PeeledCaseProb
12711// from the swith statement.
12713 BranchProbability PeeledCaseProb) {
12714 if (PeeledCaseProb == BranchProbability::getOne())
12716 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12717
12718 uint32_t Numerator = CaseProb.getNumerator();
12719 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12720 return BranchProbability(Numerator, std::max(Numerator, Denominator));
12721}
12722
12723// Try to peel the top probability case if it exceeds the threshold.
12724// Return current MachineBasicBlock for the switch statement if the peeling
12725// does not occur.
12726// If the peeling is performed, return the newly created MachineBasicBlock
12727// for the peeled switch statement. Also update Clusters to remove the peeled
12728// case. PeeledCaseProb is the BranchProbability for the peeled case.
12729MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12730 const SwitchInst &SI, CaseClusterVector &Clusters,
12731 BranchProbability &PeeledCaseProb) {
12732 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12733 // Don't perform if there is only one cluster or optimizing for size.
12734 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12735 TM.getOptLevel() == CodeGenOptLevel::None ||
12736 SwitchMBB->getParent()->getFunction().hasMinSize())
12737 return SwitchMBB;
12738
12739 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12740 unsigned PeeledCaseIndex = 0;
12741 bool SwitchPeeled = false;
12742 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12743 CaseCluster &CC = Clusters[Index];
12744 if (CC.Prob < TopCaseProb)
12745 continue;
12746 TopCaseProb = CC.Prob;
12747 PeeledCaseIndex = Index;
12748 SwitchPeeled = true;
12749 }
12750 if (!SwitchPeeled)
12751 return SwitchMBB;
12752
12753 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12754 << TopCaseProb << "\n");
12755
12756 // Record the MBB for the peeled switch statement.
12757 MachineFunction::iterator BBI(SwitchMBB);
12758 ++BBI;
12759 MachineBasicBlock *PeeledSwitchMBB =
12760 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12761 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12762
12763 ExportFromCurrentBlock(SI.getCondition());
12764 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12765 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12766 nullptr, nullptr, TopCaseProb.getCompl()};
12767 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12768
12769 Clusters.erase(PeeledCaseIt);
12770 for (CaseCluster &CC : Clusters) {
12771 LLVM_DEBUG(
12772 dbgs() << "Scale the probablity for one cluster, before scaling: "
12773 << CC.Prob << "\n");
12774 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
12775 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12776 }
12777 PeeledCaseProb = TopCaseProb;
12778 return PeeledSwitchMBB;
12779}
12780
// Lower a 'switch' instruction: build one cluster per case, merge adjacent
// cases with identical destinations, optionally peel the dominant case, form
// jump-table / bit-test clusters, and finally lower the cluster list through
// a work list, binary-splitting large ranges in optimized builds.
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
    const ConstantInt *CaseVal = I.getCaseValue();
    // Without BPI, treat all edges (cases plus default) as equally likely.
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    // Emit an explicit branch only when the default block is not the layout
    // successor; otherwise rely on fallthrough.
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  // Upgrade dense cluster runs into jump tables / bit tests where profitable.
  SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
                     DAG.getBFI());
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if the peel occurs and
  // DefaultMBB is not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
    DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      // For optimized builds, lower large range as a balanced binary tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}
12868
12869void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
12870 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12871 auto DL = getCurSDLoc();
12872 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12873 setValue(&I, DAG.getStepVector(DL, ResultVT));
12874}
12875
12876void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
12877 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12878 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12879
12880 SDLoc DL = getCurSDLoc();
12881 SDValue V = getValue(I.getOperand(0));
12882 assert(VT == V.getValueType() && "Malformed vector.reverse!");
12883
12884 if (VT.isScalableVector()) {
12885 setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
12886 return;
12887 }
12888
12889 // Use VECTOR_SHUFFLE for the fixed-length vector
12890 // to maintain existing behavior.
12891 SmallVector<int, 8> Mask;
12892 unsigned NumElts = VT.getVectorMinNumElements();
12893 for (unsigned i = 0; i != NumElts; ++i)
12894 Mask.push_back(NumElts - 1 - i);
12895
12896 setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
12897}
12898
12899void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I,
12900 unsigned Factor) {
12901 auto DL = getCurSDLoc();
12902 SDValue InVec = getValue(I.getOperand(0));
12903
12904 SmallVector<EVT, 4> ValueVTs;
12905 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12906 ValueVTs);
12907
12908 EVT OutVT = ValueVTs[0];
12909 unsigned OutNumElts = OutVT.getVectorMinNumElements();
12910
12911 SmallVector<SDValue, 4> SubVecs(Factor);
12912 for (unsigned i = 0; i != Factor; ++i) {
12913 assert(ValueVTs[i] == OutVT && "Expected VTs to be the same");
12914 SubVecs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12915 DAG.getVectorIdxConstant(OutNumElts * i, DL));
12916 }
12917
12918 // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit
12919 // from existing legalisation and combines.
12920 if (OutVT.isFixedLengthVector() && Factor == 2) {
12921 SDValue Even = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
12922 createStrideMask(0, 2, OutNumElts));
12923 SDValue Odd = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
12924 createStrideMask(1, 2, OutNumElts));
12925 SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12926 setValue(&I, Res);
12927 return;
12928 }
12929
12930 SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12931 DAG.getVTList(ValueVTs), SubVecs);
12932 setValue(&I, Res);
12933}
12934
12935void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I,
12936 unsigned Factor) {
12937 auto DL = getCurSDLoc();
12938 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12939 EVT InVT = getValue(I.getOperand(0)).getValueType();
12940 EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12941
12942 SmallVector<SDValue, 8> InVecs(Factor);
12943 for (unsigned i = 0; i < Factor; ++i) {
12944 InVecs[i] = getValue(I.getOperand(i));
12945 assert(InVecs[i].getValueType() == InVecs[0].getValueType() &&
12946 "Expected VTs to be the same");
12947 }
12948
12949 // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit
12950 // from existing legalisation and combines.
12951 if (OutVT.isFixedLengthVector() && Factor == 2) {
12952 unsigned NumElts = InVT.getVectorMinNumElements();
12953 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVecs);
12954 setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12955 createInterleaveMask(NumElts, 2)));
12956 return;
12957 }
12958
12959 SmallVector<EVT, 8> ValueVTs(Factor, InVT);
12960 SDValue Res =
12961 DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, DAG.getVTList(ValueVTs), InVecs);
12962
12964 for (unsigned i = 0; i < Factor; ++i)
12965 Results[i] = Res.getValue(i);
12966
12967 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Results);
12968 setValue(&I, Res);
12969}
12970
12971void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12972 SmallVector<EVT, 4> ValueVTs;
12973 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12974 ValueVTs);
12975 unsigned NumValues = ValueVTs.size();
12976 if (NumValues == 0) return;
12977
12978 SmallVector<SDValue, 4> Values(NumValues);
12979 SDValue Op = getValue(I.getOperand(0));
12980
12981 for (unsigned i = 0; i != NumValues; ++i)
12982 Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12983 SDValue(Op.getNode(), Op.getResNo() + i));
12984
12986 DAG.getVTList(ValueVTs), Values));
12987}
12988
12989void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12990 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12991 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12992
12993 SDLoc DL = getCurSDLoc();
12994 SDValue V1 = getValue(I.getOperand(0));
12995 SDValue V2 = getValue(I.getOperand(1));
12996 const bool IsLeft = I.getIntrinsicID() == Intrinsic::vector_splice_left;
12997
12998 // VECTOR_SHUFFLE doesn't support a scalable or non-constant mask.
12999 if (VT.isScalableVector() || !isa<ConstantInt>(I.getOperand(2))) {
13000 SDValue Offset = DAG.getZExtOrTrunc(
13001 getValue(I.getOperand(2)), DL, TLI.getVectorIdxTy(DAG.getDataLayout()));
13002 setValue(&I, DAG.getNode(IsLeft ? ISD::VECTOR_SPLICE_LEFT
13004 DL, VT, V1, V2, Offset));
13005 return;
13006 }
13007 uint64_t Imm = cast<ConstantInt>(I.getOperand(2))->getZExtValue();
13008
13009 unsigned NumElts = VT.getVectorNumElements();
13010
13011 uint64_t Idx = IsLeft ? Imm : NumElts - Imm;
13012
13013 // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
13014 SmallVector<int, 8> Mask;
13015 for (unsigned i = 0; i < NumElts; ++i)
13016 Mask.push_back(Idx + i);
13017 setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
13018}
13019
13020// Consider the following MIR after SelectionDAG, which produces output in
13021// phyregs in the first case or virtregs in the second case.
13022//
13023// INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
13024// %5:gr32 = COPY $ebx
13025// %6:gr32 = COPY $edx
13026// %1:gr32 = COPY %6:gr32
13027// %0:gr32 = COPY %5:gr32
13028//
13029// INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
13030// %1:gr32 = COPY %6:gr32
13031// %0:gr32 = COPY %5:gr32
13032//
13033// Given %0, we'd like to return $ebx in the first case and %5 in the second.
13034// Given %1, we'd like to return $edx in the first case and %6 in the second.
13035//
13036// If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
13037// to a single virtreg (such as %0). The remaining outputs monotonically
13038// increase in virtreg number from there. If a callbr has no outputs, then it
13039// should not have a corresponding callbr landingpad; in fact, the callbr
13040// landingpad would not even be able to refer to such a callbr.
13043 // There is definitely at least one copy.
13044 assert(MI->getOpcode() == TargetOpcode::COPY &&
13045 "start of copy chain MUST be COPY");
13046 Reg = MI->getOperand(1).getReg();
13047
13048 // If the copied register in the first copy must be virtual.
13049 assert(Reg.isVirtual() && "expected COPY of virtual register");
13050 MI = MRI.def_begin(Reg)->getParent();
13051
13052 // There may be an optional second copy.
13053 if (MI->getOpcode() == TargetOpcode::COPY) {
13054 assert(Reg.isVirtual() && "expected COPY of virtual register");
13055 Reg = MI->getOperand(1).getReg();
13056 assert(Reg.isPhysical() && "expected COPY of physical register");
13057 } else {
13058 // The start of the chain must be an INLINEASM_BR.
13059 assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
13060 "end of copy chain MUST be INLINEASM_BR");
13061 }
13062
13063 return Reg;
13064}
13065
13066// We must do this walk rather than the simpler
13067// setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
13068// otherwise we will end up with copies of virtregs only valid along direct
13069// edges.
13070void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
13071 SmallVector<EVT, 8> ResultVTs;
13072 SmallVector<SDValue, 8> ResultValues;
13073 const auto *CBR =
13074 cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
13075
13076 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13077 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
13078 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
13079
13080 Register InitialDef = FuncInfo.ValueMap[CBR];
13081 SDValue Chain = DAG.getRoot();
13082
13083 // Re-parse the asm constraints string.
13084 TargetLowering::AsmOperandInfoVector TargetConstraints =
13085 TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
13086 for (auto &T : TargetConstraints) {
13087 SDISelAsmOperandInfo OpInfo(T);
13088 if (OpInfo.Type != InlineAsm::isOutput)
13089 continue;
13090
13091 // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
13092 // individual constraint.
13093 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
13094
13095 switch (OpInfo.ConstraintType) {
13098 // Fill in OpInfo.AssignedRegs.Regs.
13099 getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
13100
13101 // getRegistersForValue may produce 1 to many registers based on whether
13102 // the OpInfo.ConstraintVT is legal on the target or not.
13103 for (Register &Reg : OpInfo.AssignedRegs.Regs) {
13104 Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
13105 if (OriginalDef.isPhysical())
13106 FuncInfo.MBB->addLiveIn(OriginalDef);
13107 // Update the assigned registers to use the original defs.
13108 Reg = OriginalDef;
13109 }
13110
13111 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
13112 DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
13113 ResultValues.push_back(V);
13114 ResultVTs.push_back(OpInfo.ConstraintVT);
13115 break;
13116 }
13118 SDValue Flag;
13119 SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
13120 OpInfo, DAG);
13121 ++InitialDef;
13122 ResultValues.push_back(V);
13123 ResultVTs.push_back(OpInfo.ConstraintVT);
13124 break;
13125 }
13126 default:
13127 break;
13128 }
13129 }
13131 DAG.getVTList(ResultVTs), ResultValues);
13132 setValue(&I, V);
13133}
return SDValue()
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
dxil translate DXIL Translate Metadata
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition FastISel.cpp:942
#define Check(C,...)
static Value * getCondition(Instruction *I)
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
static void computeConstraintToUse(const TargetLowering *TLI, TargetLowering::AsmOperandInfo &OpInfo)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
Type::TypeID TypeID
#define T
#define T1
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
#define P(N)
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
This file contains some templates that are useful if you are working with the STL at all.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static bool prepareDAGLevelOperands(ConstraintDecisionInfo &Info, const CallBase &Call, SelectionDAGBuilder &Builder, const TargetLowering &TLI, SelectionDAG &DAG)
Prepare DAG-level operands.
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static bool determineConstraints(ConstraintDecisionInfo &Info, TargetLowering::AsmOperandInfoVector &TargetConstraints, const CallBase &Call, SelectionDAGBuilder &Builder, const TargetLowering &TLI, const TargetMachine &TM, SelectionDAG &DAG, const BasicBlock *EHPadBB)
DetermineConstraints - Find the constraints to use for inline asm operands.
static bool constructOperandInfo(ConstraintDecisionInfo &Info, TargetLowering::AsmOperandInfoVector &TargetConstraints, SelectionDAGBuilder &Builder, const TargetLowering &TLI, ExtraFlags &ExtraInfo)
Construct operand info objects.
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const uint8_t *MatcherTable, size_t &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:119
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint16_t RegSizeInBits(const MCRegisterInfo &MRI, MCRegister RegNo)
Value * RHS
Value * LHS
The Input class is used to parse a yaml document into in-memory structs and vectors.
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
Definition APFloat.cpp:145
static LLVM_ABI const fltSemantics * getArbitraryFPSemantics(StringRef Format)
Returns the fltSemantics for a given arbitrary FP format string, or nullptr if invalid.
Definition APFloat.cpp:6007
Class for arbitrary precision integers.
Definition APInt.h:78
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
Definition Function.cpp:338
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition Argument.h:50
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:130
size_t size() const
Get the array size.
Definition ArrayRef.h:141
iterator begin() const
Definition ArrayRef.h:129
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
@ Nand
*p = ~(old & v)
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition Constants.h:1071
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
Conditional Branch instruction.
Class for constant bytes.
Definition Constants.h:281
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
Definition Constants.h:742
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1297
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
A signed pointer, in the ptrauth sense.
Definition Constants.h:1204
uint64_t getZExtValue() const
Constant Vector Declarations.
Definition Constants.h:663
This is an important base class in LLVM.
Definition Constant.h:43
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isBigEndian() const
Definition DataLayout.h:218
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * getInlinedAt() const
Definition DebugLoc.cpp:67
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition DenseMap.h:114
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
Class representing an expression and its matching format.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
bool allowReassoc() const
Flag queries.
Definition FMF.h:67
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:873
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Definition Function.h:809
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
Check if an attribute is in the list of attributes.
Definition Function.cpp:740
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:251
size_t arg_size() const
Definition Function.h:901
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
Garbage collection metadata for a single function.
Definition GCMetadata.h:80
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
bool isInBounds() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
void setMemConstraint(ConstraintCode C)
setMemConstraint - Augment an existing flag with the constraint code for a memory constraint.
Definition InlineAsm.h:414
This instruction inserts a struct field or array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static LocationSize upperBound(uint64_t Value)
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
Machine Value Type.
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition MapVector.h:118
bool contains(const KeyT &Key) const
Definition MapVector.h:148
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
Metadata wrapper in the Value hierarchy.
Definition Metadata.h:184
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const CondBrInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB, MCSymbol *&BeginLabel)
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce a value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemccpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI) const
Emit target-specific code that performs a memccpy, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrstr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, const CallInst *CI) const
Emit target-specific code that performs a strstr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo, const CallInst *CI) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy, const CallInst *CI) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void resize(size_type N)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
constexpr bool empty() const
Check if the string is empty.
Definition StringRef.h:141
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:138
Multiway switch.
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Function * getSSPStackGuardCheck(const Module &M, const LibcallLoweringInfo &Libcalls) const
If the target has a standard stack protection check function that performs validation and error handl...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr, CodeGenOptLevel OptLevel=CodeGenOptLevel::Default) const
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual Value * getSDagStackGuard(const Module &M, const LibcallLoweringInfo &Libcalls) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_Latency
The latency of instruction.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
Definition Type.cpp:184
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:236
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Unconditional Branch instruction.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_iterator op_begin()
Definition User.h:259
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
const ParentTy * getParent() const
Definition ilist_node.h:34
A raw_ostream that writes to an std::string.
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition CallingConv.h:60
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:261
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CONVERGENCECTRL_ANCHOR
The llvm.experimental.convergence.* intrinsics.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition ISDOpcodes.h:511
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition ISDOpcodes.h:45
@ SET_FPENV
Sets the current floating-point environment.
@ ATOMIC_LOAD_FMINIMUMNUM
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ COND_LOOP
COND_LOOP is a conditional branch to self, used for implementing efficient conditional traps.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
Definition ISDOpcodes.h:168
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition ISDOpcodes.h:600
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
Definition ISDOpcodes.h:127
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:783
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition ISDOpcodes.h:394
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:400
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:857
@ CTTZ_ELTS
Returns the number of number of trailing (least significant) zero elements in a vector.
@ ATOMIC_LOAD_USUB_COND
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
Definition ISDOpcodes.h:172
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:884
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
Definition ISDOpcodes.h:528
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
Definition ISDOpcodes.h:515
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:997
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:778
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition ISDOpcodes.h:407
@ CONVERT_FROM_ARBITRARY_FP
CONVERT_FROM_ARBITRARY_FP - This operator converts from an arbitrary floating-point represented as an...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ ATOMIC_LOAD_USUB_SAT
@ CTLZ_ZERO_POISON
Definition ISDOpcodes.h:792
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition ISDOpcodes.h:156
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
Definition ISDOpcodes.h:979
@ CONVERGENCECTRL_GLUE
This does not correspond to any convergence control intrinsic.
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:848
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
Definition ISDOpcodes.h:117
@ CONVERGENCECTRL_ENTRY
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ PARTIAL_REDUCE_FMLA
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
Definition ISDOpcodes.h:635
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:541
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition ISDOpcodes.h:548
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:800
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corrosponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
Definition ISDOpcodes.h:974
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ ATOMIC_LOAD_FMAXIMUM
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:769
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
Definition ISDOpcodes.h:78
@ PtrAuthGlobalAddress
A ptrauth constant.
Definition ISDOpcodes.h:100
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition ISDOpcodes.h:48
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
Definition ISDOpcodes.h:139
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:854
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
Definition ISDOpcodes.h:135
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ PATCHPOINT
The llvm.experimental.patchpoint.
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ ATOMIC_LOAD_FMINIMUM
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
Definition ISDOpcodes.h:653
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
Definition ISDOpcodes.h:640
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:413
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:982
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:809
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ ATOMIC_LOAD_FMAXIMUMNUM
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
Definition ISDOpcodes.h:150
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ ATOMIC_LOAD_UDEC_WRAP
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition ISDOpcodes.h:500
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:930
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ RELOC_NONE
Issue a no-op relocation against a given symbol at the current location.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:735
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
Definition ISDOpcodes.h:657
@ STRICT_FADD
Constrained versions of the binary floating point operators.
Definition ISDOpcodes.h:427
@ STACKMAP
The llvm.experimental.stackmap intrinsic.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
Definition ISDOpcodes.h:791
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:963
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition ISDOpcodes.h:699
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
Definition ISDOpcodes.h:122
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ CONVERGENCECTRL_LOOP
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:949
@ VECREDUCE_FMINIMUM
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
Definition ISDOpcodes.h:162
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:860
@ BRCOND
BRCOND - Conditional branch.
@ VECREDUCE_SEQ_FMUL
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ ATOMIC_LOAD_UINC_WRAP
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
Definition ISDOpcodes.h:624
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ CTTZ_ELTS_ZERO_POISON
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ ABS_MIN_POISON
ABS with a poison result for INT_MIN.
Definition ISDOpcodes.h:751
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
Offsets
Offsets in bytes from the start of the input buffer.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
ExceptionBehavior
Exception behavior used for floating point operations.
Definition FPEnv.h:39
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
Definition FPEnv.h:41
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition FPEnv.h:40
constexpr float log2ef
Definition MathExtras.h:51
constexpr double e
constexpr float ln2f
Definition MathExtras.h:49
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:557
@ Length
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
Definition Analysis.cpp:237
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
SDValue peekThroughFreeze(SDValue V)
Return the non-frozen source operand of V if it exists.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:315
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
auto cast_or_null(const Y &Val)
Definition Casting.h:714
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
gep_type_iterator gep_type_end(const User *GEP)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2172
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:156
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1151
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
Definition Analysis.cpp:72
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
Definition STLExtras.h:853
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
Definition STLExtras.h:299
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
Definition Analysis.cpp:203
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2289
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:221
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either NaN or the non-NaN.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition Analysis.cpp:539
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
Definition Analysis.cpp:225
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition Analysis.cpp:181
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2165
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition Analysis.cpp:33
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:347
@ Default
The result value is uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
#define NC
Definition regutils.h:42
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:403
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
uint64_t getScalarStoreSize() const
Definition ValueTypes.h:410
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:292
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:308
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
Definition ValueTypes.h:367
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition ValueTypes.h:98
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
bool isRISCVVectorTuple() const
Return true if this is a RISCV vector tuple type.
Definition ValueTypes.h:187
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:389
bool isFixedLengthVector() const
Definition ValueTypes.h:189
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition ValueTypes.h:300
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:182
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
Definition ValueTypes.h:121
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
void setPointerAddrSpace(unsigned AS)
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
Definition InlineAsm.h:128
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:262
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
RegsForValue()=default
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:334
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
Register Reg
The virtual register containing the index of the jump table entry to jump to.
MachineBasicBlock * Default
The MBB of the default bb, which is a successor of the range check MBB.
unsigned JTI
The JumpTableIndex for this jump table in the function.
MachineBasicBlock * MBB
The MBB into which to emit the code for the indirect jump.
std::optional< SDLoc > SL
The debug location of the instruction this JumpTable was produced from.
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)