LLVM 23.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
39#include <numeric>
40#include <optional>
41
// Debug category for LLVM_DEBUG output from this file (-debug-only=globalisel-utils).
#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;
46
// Constrain \p Reg to \p RegClass in place; if the existing constraints are
// incompatible, return a fresh virtual register of the requested class
// instead. NOTE(review): the leading signature line is elided in this view.
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI, Register Reg,
                                  const TargetRegisterClass &RegClass) {
  // Try to constrain in place; on failure hand back a new vreg of RegClass.
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
56
// Constrain the register in \p RegMO to \p RegClass. If the register cannot
// be constrained in place, a new virtual register is created and connected to
// the old one by a COPY inserted next to \p InsertPt; any registered
// GISelChangeObserver is kept informed of operand/class changes.
// NOTE(review): the leading signature lines are elided in this view.
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      // Use: the constrained value must be available before InsertPt.
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      // Def: copy the constrained result back into Reg after InsertPt.
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    // The class changed in place: notify the observer about the def and about
    // every use that was implicitly retyped.
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
108
// Constrain operand \p OpIdx (\p RegMO) of an instruction described by \p II
// to the class the descriptor requires, preferring a regbank-derived strict
// sub-class when one exists. Returns the (possibly new) register, or \p Reg
// unchanged when no class constraint applies.
// NOTE(review): the leading signature lines are elided in this view.
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  if (OpRC) {
    // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
    // can have multiple regbanks for a superclass that combine different
    // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
    // resolved by targets during regbankselect should not be overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}
155
// Constrain every explicit register operand of the already-selected
// instruction \p I to the class its MCInstrDesc demands, and tie use operands
// to defs per the descriptor's TIED_TO constraints.
// NOTE(review): the leading signature line and the local MachineRegisterInfo
// declaration are elided in this view.
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
}
199
// Return true if every use of \p DstReg could be replaced by \p SrcReg
// without violating type or register-class/bank constraints.
// NOTE(review): the leading signature line is elided in this view.
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;

  // Otherwise match if the Src is already a regclass that is covered by the Dst
  // RegBank.
  return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
         cast<const RegisterBank *>(DstRBC)->covers(
             *MRI.getRegClassOrNull(SrcReg));
}
220
// Return true if \p MI can be deleted: all of its defs are virtual registers
// with no non-debug uses, and the instruction itself has no side effects.
// NOTE(review): the leading signature line is elided in this view.
                            const MachineRegisterInfo &MRI) {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    // A physreg def or any remaining real use keeps the instruction alive.
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return MI.wouldBeTriviallyDead();
}
233
// Shared diagnostic helper: append the function name when the remark lacks a
// usable debug location (or a fatal error follows), then either raise a fatal
// usage error (errors with GlobalISel abort enabled) or emit the remark.
// NOTE(review): the leading signature lines and the initializer of
// IsGlobalISelAbortEnabled are elided in this view.
                                 MachineFunction &MF,
  bool IsGlobalISelAbortEnabled =
  bool IsFatal = Severity == DS_Error && IsGlobalISelAbortEnabled;
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    reportFatalUsageError(Twine(R.getMsg()));
  else
    MORE.emit(R);
}
251
257
264
// Build a "GISelFailure" missed-optimization remark for \p MI, appending the
// instruction itself only when expensive remarks are allowed, and forward it
// to the remark-based failure reporter.
// NOTE(review): the leading signature lines and the first half of the
// expensive-remark condition are elided in this view.
                               const char *PassName, StringRef Msg,
                               const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
      MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, MORE, R);
}
278
279unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
280 switch (MinMaxOpc) {
281 case TargetOpcode::G_SMIN:
282 return TargetOpcode::G_SMAX;
283 case TargetOpcode::G_SMAX:
284 return TargetOpcode::G_SMIN;
285 case TargetOpcode::G_UMIN:
286 return TargetOpcode::G_UMAX;
287 case TargetOpcode::G_UMAX:
288 return TargetOpcode::G_UMIN;
289 default:
290 llvm_unreachable("unrecognized opcode");
291 }
292}
293
294std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
295 const MachineRegisterInfo &MRI) {
296 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
297 VReg, MRI, /*LookThroughInstrs*/ false);
298 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
299 "Value found while looking through instrs");
300 if (!ValAndVReg)
301 return std::nullopt;
302 return ValAndVReg->Value;
303}
304
// Return the APInt of the G_CONSTANT that defines \p Reg; asserts if the
// defining instruction is not a G_CONSTANT.
// NOTE(review): the leading signature line is elided in this view.
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *Const = MRI.getVRegDef(Reg);
  assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
         "expected a G_CONSTANT on Reg");
  return Const->getOperand(1).getCImm()->getValue();
}
312
// Return the G_CONSTANT value of \p VReg sign-extended into an int64_t, or
// std::nullopt when the register is not constant or wider than 64 bits.
// NOTE(review): the signature's second line is elided in this view.
std::optional<int64_t>
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  // Only values that fit in 64 bits can be represented as int64_t.
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}
320
321namespace {
322
// This function is used in many places, and as such, it has some
// micro-optimizations to try and make it as fast as it can be.
//
// - We use template arguments to avoid an indirect call caused by passing a
//   function_ref/std::function
// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
//   Instead it returns true/false and places the result in a pre-constructed
//   APInt.
//
// Please change this function carefully and benchmark your changes.
template <bool (*IsConstantOpcode)(const MachineInstr *),
          bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
std::optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
                                  bool LookThroughInstrs = true,
                                  bool LookThroughAnyExt = false) {
  // NOTE(review): the declarations of MI and SeenOpcodes are elided in this
  // view; SeenOpcodes collects (opcode, result-width) pairs so the constant
  // can be re-cast below.

  // Walk up the def chain through casts/copies until we hit a constant (or a
  // non-look-through opcode).
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      // Remember the cast so it can be replayed once the constant is found.
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      // Copies from physical registers cannot be reasoned about here.
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;

  APInt Val;
  if (!GetAPCstValue(MI, Val))
    return std::nullopt;
  // Replay the recorded casts in reverse to recover the value as seen at the
  // original VReg's width.
  for (auto &Pair : reverse(SeenOpcodes)) {
    switch (Pair.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Pair.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Pair.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Pair.second);
      break;
    }
  }

  return ValueAndVReg{std::move(Val), VReg};
}
392
393bool isIConstant(const MachineInstr *MI) {
394 if (!MI)
395 return false;
396 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
397}
398
399bool isFConstant(const MachineInstr *MI) {
400 if (!MI)
401 return false;
402 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
403}
404
405bool isAnyConstant(const MachineInstr *MI) {
406 if (!MI)
407 return false;
408 unsigned Opc = MI->getOpcode();
409 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
410}
411
412bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
413 const MachineOperand &CstVal = MI->getOperand(1);
414 if (!CstVal.isCImm())
415 return false;
416 Result = CstVal.getCImm()->getValue();
417 return true;
418}
419
// Extract the payload of a G_CONSTANT or G_FCONSTANT into Result as an APInt.
// Returns false when operand 1 is neither a CImm nor an FPImm.
// NOTE(review): the FPImm-to-APInt bitcast statement in the else-if branch is
// elided in this view.
bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    Result = CstVal.getCImm()->getValue();
  else if (CstVal.isFPImm())
  else
    return false;
  return true;
}
430
431} // end anonymous namespace
432
// Public wrapper: find an integer G_CONSTANT feeding \p VReg, optionally
// looking through copies/casts.
// NOTE(review): the leading signature line is elided in this view.
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
      VReg, MRI, LookThroughInstrs);
}
438
// Public wrapper: find either an integer or FP constant feeding \p VReg,
// optionally looking through copies/casts (and G_ANYEXT when requested).
// NOTE(review): the leading signature line is elided in this view.
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough<isAnyConstant,
                                           getCImmOrFPImmAsAPInt>(
      VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
}
446
447std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
448 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
449 auto Reg =
450 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
451 VReg, MRI, LookThroughInstrs);
452 if (!Reg)
453 return std::nullopt;
454
455 APFloat FloatVal(getFltSemanticForLLT(LLT::scalar(Reg->Value.getBitWidth())),
456 Reg->Value);
457 return FPValueAndVReg{FloatVal, Reg->VReg};
458}
459
// Return the ConstantFP of the G_FCONSTANT that directly defines \p VReg, or
// nullptr when the defining instruction is not a G_FCONSTANT.
// NOTE(review): the signature's second line is elided in this view.
const ConstantFP *
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}
467
// Walk the def chain of \p Reg through COPYs and generic optimization hints,
// returning the underlying defining instruction together with the earliest
// source register on that chain. Returns nullopt/empty when the register has
// no def or no valid type.
// NOTE(review): the signature's second line is elided in this view.
std::optional<DefinitionAndSourceRegister>
  Register DefSrcReg = Reg;
  // This assumes that the code is in SSA form, so there should only be one
  // definition.
  auto DefIt = MRI.def_begin(Reg);
  if (DefIt == MRI.def_end())
    return {};
  MachineOperand &DefOpnd = *DefIt;
  MachineInstr *DefMI = DefOpnd.getParent();
  auto DstTy = MRI.getType(DefOpnd.getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  // Look through value-preserving instructions (COPY and hint opcodes) as
  // long as their source still carries a valid type.
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}
493
// Convenience wrapper: the defining MachineInstr of \p Reg, looking through
// copies/hints; nullptr when none is found.
// NOTE(review): the leading signature line and the call argument line of
// getDefSrcRegIgnoringCopies are elided in this view.
                                          const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}
500
// Convenience wrapper: the underlying source register of \p Reg, looking
// through copies/hints; an invalid Register() when none is found.
// NOTE(review): the leading signature line and the call argument line of
// getDefSrcRegIgnoringCopies are elided in this view.
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
  return DefSrcReg ? DefSrcReg->Reg : Register();
}
507
// Split \p Reg into \p NumParts pieces of type \p Ty with a single
// G_UNMERGE_VALUES into freshly created vregs.
// NOTE(review): the VRegs out-parameter line and the loop body that creates
// one generic vreg per part are elided in this view.
void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  for (int i = 0; i < NumParts; ++i)
  MIRBuilder.buildUnmerge(VRegs, Reg);
}
516
// Split \p Reg (of type \p RegTy) into as many \p MainTy pieces as fit, with
// any remainder returned through \p LeftoverTy / \p LeftoverRegs. Prefers a
// plain unmerge; otherwise performs an irregular split via unmerge+concat for
// vectors or G_EXTRACT for scalars. Always returns true.
// NOTE(review): the "SmallVectorImpl<Register> &VRegs" parameter line is
// elided in this view; VRegs receives the MainTy pieces.
bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                        SmallVectorImpl<Register> &LeftoverRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  assert(!LeftoverTy.isValid() && "this is an out argument");

  unsigned RegSize = RegTy.getSizeInBits();
  unsigned MainSize = MainTy.getSizeInBits();
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;

  // Use an unmerge when possible.
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
    MIRBuilder.buildUnmerge(VRegs, Reg);
    return true;
  }

  // Try to use unmerge for irregular vector split where possible
  // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
  // leftover, it becomes:
  // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
  // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
  if (RegTy.isVector() && MainTy.isVector()) {
    unsigned RegNumElts = RegTy.getNumElements();
    unsigned MainNumElts = MainTy.getNumElements();
    unsigned LeftoverNumElts = RegNumElts % MainNumElts;
    // If can unmerge to LeftoverTy, do it
    if (MainNumElts % LeftoverNumElts == 0 &&
        RegNumElts % LeftoverNumElts == 0 &&
        RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
        LeftoverNumElts > 1) {
      LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());

      // Unmerge the SrcReg to LeftoverTy vectors
      SmallVector<Register, 4> UnmergeValues;
      extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
                   MIRBuilder, MRI);

      // Find how many LeftoverTy makes one MainTy
      unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
      unsigned NumOfLeftoverVal =
          ((RegNumElts % MainNumElts) / LeftoverNumElts);

      // Create as many MainTy as possible using unmerged value
      SmallVector<Register, 4> MergeValues;
      for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
        MergeValues.push_back(UnmergeValues[I]);
        if (MergeValues.size() == LeftoverPerMain) {
          VRegs.push_back(
              MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
          MergeValues.clear();
        }
      }
      // Populate LeftoverRegs with the leftovers
      for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
           I < UnmergeValues.size(); I++) {
        LeftoverRegs.push_back(UnmergeValues[I]);
      }
      return true;
    }
  }
  // Perform irregular split. Leftover is last element of RegPieces.
  if (MainTy.isVector()) {
    SmallVector<Register, 8> RegPieces;
    extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
                       MRI);
    for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
      VRegs.push_back(RegPieces[i]);
    LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
    LeftoverTy = MRI.getType(LeftoverRegs[0]);
    return true;
  }

  LeftoverTy = LLT::integer(LeftoverSize);
  // For irregular sizes, extract the individual parts.
  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    VRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
  }

  for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
       Offset += LeftoverSize) {
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
    LeftoverRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, Offset);
  }

  return true;
}
610
// Split the vector \p Reg into sub-vectors of \p NumElts elements. A perfect
// split uses a single unmerge; otherwise all elements are unmerged first and
// then regrouped, with the remainder forming a final (smaller) piece.
// NOTE(review): the "SmallVectorImpl<Register> &VRegs" parameter line and the
// declaration of the Elts vector are elided in this view.
void llvm::extractVectorParts(Register Reg, unsigned NumElts,
                              MachineIRBuilder &MIRBuilder,
                              MachineRegisterInfo &MRI) {
  LLT RegTy = MRI.getType(Reg);
  assert(RegTy.isVector() && "Expected a vector type");

  LLT EltTy = RegTy.getElementType();
  LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
  unsigned RegNumElts = RegTy.getNumElements();
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;

  // Perfect split without leftover
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
                        MRI);

  // Irregular split. Provide direct access to all elements for artifact
  // combiner using unmerge to elements. Then build vectors with NumElts
  // elements. Remaining element(s) will be (used to build vector) Leftover.
  extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);

  unsigned Offset = 0;
  // Requested sub-vectors of NarrowTy.
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
    ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
    VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
  }

  // Leftover element(s).
  if (LeftoverNumElts == 1) {
    VRegs.push_back(Elts[Offset]);
  } else {
    LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
    ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
    VRegs.push_back(
        MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
  }
}
652
// Return the copy-chain-skipping def of a register only if it has the given
// opcode, otherwise nullptr.
// NOTE(review): the leading signature line and the DefMI initialization (via
// getDefIgnoringCopies) are elided in this view.
                                      const MachineRegisterInfo &MRI) {
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
658
// Build an APFloat of bit-size \p Size (16, 32 or 64) holding \p Val;
// asserts (llvm_unreachable) on any other size.
// NOTE(review): the conversion of APF to half precision (with its rounding
// mode and the Ignored flag) is elided in this view before the return.
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  return APF;
}
671
672std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
673 const Register Op1,
674 const Register Op2,
675 const MachineRegisterInfo &MRI) {
676 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
677 if (!MaybeOp2Cst)
678 return std::nullopt;
679
680 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
681 if (!MaybeOp1Cst)
682 return std::nullopt;
683
684 const APInt &C1 = MaybeOp1Cst->Value;
685 const APInt &C2 = MaybeOp2Cst->Value;
686 switch (Opcode) {
687 default:
688 break;
689 case TargetOpcode::G_ADD:
690 return C1 + C2;
691 case TargetOpcode::G_PTR_ADD:
692 // Types can be of different width here.
693 // Result needs to be the same width as C1, so trunc or sext C2.
694 return C1 + C2.sextOrTrunc(C1.getBitWidth());
695 case TargetOpcode::G_AND:
696 return C1 & C2;
697 case TargetOpcode::G_ASHR:
698 return C1.ashr(C2);
699 case TargetOpcode::G_LSHR:
700 return C1.lshr(C2);
701 case TargetOpcode::G_MUL:
702 return C1 * C2;
703 case TargetOpcode::G_OR:
704 return C1 | C2;
705 case TargetOpcode::G_SHL:
706 return C1 << C2;
707 case TargetOpcode::G_SUB:
708 return C1 - C2;
709 case TargetOpcode::G_XOR:
710 return C1 ^ C2;
711 case TargetOpcode::G_UDIV:
712 if (!C2.getBoolValue())
713 break;
714 return C1.udiv(C2);
715 case TargetOpcode::G_SDIV:
716 if (!C2.getBoolValue())
717 break;
718 return C1.sdiv(C2);
719 case TargetOpcode::G_UREM:
720 if (!C2.getBoolValue())
721 break;
722 return C1.urem(C2);
723 case TargetOpcode::G_SREM:
724 if (!C2.getBoolValue())
725 break;
726 return C1.srem(C2);
727 case TargetOpcode::G_SMIN:
728 return APIntOps::smin(C1, C2);
729 case TargetOpcode::G_SMAX:
730 return APIntOps::smax(C1, C2);
731 case TargetOpcode::G_UMIN:
732 return APIntOps::umin(C1, C2);
733 case TargetOpcode::G_UMAX:
734 return APIntOps::umax(C1, C2);
735 }
736
737 return std::nullopt;
738}
739
// Fold an FP binary G_F* operation whose operands are both G_FCONSTANTs.
// Returns std::nullopt for non-constant operands and unsupported opcodes.
// NOTE(review): the mutating APFloat calls for G_FADD/G_FSUB/G_FMUL/G_FDIV
// (add/subtract/multiply/divide of C1 by C2) are elided in this view; each
// updates C1 in place before it is returned.
std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;

  // C1 is a mutable copy; C2 is only read.
  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    return C1;
  case TargetOpcode::G_FSUB:
    return C1;
  case TargetOpcode::G_FMUL:
    return C1;
  case TargetOpcode::G_FDIV:
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINIMUMNUM:
    return minimumnum(C1, C2);
  case TargetOpcode::G_FMAXIMUMNUM:
    return maximumnum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return std::nullopt;
}
797
// Return the G_BUILD_VECTOR feeding \p Reg, looking through one layout-
// preserving G_BITCAST (same element count and scalar width on both sides).
// NOTE(review): the leading signature line is elided in this view.
                                     const MachineRegisterInfo &MRI) {
  if (auto *BV = getOpcodeDef<GBuildVector>(Reg, MRI))
    return BV;

  auto *Bitcast = getOpcodeDef(TargetOpcode::G_BITCAST, Reg, MRI);
  if (!Bitcast)
    return nullptr;

  auto [Dst, DstTy, Src, SrcTy] = Bitcast->getFirst2RegLLTs();
  if (!SrcTy.isVector() || !DstTy.isVector())
    return nullptr;
  // Only a bitcast that keeps the per-element layout intact is transparent
  // for per-element constant folding.
  if (SrcTy.getElementCount() != DstTy.getElementCount())
    return nullptr;
  if (SrcTy.getScalarSizeInBits() != DstTy.getScalarSizeInBits())
    return nullptr;

  return getOpcodeDef<GBuildVector>(Src, MRI);
}
817
// Element-wise constant fold of a binary op over two build-vector-like
// operands. An empty vector is returned when either operand is not a
// build vector or any lane fails to fold.
// NOTE(review): the return-type line of the signature is elided in this view.
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getBuildVectorLikeDef(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getBuildVectorLikeDef(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  // Fold lane-by-lane; bail out entirely on the first non-foldable lane.
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}
840
// Infer the alignment of the memory described by \p MPO: from the frame
// object for fixed-stack pseudo values, from the IR value's pointer alignment
// otherwise, defaulting to 1.
// NOTE(review): the leading signature line and the guard that matches MPO.V
// against a fixed-stack pseudo source value (binding FSPV) are elided in this
// view.
                                const MachinePointerInfo &MPO) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    // Fold the offset within the frame object into the known alignment.
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}
857
// Return (creating if necessary) the virtual register that holds the live-in
// physical register \p PhysReg at function entry, inserting the defining COPY
// at the top of the entry block when it is missing.
// NOTE(review): the leading signature line and the local MachineRegisterInfo
// declaration are elided in this view.
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
890
891std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
892 const Register Op1, uint64_t Imm,
893 const MachineRegisterInfo &MRI) {
894 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
895 if (MaybeOp1Cst) {
896 switch (Opcode) {
897 default:
898 break;
899 case TargetOpcode::G_SEXT_INREG: {
900 LLT Ty = MRI.getType(Op1);
901 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
902 }
903 }
904 }
905 return std::nullopt;
906}
907
908std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
909 const Register Op0,
910 const MachineRegisterInfo &MRI) {
911 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
912 if (!Val)
913 return Val;
914
915 const unsigned DstSize = DstTy.getScalarSizeInBits();
916
917 switch (Opcode) {
918 case TargetOpcode::G_SEXT:
919 return Val->sext(DstSize);
920 case TargetOpcode::G_ZEXT:
921 case TargetOpcode::G_ANYEXT:
922 // TODO: DAG considers target preference when constant folding any_extend.
923 return Val->zext(DstSize);
924 default:
925 break;
926 }
927
928 llvm_unreachable("unexpected cast opcode to constant fold");
929}
930
// Fold G_SITOFP/G_UITOFP of an integer constant into an APFloat of DstTy's
// semantics; std::nullopt when Src is not constant.
// NOTE(review): the rounding-mode argument line of convertFromAPInt is elided
// in this view.
std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    // Signed conversion only for G_SITOFP.
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
    return DstVal;
  }
  return std::nullopt;
}
943
// Fold a unary integer op (ctlz/cttz/ctpop/abs/bswap/bitreverse) over a
// constant scalar or a build-vector of constants; an empty result signals
// "could not fold".
// NOTE(review): the return-type line of the signature is elided in this view.
llvm::ConstantFoldUnaryIntOp(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  unsigned EltBits = DstTy.getScalarSizeInBits();
  // Per-element fold; results are produced at the destination's element width.
  auto Fold = [Opcode, EltBits](const APInt &V) -> APInt {
    switch (Opcode) {
    case TargetOpcode::G_CTLZ:
    case TargetOpcode::G_CTLZ_ZERO_POISON:
      return APInt(EltBits, V.countl_zero());
    case TargetOpcode::G_CTTZ:
    case TargetOpcode::G_CTTZ_ZERO_POISON:
      return APInt(EltBits, V.countr_zero());
    case TargetOpcode::G_CTPOP:
      return APInt(EltBits, V.popcount());
    case TargetOpcode::G_ABS:
      return V.abs();
    case TargetOpcode::G_BSWAP:
      return V.byteSwap();
    case TargetOpcode::G_BITREVERSE:
      return V.reverseBits();
    }
    llvm_unreachable("unexpected opcode in ConstantFoldUnaryIntOp");
  };

  auto tryFoldScalar = [&](Register R) -> std::optional<APInt> {
    if (auto MaybeCst = getIConstantVRegVal(R, MRI))
      return Fold(*MaybeCst);
    return std::nullopt;
  };
  if (MRI.getType(Src).isVector()) {
    // Vector case: every lane of the build vector must fold.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return {};
    SmallVector<APInt> Folded;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        Folded.emplace_back(std::move(*MaybeFold));
        continue;
      }
      return {};
    }
    return Folded;
  }
  if (auto MaybeCst = tryFoldScalar(Src))
    return {std::move(*MaybeCst)};
  return {};
}
991
// Fold an integer compare of constant operands (scalar or build-vector),
// producing one result per lane extended to DstScalarSizeInBits according to
// ExtOp (G_SEXT yields all-ones for true; G_ZEXT/G_ANYEXT yield 1).
// NOTE(review): the CmpInst predicate case labels in TryFoldScalar's switch
// are elided in this view; each visible return line corresponds to one
// predicate (eq/ne/ugt/uge/ult/ule/sgt/sge/slt/sle in order).
std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                       unsigned DstScalarSizeInBits, unsigned ExtOp,
                       const MachineRegisterInfo &MRI) {
  assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
         ExtOp == TargetOpcode::G_ANYEXT);

  const LLT Ty = MRI.getType(Op1);

  // Materialize the boolean result at the destination width.
  auto GetICmpResultCst = [&](bool IsTrue) {
    if (IsTrue)
      return ExtOp == TargetOpcode::G_SEXT
                 ? APInt::getAllOnes(DstScalarSizeInBits)
                 : APInt::getOneBitSet(DstScalarSizeInBits, 0);
    return APInt::getZero(DstScalarSizeInBits);
  };

  auto TryFoldScalar = [&](Register LHS, Register RHS) -> std::optional<APInt> {
    auto RHSCst = getIConstantVRegVal(RHS, MRI);
    if (!RHSCst)
      return std::nullopt;
    auto LHSCst = getIConstantVRegVal(LHS, MRI);
    if (!LHSCst)
      return std::nullopt;

    switch (Pred) {
      return GetICmpResultCst(LHSCst->eq(*RHSCst));
      return GetICmpResultCst(LHSCst->ne(*RHSCst));
      return GetICmpResultCst(LHSCst->ugt(*RHSCst));
      return GetICmpResultCst(LHSCst->uge(*RHSCst));
      return GetICmpResultCst(LHSCst->ult(*RHSCst));
      return GetICmpResultCst(LHSCst->ule(*RHSCst));
      return GetICmpResultCst(LHSCst->sgt(*RHSCst));
      return GetICmpResultCst(LHSCst->sge(*RHSCst));
      return GetICmpResultCst(LHSCst->slt(*RHSCst));
      return GetICmpResultCst(LHSCst->sle(*RHSCst));
    default:
      return std::nullopt;
    }
  };

  SmallVector<APInt> FoldedICmps;

  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
    auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    if (!BV1 || !BV2)
      return std::nullopt;
    assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
    for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
      if (auto MaybeFold =
              TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
        FoldedICmps.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedICmps;
  }

  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
    FoldedICmps.emplace_back(*MaybeCst);
    return FoldedICmps;
  }

  return std::nullopt;
}
1070
1072 GISelValueTracking *VT, bool OrNegative) {
1073 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1075 if (!DefSrcReg)
1076 return false;
1077
1078 const MachineInstr &MI = *DefSrcReg->MI;
1079 const LLT Ty = MRI.getType(Reg);
1080
1081 auto IsPow2 = [OrNegative](const APInt &V) {
1082 return V.isPowerOf2() || (OrNegative && V.isNegatedPowerOf2());
1083 };
1084
1085 switch (MI.getOpcode()) {
1086 case TargetOpcode::G_CONSTANT: {
1087 unsigned BitWidth = Ty.getScalarSizeInBits();
1088 const ConstantInt *CI = MI.getOperand(1).getCImm();
1089 return IsPow2(CI->getValue().zextOrTrunc(BitWidth));
1090 }
1091 case TargetOpcode::G_SHL: {
1092 // A left-shift of a constant one will have exactly one bit set because
1093 // shifting the bit off the end is undefined.
1094
1095 // TODO: Constant splat
1096 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1097 if (*ConstLHS == 1)
1098 return true;
1099 }
1100
1101 break;
1102 }
1103 case TargetOpcode::G_LSHR: {
1104 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1105 if (ConstLHS->isSignMask())
1106 return true;
1107 }
1108
1109 break;
1110 }
1111 case TargetOpcode::G_BUILD_VECTOR: {
1112 // TODO: Probably should have a recursion depth guard since you could have
1113 // bitcasted vector elements.
1114 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1115 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT, OrNegative))
1116 return false;
1117
1118 return true;
1119 }
1120 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1121 // Only handle constants since we would need to know if number of leading
1122 // zeros is greater than the truncation amount.
1123 const unsigned BitWidth = Ty.getScalarSizeInBits();
1124 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1125 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1126 if (!Const || !IsPow2(Const->zextOrTrunc(BitWidth)))
1127 return false;
1128 }
1129
1130 return true;
1131 }
1132 default:
1133 break;
1134 }
1135
1136 if (!VT)
1137 return false;
1138
1139 // More could be done here, though the above checks are enough
1140 // to handle some common cases.
1141
1142 // Fall back to computeKnownBits to catch other known cases.
1143 KnownBits Known = VT->getKnownBits(Reg);
1144 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1145}
1146
1150
1151LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1152 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1153 return OrigTy;
1154
1155 if (OrigTy.isVector() && TargetTy.isVector()) {
1156 LLT OrigElt = OrigTy.getElementType();
1157 LLT TargetElt = TargetTy.getElementType();
1158
1159 // TODO: The docstring for this function says the intention is to use this
1160 // function to build MERGE/UNMERGE instructions. It won't be the case that
1161 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1162 // could implement getLCMType between the two in the future if there was a
1163 // need, but it is not worth it now as this function should not be used in
1164 // that way.
1165 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1166 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1167 "getLCMType not implemented between fixed and scalable vectors.");
1168
1169 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1170 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1171 TargetTy.getElementCount().getKnownMinValue());
1172 // Prefer the original element type.
1174 TargetTy.getElementCount().getKnownMinValue());
1175 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1176 OrigTy.getElementType());
1177 }
1178 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1179 TargetTy.getSizeInBits().getKnownMinValue());
1180 return LLT::vector(
1181 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1182 OrigElt);
1183 }
1184
1185 // One type is scalar, one type is vector
1186 if (OrigTy.isVector() || TargetTy.isVector()) {
1187 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1188 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1189 LLT EltTy = VecTy.getElementType();
1190 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1191
1192 // Prefer scalar type from OrigTy.
1193 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1194 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1195
1196 // Different size scalars. Create vector with the same total size.
1197 // LCM will take fixed/scalable from VecTy.
1198 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1200 ScalarTy.getSizeInBits().getFixedValue());
1201 // Prefer type from OrigTy
1202 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1203 VecTy.getElementCount().isScalable()),
1204 OrigEltTy);
1205 }
1206
1207 // At this point, both types are scalars of different size
1208 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1209 TargetTy.getSizeInBits().getFixedValue());
1210 // Preserve pointer types.
1211 if (LCM == OrigTy.getSizeInBits())
1212 return OrigTy;
1213 if (LCM == TargetTy.getSizeInBits())
1214 return TargetTy;
1215 return LLT::scalar(LCM);
1216}
1217
1218LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1219
1220 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1221 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1223 "getCoverTy not implemented between fixed and scalable vectors.");
1224
1225 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1226 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1227 return getLCMType(OrigTy, TargetTy);
1228
1229 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1230 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1231 if (OrigTyNumElts % TargetTyNumElts == 0)
1232 return OrigTy;
1233
1234 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1236 OrigTy.getElementType());
1237}
1238
1239LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1240 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1241 return OrigTy;
1242
1243 if (OrigTy.isVector() && TargetTy.isVector()) {
1244 LLT OrigElt = OrigTy.getElementType();
1245
1246 // TODO: The docstring for this function says the intention is to use this
1247 // function to build MERGE/UNMERGE instructions. It won't be the case that
1248 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1249 // could implement getGCDType between the two in the future if there was a
1250 // need, but it is not worth it now as this function should not be used in
1251 // that way.
1252 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1253 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1254 "getGCDType not implemented between fixed and scalable vectors.");
1255
1256 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1257 TargetTy.getSizeInBits().getKnownMinValue());
1258 if (GCD == OrigElt.getSizeInBits())
1260 OrigElt);
1261
1262 // Cannot produce original element type, but both have vscale in common.
1263 if (GCD < OrigElt.getSizeInBits())
1265 GCD);
1266
1267 return LLT::vector(
1269 OrigTy.isScalable()),
1270 OrigElt);
1271 }
1272
1273 // If one type is vector and the element size matches the scalar size, then
1274 // the gcd is the scalar type.
1275 if (OrigTy.isVector() &&
1276 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1277 return OrigTy.getElementType();
1278 if (TargetTy.isVector() &&
1279 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1280 return OrigTy;
1281
1282 // At this point, both types are either scalars of different type or one is a
1283 // vector and one is a scalar. If both types are scalars, the GCD type is the
1284 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1285 // the GCD type is the GCD between the scalar and the vector element size.
1286 LLT OrigScalar = OrigTy.getScalarType();
1287 LLT TargetScalar = TargetTy.getScalarType();
1288 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1289 TargetScalar.getSizeInBits().getFixedValue());
1290 return LLT::integer(GCD);
1291}
1292
1294 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1295 "Only G_SHUFFLE_VECTOR can have a splat index!");
1296 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1297 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1298
1299 // If all elements are undefined, this shuffle can be considered a splat.
1300 // Return 0 for better potential for callers to simplify.
1301 if (FirstDefinedIdx == Mask.end())
1302 return 0;
1303
1304 // Make sure all remaining elements are either undef or the same
1305 // as the first non-undef value.
1306 int SplatValue = *FirstDefinedIdx;
1307 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1308 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1309 return std::nullopt;
1310
1311 return SplatValue;
1312}
1313
1314static bool isBuildVectorOp(unsigned Opcode) {
1315 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1316 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1317}
1318
1319namespace {
1320
1321std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1322 const MachineRegisterInfo &MRI,
1323 bool AllowUndef) {
1324 MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1325 if (!MI)
1326 return std::nullopt;
1327
1328 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1329 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1330 return std::nullopt;
1331
1332 std::optional<ValueAndVReg> SplatValAndReg;
1333 for (MachineOperand &Op : MI->uses()) {
1334 Register Element = Op.getReg();
1335 // If we have a G_CONCAT_VECTOR, we recursively look into the
1336 // vectors that we're concatenating to see if they're splats.
1337 auto ElementValAndReg =
1338 isConcatVectorsOp
1339 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1341
1342 // If AllowUndef, treat undef as value that will result in a constant splat.
1343 if (!ElementValAndReg) {
1344 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1345 continue;
1346 return std::nullopt;
1347 }
1348
1349 // Record splat value
1350 if (!SplatValAndReg)
1351 SplatValAndReg = ElementValAndReg;
1352
1353 // Different constant than the one already recorded, not a constant splat.
1354 if (SplatValAndReg->Value != ElementValAndReg->Value)
1355 return std::nullopt;
1356 }
1357
1358 return SplatValAndReg;
1359}
1360
1361} // end anonymous namespace
1362
1364 const MachineRegisterInfo &MRI,
1365 int64_t SplatValue, bool AllowUndef) {
1366 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1367 return SplatValAndReg->Value.getSExtValue() == SplatValue;
1368
1369 return false;
1370}
1371
1373 const MachineRegisterInfo &MRI,
1374 const APInt &SplatValue,
1375 bool AllowUndef) {
1376 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
1377 if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
1378 return APInt::isSameValue(
1379 SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
1380 return APInt::isSameValue(
1381 SplatValAndReg->Value,
1382 SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
1383 }
1384
1385 return false;
1386}
1387
1389 const MachineRegisterInfo &MRI,
1390 int64_t SplatValue, bool AllowUndef) {
1391 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1392 AllowUndef);
1393}
1394
1396 const MachineRegisterInfo &MRI,
1397 const APInt &SplatValue,
1398 bool AllowUndef) {
1399 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1400 AllowUndef);
1401}
1402
1403std::optional<APInt>
1405 if (auto SplatValAndReg =
1406 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1407 if (std::optional<ValueAndVReg> ValAndVReg =
1408 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1409 return ValAndVReg->Value;
1410 }
1411
1412 return std::nullopt;
1413}
1414
1415std::optional<APInt>
1417 const MachineRegisterInfo &MRI) {
1418 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1419}
1420
1421std::optional<int64_t>
1423 const MachineRegisterInfo &MRI) {
1424 if (auto SplatValAndReg =
1425 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1426 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1427 return std::nullopt;
1428}
1429
1430std::optional<int64_t>
1432 const MachineRegisterInfo &MRI) {
1433 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1434}
1435
1436std::optional<FPValueAndVReg>
1438 bool AllowUndef) {
1439 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1440 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1441 return std::nullopt;
1442}
1443
1445 const MachineRegisterInfo &MRI,
1446 bool AllowUndef) {
1447 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1448}
1449
1451 const MachineRegisterInfo &MRI,
1452 bool AllowUndef) {
1453 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1454}
1455
1456std::optional<RegOrConstant>
1458 unsigned Opc = MI.getOpcode();
1459 if (!isBuildVectorOp(Opc))
1460 return std::nullopt;
1461 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1462 return RegOrConstant(*Splat);
1463 auto Reg = MI.getOperand(1).getReg();
1464 if (any_of(drop_begin(MI.operands(), 2),
1465 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1466 return std::nullopt;
1467 return RegOrConstant(Reg);
1468}
1469
1471 const MachineRegisterInfo &MRI,
1472 bool AllowFP = true,
1473 bool AllowOpaqueConstants = true) {
1474 switch (MI.getOpcode()) {
1475 case TargetOpcode::G_CONSTANT:
1476 case TargetOpcode::G_IMPLICIT_DEF:
1477 return true;
1478 case TargetOpcode::G_FCONSTANT:
1479 return AllowFP;
1480 case TargetOpcode::G_GLOBAL_VALUE:
1481 case TargetOpcode::G_FRAME_INDEX:
1482 case TargetOpcode::G_BLOCK_ADDR:
1483 case TargetOpcode::G_JUMP_TABLE:
1484 return AllowOpaqueConstants;
1485 default:
1486 return false;
1487 }
1488}
1489
1491 const MachineRegisterInfo &MRI) {
1492 Register Def = MI.getOperand(0).getReg();
1493 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1494 return true;
1496 if (!BV)
1497 return false;
1498 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1499 if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1500 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1501 continue;
1502 return false;
1503 }
1504 return true;
1505}
1506
1508 const MachineRegisterInfo &MRI,
1509 bool AllowFP, bool AllowOpaqueConstants) {
1510 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1511 return true;
1512
1513 if (!isBuildVectorOp(MI.getOpcode()))
1514 return false;
1515
1516 const unsigned NumOps = MI.getNumOperands();
1517 for (unsigned I = 1; I != NumOps; ++I) {
1518 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1519 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1520 return false;
1521 }
1522
1523 return true;
1524}
1525
1526std::optional<APInt>
1528 const MachineRegisterInfo &MRI) {
1529 Register Def = MI.getOperand(0).getReg();
1530 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1531 return C->Value;
1532 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1533 if (!MaybeCst)
1534 return std::nullopt;
1535 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1536 return APInt(ScalarSize, *MaybeCst, true);
1537}
1538
1539std::optional<APFloat>
1541 const MachineRegisterInfo &MRI) {
1542 Register Def = MI.getOperand(0).getReg();
1543 if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
1544 return FpConst->Value;
1545 auto MaybeCstFP = getFConstantSplat(Def, MRI, /*allowUndef=*/false);
1546 if (!MaybeCstFP)
1547 return std::nullopt;
1548 return MaybeCstFP->Value;
1549}
1550
1552 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1553 switch (MI.getOpcode()) {
1554 case TargetOpcode::G_IMPLICIT_DEF:
1555 return AllowUndefs;
1556 case TargetOpcode::G_CONSTANT:
1557 return MI.getOperand(1).getCImm()->isNullValue();
1558 case TargetOpcode::G_FCONSTANT: {
1559 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1560 return FPImm->isZero() && !FPImm->isNegative();
1561 }
1562 default:
1563 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1564 return false;
1565 return isBuildVectorAllZeros(MI, MRI);
1566 }
1567}
1568
1570 const MachineRegisterInfo &MRI,
1571 bool AllowUndefs) {
1572 switch (MI.getOpcode()) {
1573 case TargetOpcode::G_IMPLICIT_DEF:
1574 return AllowUndefs;
1575 case TargetOpcode::G_CONSTANT:
1576 return MI.getOperand(1).getCImm()->isAllOnesValue();
1577 default:
1578 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1579 return false;
1580 return isBuildVectorAllOnes(MI, MRI);
1581 }
1582}
1583
1585 const MachineRegisterInfo &MRI, Register Reg,
1586 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1587
1588 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1589 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1590 return Match(nullptr);
1591
1592 // TODO: Also handle fconstant
1593 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1594 return Match(Def->getOperand(1).getCImm());
1595
1596 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1597 return false;
1598
1599 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1600 Register SrcElt = Def->getOperand(I).getReg();
1601 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1602 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1603 if (!Match(nullptr))
1604 return false;
1605 continue;
1606 }
1607
1608 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1609 !Match(SrcDef->getOperand(1).getCImm()))
1610 return false;
1611 }
1612
1613 return true;
1614}
1615
1616bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1617 bool IsFP) {
1618 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1620 return Val & 0x1;
1622 return Val == 1;
1624 return Val == -1;
1625 }
1626 llvm_unreachable("Invalid boolean contents");
1627}
1628
1629bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1630 bool IsVector, bool IsFP) {
1631 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1633 return ~Val & 0x1;
1636 return Val == 0;
1637 }
1638 llvm_unreachable("Invalid boolean contents");
1639}
1640
1641int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1642 bool IsFP) {
1643 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1646 return 1;
1648 return -1;
1649 }
1650 llvm_unreachable("Invalid boolean contents");
1651}
1652
1654 LostDebugLocObserver *LocObserver,
1655 SmallInstListTy &DeadInstChain) {
1656 for (MachineOperand &Op : MI.uses()) {
1657 if (Op.isReg() && Op.getReg().isVirtual())
1658 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1659 }
1660 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1661 DeadInstChain.remove(&MI);
1662 MI.eraseFromParent();
1663 if (LocObserver)
1664 LocObserver->checkpoint(false);
1665}
1666
1669 LostDebugLocObserver *LocObserver) {
1670 SmallInstListTy DeadInstChain;
1671 for (MachineInstr *MI : DeadInstrs)
1672 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1673
1674 while (!DeadInstChain.empty()) {
1675 MachineInstr *Inst = DeadInstChain.pop_back_val();
1676 if (!isTriviallyDead(*Inst, MRI))
1677 continue;
1678 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1679 }
1680}
1681
1683 LostDebugLocObserver *LocObserver) {
1684 return eraseInstrs({&MI}, MRI, LocObserver);
1685}
1686
1688 for (auto &Def : MI.defs()) {
1689 assert(Def.isReg() && "Must be a reg");
1690
1692 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1693 MachineInstr *DbgValue = MOUse.getParent();
1694 // Ignore partially formed DBG_VALUEs.
1695 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1696 DbgUsers.push_back(&MOUse);
1697 }
1698 }
1699
1700 if (!DbgUsers.empty()) {
1701 salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1702 }
1703 }
1704}
1705
1707 switch (Opc) {
1708 case TargetOpcode::G_FABS:
1709 case TargetOpcode::G_FADD:
1710 case TargetOpcode::G_FCANONICALIZE:
1711 case TargetOpcode::G_FCEIL:
1712 case TargetOpcode::G_FCONSTANT:
1713 case TargetOpcode::G_FCOPYSIGN:
1714 case TargetOpcode::G_FCOS:
1715 case TargetOpcode::G_FDIV:
1716 case TargetOpcode::G_FEXP2:
1717 case TargetOpcode::G_FEXP:
1718 case TargetOpcode::G_FFLOOR:
1719 case TargetOpcode::G_FLOG10:
1720 case TargetOpcode::G_FLOG2:
1721 case TargetOpcode::G_FLOG:
1722 case TargetOpcode::G_FMA:
1723 case TargetOpcode::G_FMAD:
1724 case TargetOpcode::G_FMAXIMUM:
1725 case TargetOpcode::G_FMAXIMUMNUM:
1726 case TargetOpcode::G_FMAXNUM:
1727 case TargetOpcode::G_FMAXNUM_IEEE:
1728 case TargetOpcode::G_FMINIMUM:
1729 case TargetOpcode::G_FMINIMUMNUM:
1730 case TargetOpcode::G_FMINNUM:
1731 case TargetOpcode::G_FMINNUM_IEEE:
1732 case TargetOpcode::G_FMUL:
1733 case TargetOpcode::G_FNEARBYINT:
1734 case TargetOpcode::G_FNEG:
1735 case TargetOpcode::G_FPEXT:
1736 case TargetOpcode::G_FPEXTLOAD:
1737 case TargetOpcode::G_FPOW:
1738 case TargetOpcode::G_FPTRUNC:
1739 case TargetOpcode::G_FPTRUNCSTORE:
1740 case TargetOpcode::G_FREM:
1741 case TargetOpcode::G_FRINT:
1742 case TargetOpcode::G_FSIN:
1743 case TargetOpcode::G_FTAN:
1744 case TargetOpcode::G_FACOS:
1745 case TargetOpcode::G_FASIN:
1746 case TargetOpcode::G_FATAN:
1747 case TargetOpcode::G_FATAN2:
1748 case TargetOpcode::G_FCOSH:
1749 case TargetOpcode::G_FSINH:
1750 case TargetOpcode::G_FTANH:
1751 case TargetOpcode::G_FSQRT:
1752 case TargetOpcode::G_FSUB:
1753 case TargetOpcode::G_INTRINSIC_ROUND:
1754 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1755 case TargetOpcode::G_INTRINSIC_TRUNC:
1756 return true;
1757 default:
1758 return false;
1759 }
1760}
1761
1762/// Shifts return poison if shiftwidth is larger than the bitwidth.
1763static bool shiftAmountKnownInRange(Register ShiftAmount,
1764 const MachineRegisterInfo &MRI) {
1765 LLT Ty = MRI.getType(ShiftAmount);
1766
1767 if (Ty.isScalableVector())
1768 return false; // Can't tell, just return false to be safe
1769
1770 if (Ty.isScalar()) {
1771 std::optional<ValueAndVReg> Val =
1772 getIConstantVRegValWithLookThrough(ShiftAmount, MRI);
1773 if (!Val)
1774 return false;
1775 return Val->Value.ult(Ty.getScalarSizeInBits());
1776 }
1777
1778 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1779 if (!BV)
1780 return false;
1781
1782 unsigned Sources = BV->getNumSources();
1783 for (unsigned I = 0; I < Sources; ++I) {
1784 std::optional<ValueAndVReg> Val =
1786 if (!Val)
1787 return false;
1788 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1789 return false;
1790 }
1791
1792 return true;
1793}
1794
1796 bool ConsiderFlagsAndMetadata,
1797 UndefPoisonKind Kind) {
1798 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1799
1800 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1801 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1802 if (GMI->hasPoisonGeneratingFlags())
1803 return true;
1804
1805 // Check whether opcode is a poison/undef-generating operation.
1806 switch (RegDef->getOpcode()) {
1807 case TargetOpcode::G_BUILD_VECTOR:
1808 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1809 return false;
1810 case TargetOpcode::G_SHL:
1811 case TargetOpcode::G_ASHR:
1812 case TargetOpcode::G_LSHR:
1813 return includesPoison(Kind) &&
1814 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1815 case TargetOpcode::G_FPTOSI:
1816 case TargetOpcode::G_FPTOUI:
1817 // fptosi/ui yields poison if the resulting value does not fit in the
1818 // destination type.
1819 return true;
1820 case TargetOpcode::G_CTLZ:
1821 case TargetOpcode::G_CTTZ:
1822 case TargetOpcode::G_CTLS:
1823 case TargetOpcode::G_ABS:
1824 case TargetOpcode::G_CTPOP:
1825 case TargetOpcode::G_BSWAP:
1826 case TargetOpcode::G_BITREVERSE:
1827 case TargetOpcode::G_FSHL:
1828 case TargetOpcode::G_FSHR:
1829 case TargetOpcode::G_SMAX:
1830 case TargetOpcode::G_SMIN:
1831 case TargetOpcode::G_SCMP:
1832 case TargetOpcode::G_UMAX:
1833 case TargetOpcode::G_UMIN:
1834 case TargetOpcode::G_UCMP:
1835 case TargetOpcode::G_PTRMASK:
1836 case TargetOpcode::G_SADDO:
1837 case TargetOpcode::G_SSUBO:
1838 case TargetOpcode::G_UADDO:
1839 case TargetOpcode::G_USUBO:
1840 case TargetOpcode::G_SMULO:
1841 case TargetOpcode::G_UMULO:
1842 case TargetOpcode::G_SADDSAT:
1843 case TargetOpcode::G_UADDSAT:
1844 case TargetOpcode::G_SSUBSAT:
1845 case TargetOpcode::G_USUBSAT:
1846 case TargetOpcode::G_SBFX:
1847 case TargetOpcode::G_UBFX:
1848 return false;
1849 case TargetOpcode::G_SSHLSAT:
1850 case TargetOpcode::G_USHLSAT:
1851 return includesPoison(Kind) &&
1852 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1853 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1855 if (includesPoison(Kind)) {
1856 std::optional<ValueAndVReg> Index =
1857 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1858 if (!Index)
1859 return true;
1860 LLT VecTy = MRI.getType(Insert->getVectorReg());
1861 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1862 }
1863 return false;
1864 }
1865 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1867 if (includesPoison(Kind)) {
1868 std::optional<ValueAndVReg> Index =
1870 if (!Index)
1871 return true;
1872 LLT VecTy = MRI.getType(Extract->getVectorReg());
1873 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1874 }
1875 return false;
1876 }
1877 case TargetOpcode::G_SHUFFLE_VECTOR: {
1878 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1879 ArrayRef<int> Mask = Shuffle->getMask();
1880 return includesPoison(Kind) && is_contained(Mask, -1);
1881 }
1882 case TargetOpcode::G_FNEG:
1883 case TargetOpcode::G_PHI:
1884 case TargetOpcode::G_SELECT:
1885 case TargetOpcode::G_UREM:
1886 case TargetOpcode::G_SREM:
1887 case TargetOpcode::G_FREEZE:
1888 case TargetOpcode::G_ICMP:
1889 case TargetOpcode::G_FCMP:
1890 case TargetOpcode::G_FADD:
1891 case TargetOpcode::G_FSUB:
1892 case TargetOpcode::G_FMUL:
1893 case TargetOpcode::G_FDIV:
1894 case TargetOpcode::G_FREM:
1895 case TargetOpcode::G_PTR_ADD:
1896 return false;
1897 default:
1898 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1899 }
1900}
1901
1903 const MachineRegisterInfo &MRI,
1904 unsigned Depth,
1905 UndefPoisonKind Kind) {
1907 return false;
1908
1909 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1910
1911 switch (RegDef->getOpcode()) {
1912 case TargetOpcode::G_FREEZE:
1913 return true;
1914 case TargetOpcode::G_IMPLICIT_DEF:
1915 return !includesUndef(Kind);
1916 case TargetOpcode::G_CONSTANT:
1917 case TargetOpcode::G_FCONSTANT:
1918 return true;
1919 case TargetOpcode::G_BUILD_VECTOR: {
1920 GBuildVector *BV = cast<GBuildVector>(RegDef);
1921 unsigned NumSources = BV->getNumSources();
1922 for (unsigned I = 0; I < NumSources; ++I)
1924 Depth + 1, Kind))
1925 return false;
1926 return true;
1927 }
1928 case TargetOpcode::G_PHI: {
1929 GPhi *Phi = cast<GPhi>(RegDef);
1930 unsigned NumIncoming = Phi->getNumIncomingValues();
1931 for (unsigned I = 0; I < NumIncoming; ++I)
1932 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1933 Depth + 1, Kind))
1934 return false;
1935 return true;
1936 }
1937 default: {
1938 auto MOCheck = [&](const MachineOperand &MO) {
1939 if (!MO.isReg())
1940 return true;
1941 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1942 Kind);
1943 };
1944 return !::canCreateUndefOrPoison(Reg, MRI,
1945 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
1946 all_of(RegDef->uses(), MOCheck);
1947 }
1948 }
1949}
1950
1952 bool ConsiderFlagsAndMetadata) {
1953 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1955}
1956
1958 bool ConsiderFlagsAndMetadata = true) {
1959 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1961}
1962
1964 const MachineRegisterInfo &MRI,
1965 unsigned Depth) {
1966 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1968}
1969
1971 const MachineRegisterInfo &MRI,
1972 unsigned Depth) {
1973 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1975}
1976
1978 const MachineRegisterInfo &MRI,
1979 unsigned Depth) {
1980 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1982}
1983
1985 if (Ty.isVector())
1986 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1987 Ty.getElementCount());
1988 return IntegerType::get(C, Ty.getSizeInBits());
1989}
1990
  // Classifies MI by opcode: true for the G_ASSERT_* hint opcodes, false for
  // everything else.
  // NOTE(review): the enclosing function's signature line was lost in
  // extraction — presumably a bool predicate over a MachineInstr (e.g. an
  // "is assert instruction" helper); confirm against upstream Utils.cpp.
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_ASSERT_ALIGN:
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT:
    return true;
  }
}
2001
2003 assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
2004
2005 return Value;
2006}
2007
2008std::optional<GIConstant>
2011
2013 std::optional<ValueAndVReg> MayBeConstant =
2014 getIConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2015 if (!MayBeConstant)
2016 return std::nullopt;
2017 return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
2018 }
2019
2021 SmallVector<APInt> Values;
2022 unsigned NumSources = Build->getNumSources();
2023 for (unsigned I = 0; I < NumSources; ++I) {
2024 Register SrcReg = Build->getSourceReg(I);
2025 std::optional<ValueAndVReg> MayBeConstant =
2027 if (!MayBeConstant)
2028 return std::nullopt;
2029 Values.push_back(MayBeConstant->Value);
2030 }
2031 return GIConstant(Values);
2032 }
2033
2034 std::optional<ValueAndVReg> MayBeConstant =
2036 if (!MayBeConstant)
2037 return std::nullopt;
2038
2039 return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2040}
2041
2043 assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2044
2045 return Values[0];
2046}
2047
2048std::optional<GFConstant>
2051
2053 std::optional<FPValueAndVReg> MayBeConstant =
2054 getFConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2055 if (!MayBeConstant)
2056 return std::nullopt;
2057 return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2058 }
2059
2061 SmallVector<APFloat> Values;
2062 unsigned NumSources = Build->getNumSources();
2063 for (unsigned I = 0; I < NumSources; ++I) {
2064 Register SrcReg = Build->getSourceReg(I);
2065 std::optional<FPValueAndVReg> MayBeConstant =
2067 if (!MayBeConstant)
2068 return std::nullopt;
2069 Values.push_back(MayBeConstant->Value);
2070 }
2071 return GFConstant(Values);
2072 }
2073
2074 std::optional<FPValueAndVReg> MayBeConstant =
2076 if (!MayBeConstant)
2077 return std::nullopt;
2078
2079 return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2080}
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition Utils.cpp:234
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition Utils.cpp:1763
static bool isBuildVectorOp(unsigned Opcode)
Definition Utils.cpp:1314
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition Utils.cpp:1470
static GBuildVector * getBuildVectorLikeDef(Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:798
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define LLVM_DEBUG(...)
Definition Debug.h:119
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
This file contains the UndefPoisonKind enum and helper functions.
static const char PassName[]
Class recording the (high level) value of a variable.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1267
void copySign(const APFloat &RHS)
Definition APFloat.h:1361
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:5899
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1249
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1240
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1406
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1258
APInt bitcastToAPInt() const
Definition APFloat.h:1430
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1285
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1616
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1055
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1076
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:968
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1709
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1687
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1084
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
Definition APInt.h:555
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1788
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1028
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
bool isNegative() const
Return true if the sign bit is set.
Definition Constants.h:473
bool isZero() const
Return true if the value is positive or negative zero.
Definition Constants.h:467
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2049
GFConstant(ArrayRef< APFloat > Values)
Definition Utils.h:691
LLVM_ABI APFloat getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2042
LLVM_ABI APInt getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2002
static LLVM_ABI std::optional< GIConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2009
GIConstant(ArrayRef< APInt > Values)
Definition Utils.h:650
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
MachineInstr * pop_back_val()
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Represents a splat vector.
Module * getParent()
Get the module that this global value is contained inside of...
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
LLT getScalarType() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
static LLT integer(unsigned SizeInBits)
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
mop_range uses()
Returns all operands which may be register uses.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
LLVM_ABI Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
static def_iterator def_end()
iterator_range< use_iterator > use_operands(Register Reg) const
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Represents a value which can be a Register or a constant.
Definition Utils.h:395
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetOptions Options
GlobalISelAbortMode GlobalISelAbort
EnableGlobalISelAbort - Control abort behaviour when global instruction selection fails to lower/sele...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM Value Representation.
Definition Value.h:75
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition APInt.h:2277
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition APInt.h:2282
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2287
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2292
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition Utils.cpp:858
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
LLVM_ABI std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, unsigned DstScalarSizeInBits, unsigned ExtOp, const MachineRegisterInfo &MRI)
Definition Utils.cpp:993
@ Offset
Definition DWP.cpp:557
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1444
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition Utils.cpp:1984
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:57
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition Utils.cpp:653
LLVM_ABI const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:461
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:294
LLVM_ABI std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition Utils.cpp:932
LLVM_ABI std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1404
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1569
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
LLVM_ABI std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:741
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1687
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:156
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition Utils.cpp:891
LLVM_ABI std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1457
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1740
GISelWorkList< 4 > SmallInstListTy
Definition Utils.h:570
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition Utils.cpp:1527
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1551
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition Utils.cpp:494
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition Utils.cpp:1584
bool isPreISelGenericOptimizationHint(unsigned Opcode)
LLVM_ABI void reportGISelWarning(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:252
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition Utils.cpp:1616
LLVM_ABI LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition Utils.cpp:1151
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
LLVM_ABI std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:672
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:305
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1695
LLVM_ABI bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition Utils.cpp:1507
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
LLVM_ABI bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition Utils.cpp:200
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
Definition APFloat.h:1726
LLVM_ABI void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition Utils.cpp:1653
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:258
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition Utils.cpp:439
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1450
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition Utils.cpp:819
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition Utils.cpp:1437
bool includesPoison(UndefPoisonKind Kind)
Returns true if Kind includes the Poison bit.
Definition UndefPoison.h:27
LLVM_ABI std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition Utils.cpp:908
LLVM_ABI void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition Utils.cpp:508
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition Utils.cpp:1147
LLVM_ABI LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition Utils.cpp:1218
bool includesUndef(UndefPoisonKind Kind)
Returns true if Kind includes the Undef bit.
Definition UndefPoison.h:33
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1676
LLVM_ABI unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc)
Returns the inverse opcode of MinMaxOpc, which is a generic min/max opcode like G_SMIN.
Definition Utils.cpp:279
@ Mul
Product of integers.
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition Utils.cpp:447
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition Utils.cpp:1629
LLVM_ABI std::optional< APFloat > isConstantOrConstantSplatVectorFP(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a float constant integer or a splat vector of float constant integers.
Definition Utils.cpp:1540
constexpr unsigned BitWidth
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:659
LLVM_ABI bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition Utils.cpp:1363
LLVM_ABI void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1682
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
LLVM_ABI Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition Utils.cpp:47
LLVM_ABI int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition Utils.cpp:1641
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:433
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
Definition UndefPoison.h:20
LLVM_ABI bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition Utils.cpp:1706
LLVM_ABI std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition Utils.cpp:469
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI SmallVector< APInt > ConstantFoldUnaryIntOp(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Tries to constant fold a unary integer operation (G_CTLZ, G_CTTZ, G_CTPOP and their _ZERO_POISON vari...
Definition Utils.cpp:945
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1667
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition Utils.cpp:501
LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition Utils.cpp:1239
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1713
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
Definition APFloat.h:1753
LLVM_ABI std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1422
LLVM_ABI bool isAssertMI(const MachineInstr &MI)
Returns true if the instruction MI is one of the assert instructions.
Definition Utils.cpp:1991
LLVM_ABI void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition Utils.cpp:611
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition Utils.cpp:221
LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition Utils.cpp:841
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
#define MORE()
Definition regcomp.c:246
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition Utils.h:229
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:303
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:300
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition Utils.h:188