#include "llvm/IR/IntrinsicsAArch64.h"

#define GET_TARGET_REGBANK_IMPL
#include "AArch64GenRegisterBank.inc"

#include "AArch64GenRegisterBankInfo.def"
  static auto InitializeRegisterBankOnce = [&]() {
    assert(&AArch64::GPRRegBank == &RBGPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::FPRRegBank == &RBFPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::CCRegBank == &RBCCR &&
           "The order in RegBanks is messed up");
           "Subclass not added?");
           "GPRs should hold up to 128-bit");
           "Subclass not added?");
           "Subclass not added?");
           "FPRs should hold up to 512-bit via QQQQ sequence");
           "CCR should hold up to 32-bit");
           "PartialMappingIdx's are incorrectly ordered");
           "PartialMappingIdx's are incorrectly ordered");
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \

#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
           #RBName #Size " " #Offset " is incorrectly initialized");           \

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)

#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(AArch64::RBNameDst##RegBankID,    \
                                             AArch64::RBNameSrc##RegBankID,    \
                                             TypeSize::getFixed(Size));        \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 &&                                        \
           #RBNameDst #Size " Dst is incorrectly initialized");                \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 &&                                        \
           #RBNameSrc #Size " Src is incorrectly initialized");                \
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && "FPR" #DstSize                         \
                                        " Dst is incorrectly initialized");    \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && "FPR" #SrcSize                         \
                                        " Src is incorrectly initialized");    \
  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
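  // Note: the CHECK_* macros above only assert on the TableGen-generated
  // partial and value mappings (PartMappings/ValMappings); llvm::call_once
  // ensures this verification lambda runs at most once per process.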
  if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)

  if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)

  switch (RC.getID()) {
  case AArch64::GPR64sponlyRegClassID:
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    if (MI.getNumOperands() != 3)

  case TargetOpcode::G_BITCAST: {
    if (MI.getNumOperands() != 2)

        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
        copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,

  case TargetOpcode::G_LOAD: {
    if (MI.getNumOperands() != 2)
void AArch64RegisterBankInfo::applyMappingImpl(

  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_LOAD:
    assert((OpdMapper.getInstrMapping().getID() >= 1 &&
            OpdMapper.getInstrMapping().getID() <= 4) &&
           "Don't know how to handle that ID");

  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());
    auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
    MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
    MI.getOperand(2).setReg(Ext.getReg(0));

  case AArch64::G_DUP: {
    assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
           "Expected sources smaller than 32-bits");
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());

    auto ConstMI = MRI.getVRegDef(MI.getOperand(1).getReg());
    if (ConstMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      auto CstVal = ConstMI->getOperand(1).getCImm()->getValue();
          Builder.buildConstant(LLT::scalar(32), CstVal.sext(32)).getReg(0);

    MI.getOperand(1).setReg(ConstReg);
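    // In this G_DUP case the sub-32-bit scalar source is widened: a
    // G_CONSTANT feeding the DUP is rebuilt as a 32-bit constant and the
    // DUP's source operand is redirected to it, presumably so the source can
    // live in a 32-bit GPR.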
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(

  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
           "Operand has incompatible size");
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  case Intrinsic::aarch64_neon_uaddlv:
  case Intrinsic::aarch64_neon_uaddv:
  case Intrinsic::aarch64_neon_saddv:
  case Intrinsic::aarch64_neon_umaxv:
  case Intrinsic::aarch64_neon_smaxv:
  case Intrinsic::aarch64_neon_uminv:
  case Intrinsic::aarch64_neon_sminv:
  case Intrinsic::aarch64_neon_faddv:
  case Intrinsic::aarch64_neon_fmaxv:
  case Intrinsic::aarch64_neon_fminv:
  case Intrinsic::aarch64_neon_fmaxnmv:
  case Intrinsic::aarch64_neon_fminnmv:
  case Intrinsic::aarch64_neon_fmulx:
  case Intrinsic::aarch64_neon_frecpe:
  case Intrinsic::aarch64_neon_frecps:
  case Intrinsic::aarch64_neon_frecpx:
  case Intrinsic::aarch64_neon_frsqrte:
  case Intrinsic::aarch64_neon_frsqrts:
  case Intrinsic::aarch64_neon_facge:
  case Intrinsic::aarch64_neon_facgt:
  case Intrinsic::aarch64_neon_fabd:
  case Intrinsic::aarch64_sisd_fabd:
  case Intrinsic::aarch64_neon_sqrdmlah:
  case Intrinsic::aarch64_neon_sqrdmlsh:
  case Intrinsic::aarch64_neon_sqrdmulh:
  case Intrinsic::aarch64_neon_sqadd:
  case Intrinsic::aarch64_neon_sqsub:
  case Intrinsic::aarch64_crypto_sha1h:
  case Intrinsic::aarch64_crypto_sha1c:
  case Intrinsic::aarch64_crypto_sha1p:
  case Intrinsic::aarch64_crypto_sha1m:
  case Intrinsic::aarch64_sisd_fcvtxn:

  case Intrinsic::aarch64_neon_saddlv: {
    const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    return SrcTy.getElementType().getSizeInBits() >= 16 &&
           SrcTy.getElementCount().getFixedValue() >= 4;
bool AArch64RegisterBankInfo::isPHIWithFPConstraints(

  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)

  return any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                [&](const MachineInstr &UseMI) {
                  if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
                  return isPHIWithFPConstraints(UseMI, MRI, TRI, Depth + 1);
bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                               unsigned Depth) const {
  unsigned Op = MI.getOpcode();

  if (Op != TargetOpcode::COPY && !MI.isPHI() &&

  if (RB == &AArch64::FPRRegBank)
  if (RB == &AArch64::GPRRegBank)

  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)

  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
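  // These FP-constraint helpers walk def/use chains recursively;
  // MaxFPRSearchDepth bounds the recursion so the FPR-vs-GPR inference does
  // not become expensive on large use chains.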
                                          unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND:
  case AArch64::G_PMULL:

  case TargetOpcode::G_INTRINSIC:
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
                                            unsigned Depth) const {
  switch (MI.getOpcode()) {

  case AArch64::G_SADDLP:
  case AArch64::G_UADDLP:
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld4lane:
    case Intrinsic::aarch64_neon_ld4r:
                                             unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
    return MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() ==
           MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
  const Value *LdVal = MemOp->getMMO().getValue();
  Type *EltTy = nullptr;
    EltTy = GV->getValueType();
      if (StructEltTy->getNumElements() == 0)
      EltTy = StructEltTy->getTypeAtIndex(0U);
    for (const auto *LdUser : LdVal->users()) {
        EltTy = LdUser->getType();
        EltTy = LdUser->getOperand(0)->getType();
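// isLoadFromFPType inspects the IR Value behind the load's memory operand: it
// looks at the global/struct-element type and, failing that, at the IR users
// of the loaded value to guess whether the loaded data is floating point.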
  const unsigned Opc = MI.getOpcode();
      Opc == TargetOpcode::G_PHI) {

  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_FPEXT: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
    return getSameKindOfOperandsMapping(MI);

  case TargetOpcode::COPY: {
    if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
    assert(DstRB && SrcRB && "Both RegBank were nullptr");
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
        Opc == TargetOpcode::G_BITCAST ? 2 : 1);
  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
    LLT Ty = MRI.getType(MO.getReg());
    OpSize[Idx] = Ty.getSizeInBits().getKnownMinValue();
        (MO.isDef() && onlyDefinesFP(MI, MRI, TRI)) ||
        (MO.isUse() && onlyUsesFP(MI, MRI, TRI)) ||
        Ty.getSizeInBits() > 64)
  case AArch64::G_DUP: {
    Register ScalarReg = MI.getOperand(1).getReg();
    LLT ScalarTy = MRI.getType(ScalarReg);
    auto ScalarDef = MRI.getVRegDef(ScalarReg);
    if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
         onlyDefinesFP(*ScalarDef, MRI, TRI)))

  case TargetOpcode::G_TRUNC: {
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
        MRI.getType(SrcReg).getSizeInBits() ==
            MRI.getType(MI.getOperand(0).getReg()).getSizeInBits())

  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    LLT DstType = MRI.getType(MI.getOperand(0).getReg());
    if (DstType.isVector())
    if (((DstSize == SrcSize) || STI.hasFeature(AArch64::FeatureFPRCVT)) &&
        all_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                 return onlyUsesFP(UseMI, MRI, TRI) ||
                        prefersFPUse(UseMI, MRI, TRI);
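    // For scalar FP-to-integer conversions the integer result can stay on the
    // FPR bank when the source and destination sizes match (or FeatureFPRCVT
    // is available) and every non-debug user of the result prefers an FPR
    // input, which avoids a GPR<->FPR cross-bank copy.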
  case TargetOpcode::G_INTRINSIC_LRINT:
  case TargetOpcode::G_INTRINSIC_LLRINT:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())

  case TargetOpcode::G_FCMP: {
    OpRegBankIdx = {Idx0,

  case TargetOpcode::G_BITCAST:
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
  case TargetOpcode::G_LOAD: {
    if (isLoadFromFPType(MI)) {
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                 if (isPHIWithFPConstraints(UseMI, MRI, TRI))
                 return onlyUsesFP(UseMI, MRI, TRI) ||
                        prefersFPUse(UseMI, MRI, TRI);
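    // A load is mapped to FPR either when the pointed-to type looks like FP
    // data (isLoadFromFPType) or when some non-debug user of the loaded
    // value, including an FP-constrained PHI, wants it in an FP register.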
  case TargetOpcode::G_STORE:

  case TargetOpcode::G_INDEXED_STORE:

  case TargetOpcode::G_INDEXED_SEXTLOAD:
  case TargetOpcode::G_INDEXED_ZEXTLOAD:

  case TargetOpcode::G_INDEXED_LOAD: {
    if (isLoadFromFPType(MI))

  case TargetOpcode::G_SELECT: {
    LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    if (SrcTy.isVector()) {
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
    for (unsigned Idx = 2; Idx < 4; ++Idx) {
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
    if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:

  case TargetOpcode::G_INSERT_VECTOR_ELT:
    LLT Ty = MRI.getType(MI.getOperand(2).getReg());
    if (Ty.getSizeInBits() == 8 || Ty.getSizeInBits() == 16) {

  case TargetOpcode::G_EXTRACT: {
    auto Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (SrcTy.getSizeInBits() != 128)
    auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
    OpRegBankIdx[0] = Idx;
    OpRegBankIdx[1] = Idx;
  case TargetOpcode::G_BUILD_VECTOR: {
    unsigned DefOpc = DefMI->getOpcode();
    const LLT SrcTy = MRI.getType(VReg);
          return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
                                   TargetOpcode::G_CONSTANT;
        SrcTy.getSizeInBits() < 32 ||
    unsigned NumOperands = MI.getNumOperands();
    for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN:

  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu: {
      if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
      if (((DstSize == SrcSize) || STI.hasFeature(AArch64::FeatureFPRCVT)) &&
          all_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                   return onlyUsesFP(UseMI, MRI, TRI) ||
                          prefersFPUse(UseMI, MRI, TRI);

    case Intrinsic::aarch64_neon_vcvtfxs2fp:
    case Intrinsic::aarch64_neon_vcvtfxu2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxs:
    case Intrinsic::aarch64_neon_vcvtfp2fxu:
    for (const auto &Op : MI.defs()) {
    Idx += MI.getNumExplicitDefs();
    for (const auto &Op : MI.explicit_uses()) {

  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND: {
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
      if (!Mapping->isValid())
      OpdsMapping[Idx] = Mapping;