#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"

STATISTIC(NumVRegSpilled,
          "Number of registers within vector register groups spilled");
STATISTIC(NumVRegReloaded,
          "Number of registers within vector register groups reloaded");

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
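// The TableGen-generated .inc files included above supply the
// compressed-instruction predicates, the InstrInfo constructor and
// named-operand accessors, and the searchable tables for RVV pseudos and
// masked pseudos that the routines below query.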
  if (STI.hasStdExtZca())

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
    return 1;
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
    return 2;
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
    return 4;
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
    return 8;
  switch (MI.getOpcode()) {

  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {

  switch (MI.getOpcode()) {

    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();

  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
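// forwardCopyWillClobberTuple returns true when a forward (low-to-high)
// register-by-register copy of an NumRegs-register tuple would overwrite a
// source register before it has been read, i.e. when the destination range
// starts inside the source range. For example, with NumRegs = 4, copying
// v2..v5 to v4..v7 would clobber v4 and v5 before they are read, so that
// copy must be emitted in reverse.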
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;

    if (MBBI->isMetaInstruction())
      continue;

    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {

        unsigned FirstVType = MBBI->getOperand(2).getImm();

        if (FirstLMul != LMul)
          return false;

      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
        return false;

      unsigned VType = MBBI->getOperand(2).getImm();
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Bail on any instruction that may change VL.
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))
        return false;

      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {

        if (MO.getReg() != SrcReg)
          return false;
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
  assert(!Fractional && "It is impossible to be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;

    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;

  unsigned I = 0;
  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    if (ReversedCopy) {
      // For reversed copying, a larger group copy (M8/M4/M2) is only legal if
      // enough aligned registers remain and the gap between source and
      // destination encodings is at least the group size, to avoid overlap.
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      // Otherwise, fall back to an LMUL1 copy.
      return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    }

    // For forward copying, a larger copy can be used when both encodings are
    // aligned to the group size (8/4/2).
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };
  while (I != NumRegs) {

    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);

    if (LMul == LMulCopied &&

      if (DefMBBI->getOpcode() == VIOpc)

    MCRegister ActualSrcReg = findVRegWithEncoding(
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
    MCRegister ActualDstReg = findVRegWithEncoding(
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);

      MIB = MIB.add(DefMBBI->getOperand(2));

      MIB.addImm(Log2SEW ? Log2SEW : 3);

    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
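// Each iteration copies the largest register group (M8/M4/M2/M1) that the
// current encodings allow, then steps the source and destination encodings
// by the number of registers copied (backwards when ReversedCopy is set).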
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, KillFlag)
        .addImm(0);
    return;
  }

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR16INX), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR32INX), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);

    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)

    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
        .addReg(OddReg, KillFlag)
        .addImm(0);
    return;
  }

  // Handle a copy from a CSR.
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin lack FSGNJ_H, so use FSGNJ_S on the 32-bit super-regs.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  const TargetRegisterClass *RegClass =
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg,
                                         MachineInstr::MIFlag Flags) const {

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
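// PseudoVSPILL<N>_M<L> spills a segment-register tuple of N fields, each
// spanning a group of L registers; the pseudo is expanded later into N
// whole-register stores.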
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg,
                                          MachineInstr::MIFlag Flags) const {

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
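// Reloads mirror the spill selection above: whole-register loads (VL<L>RE8_V)
// for single register groups and PseudoVRELOAD<N>_M<L> for segment-register
// tuples.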
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  switch (MI.getOpcode()) {
  default:
    if (RISCVInstrInfo::isSEXT_W(MI))

    if (RISCVInstrInfo::isZEXT_W(MI))

    if (RISCVInstrInfo::isZEXT_B(MI))

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

  case RISCV::VMV_X_S: {

    if (ST.getXLen() < (1U << Log2SEW))
      return nullptr;

  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {

  bool SrcRenamable = false;
  unsigned Num = 0;

  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();

    switch (Inst.getOpndKind()) {

        .addReg(SrcReg, SrcRegState)

        .addReg(SrcReg, SrcRegState)
        .addReg(SrcReg, SrcRegState)

        .addReg(SrcReg, SrcRegState)

    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:
    return RISCVCC::COND_EQ;
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:
    return RISCVCC::COND_NE;
  case RISCV::QC_E_BLTI:
    return RISCVCC::COND_LT;
  case RISCV::QC_E_BGEI:
    return RISCVCC::COND_GE;
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
    return RISCVCC::COND_LTU;
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
    return RISCVCC::COND_GEU;

  llvm_unreachable("Unknown conditional branch");
  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;
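// Each QC_MV* conditional-move opcode maps to the opcode testing the inverse
// condition (EQ<->NE, LT<->GE, LTU<->GEU, and likewise for the immediate
// forms), so a conditional move can be commuted by swapping its opcode.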
  switch (SelectOpc) {

  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:

  case RISCV::Select_GPR_Using_CC_SImm5_CV:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_EQ:
      return RISCV::CV_BEQIMM;
    case RISCVCC::COND_NE:
      return RISCV::CV_BNEIMM;
    }
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_EQ:
      return RISCV::QC_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_BGEI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_LTU:
      return RISCV::QC_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_BGEUI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_EQ:
      return RISCV::QC_E_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_E_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_E_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_E_BGEI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_LTU:
      return RISCV::QC_E_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_E_BGEUI;
    }
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BBC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BBS;
    }
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected CC");
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BEQC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BNEC;
    }
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, erase any terminators after the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle Generic branch opcodes from Global ISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  I->eraseFromParent();
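// removeBranch deletes at most two terminators: the unconditional branch at
// the end of the block, and then, if present, the conditional branch that
// precedes it, returning how many branches were erased.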
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");

  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
1351 "Branch offsets outside of the signed 32-bit range not supported");
1356 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1357 auto II =
MBB.end();
1363 RS->enterBasicBlockEnd(
MBB);
1365 RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
MI.getIterator(),
1369 RS->setRegUsed(TmpGPR);
1374 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1377 if (FrameIndex == -1)
1382 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1385 MI.getOperand(1).setMBB(&RestoreBB);
1389 TRI->eliminateFrameIndex(RestoreBB.
back(),
1393 MRI.replaceRegWith(ScratchReg, TmpGPR);
1394 MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
  switch (Cond[0].getImm()) {
  default:
    llvm_unreachable("Unexpected branch opcode!");
  case RISCV::BEQ:
    Cond[0].setImm(RISCV::BNE);
    break;
  case RISCV::BEQI:
    Cond[0].setImm(RISCV::BNEI);
    break;
  case RISCV::BNE:
    Cond[0].setImm(RISCV::BEQ);
    break;
  case RISCV::BNEI:
    Cond[0].setImm(RISCV::BEQI);
    break;
  case RISCV::BLT:
    Cond[0].setImm(RISCV::BGE);
    break;
  case RISCV::BGE:
    Cond[0].setImm(RISCV::BLT);
    break;
  case RISCV::BLTU:
    Cond[0].setImm(RISCV::BGEU);
    break;
  case RISCV::BGEU:
    Cond[0].setImm(RISCV::BLTU);
    break;
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
    break;
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
    break;
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
    break;
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
    break;
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
    break;
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
    break;
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
    break;
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
    break;
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
    break;
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
    break;
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
    break;
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
    break;
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
    break;
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
    break;
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
    break;
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
    break;
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
    break;
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
    break;
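// reverseBranchCondition only has to flip the stored branch opcode; the
// remaining condition operands (the compared values) stay in place.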
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();
    return true;
  }
  return false;

  if (Reg == RISCV::X0) {
    Imm = 0;
    return true;
  }
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {

  MI.eraseFromParent();

  auto searchConst = [&](int64_t C1) -> Register {

    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1 &&
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();

  if (isFromLoadImm(MRI, LHS, C0) &&
      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {

    if (Register RegZ = searchConst(C0 + 1)) {

      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

  if (isFromLoadImm(MRI, RHS, C0) &&
      MRI.hasOneUse(RHS.getReg())) {

    if (Register RegZ = searchConst(C0 - 1)) {

      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();

  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:

  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:

  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;
  case RISCV::MAX:   return RISCV::PseudoCCMAX;
  case RISCV::MAXU:  return RISCV::PseudoCCMAXU;
  case RISCV::MIN:   return RISCV::PseudoCCMIN;
  case RISCV::MINU:  return RISCV::PseudoCCMINU;
  case RISCV::MUL:   return RISCV::PseudoCCMUL;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;
  }

  return RISCV::INSTRUCTION_LIST_END;
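// Each PseudoCC* opcode is a conditionally-executed form of the base ALU
// instruction used by the short-forward-branch optimization: the pseudo
// carries the branch condition together with the true and false values until
// it is expanded late.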
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);

  if (!STI.hasShortForwardBranchIMinMax() &&
      (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
       MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))
    return nullptr;

  if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)
    return nullptr;

  // Don't predicate the li idiom (addi rd, x0, imm).
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    return nullptr;

    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;

    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
    return nullptr;
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // We can only fold when we support short forward branch opt.
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();

  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  NewMI.add(FalseReg);

  if (DefMI->getParent() != MI.getParent())

  DefMI->eraseFromParent();
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *MF.getTarget().getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {

    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))
        return 4; // c.ntl.all + c.load/c.store
      return 6;   // c.ntl.all + load/store
    }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    // MV is always compressible to either c.mv or c.li rd, 0.
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
    return StackMapOpers(&MI).getNumPatchBytes();
  case TargetOpcode::PATCHPOINT:
    return PatchPointOpers(&MI).getNumPatchBytes();
  case TargetOpcode::STATEPOINT: {
    unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    // No patch bytes means at most a PseudoCall is emitted.
    return std::max(NumBytes, 8U);
  }
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    const Function &F = MI.getMF()->getFunction();
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      unsigned Num;
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      // Number of C.NOPs or NOPs.
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    }
    // XRay sled size.
    return STI.is64Bit() ? 68 : 44;
  }
  default:
    return get(Opcode).getSize();
  }
}
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
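// An fsgnj with identical source operands is the canonical floating-point
// register move, and addi/ori/xori against x0 or a zero immediate are integer
// register moves, so all of these count as cheap as a move.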
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {

  switch (MI.getOpcode()) {

    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};

  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // fsgnj rd, rs, rs is a register copy.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
             ? MachineTraceStrategy::TS_MinInstrCount
             : MachineTraceStrategy::TS_Local;

  int FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);

    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                     RISCV::OpName::frm) < 0;

         "New instructions require FRM whereas the old one does not have it");

  for (auto *NewMI : InsInstrs) {

    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
      continue;
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

  unsigned Opcode = Inst.getOpcode();
  if (Invert) {
    if (auto InvOpcode = getInverseOpcode(Opcode))
      Opcode = *InvOpcode;
    else
      return false;
  }

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
                                             const MachineInstr &Prev) const {

  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();

  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {

  auto checkRegOperand = [&](unsigned OpIdx) {

  if (!checkRegOperand(1))
    return false;

  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {

    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();

      if (MI1VReg != SrcReg)
        return false;

  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {

         "Expect the presence of passthrough operand.");

  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&

  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))
    return false;

  MI1 = MRI.getUniqueVRegDef(Op1.getReg());

  MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  for (unsigned I = 0; I < 5; ++I)

bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);

  unsigned OperandIdx = Commuted ? 2 : 1;

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    return true;
    Opc = *InverseOpcode;

std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
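// getInverseOpcode pairs each add-like opcode with its subtract-like inverse
// (and vice versa) so the machine combiner can reassociate expressions such
// as (a + b) - c into (a - c) + b.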
                             bool DoRegPressureReduce) {

  if (DoRegPressureReduce &&
      !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

                              bool DoRegPressureReduce) {

                   DoRegPressureReduce)) {

                   DoRegPressureReduce)) {

                           bool DoRegPressureReduce) {

static bool canCombine(const MachineBasicBlock &MBB, const MachineOperand &MO,
                       unsigned CombineOpc) {

  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
    return false;

  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

                       unsigned OuterShiftAmt) {

  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
    return false;

  case RISCV::SH1ADD_UW:
    return 1;
  case RISCV::SH2ADD_UW:
    return 2;
  case RISCV::SH3ADD_UW:
    return 3;

                                            bool DoRegPressureReduce) const {

                          DoRegPressureReduce);

  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();

      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)

  assert(OuterShiftAmt != 0 && "Unexpected opcode");

  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }

  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

                     DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;

        ErrInfo = "Expected a non-register operand.";

      int64_t Imm = MO.getImm();

#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
#define CASE_OPERAND_SIMM(NUM)                                                 \
  case RISCVOp::OPERAND_SIMM##NUM:                                             \
    Ok = isInt<NUM>(Imm);                                                      \
    break;

        Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);

        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;

        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;

        Ok = Ok && Imm != 0;

             (Imm >= 0xfffe0 && Imm <= 0xfffff);

        Ok = Imm >= 0 && Imm <= 10;

        Ok = Imm >= 0 && Imm <= 7;

        Ok = Imm >= 1 && Imm <= 10;

        Ok = Imm >= 2 && Imm <= 14;

        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
        ErrInfo = "Invalid immediate";
        return false;

    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg().isValid()) {
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }

      ErrInfo = "VL operand w/o SEW operand?";
      return false;

    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }

      ErrInfo = "Unexpected SEW value";
      return false;

    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

      ErrInfo = "Unexpected SEW value";
      return false;

    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }

      ErrInfo = "Invalid Policy Value";
      return false;

      ErrInfo = "policy operand w/o VL operand?";
      return false;

    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }

        !MI.readsRegister(RISCV::FRM, nullptr)) {
      ErrInfo = "dynamic rounding mode should read FRM";
      return false;
    }
  case RISCV::LD_RV32:

  case RISCV::SD_RV32:

  int64_t NewOffset = OldOffset + Disp;

         "Addressing mode not supported for folding");

  case RISCV::LD_RV32:

  case RISCV::SD_RV32:

  OffsetIsScalable = false;

  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;

  return Base1 == Base2;
                                           int64_t Offset2,
                                           bool OffsetIsScalable2,
                                           unsigned ClusterSize,
                                           unsigned NumBytes) const {

  if (!BaseOps1.empty() && !BaseOps2.empty()) {

  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

  unsigned CacheLineSize =
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();

  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;

  int64_t OffsetA = 0, OffsetB = 0;

  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  if (LowWidth.hasValue() &&
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
    return true;
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  return ArrayRef(TargetFlags);
}
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {

  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");

static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI,
                         unsigned RegNo) {
  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
}

static bool isMIModifiesReg(const MachineInstr &MI,
                            const TargetRegisterInfo *TRI, unsigned RegNo) {
  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
}

  if (!MBB.back().isReturn())
    return false;

  if (C.back().isReturn()) {
    assert(!isMIModifiesReg(C.back(), TRI, RISCV::X5) &&
           "The candidate who uses return instruction must be outlined "
           "using tail call");
  }

  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
RISCVInstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {

  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  unsigned InstrSizeCExt =

  unsigned CallOverhead = 0, FrameOverhead = 0;

  unsigned CFICount = 0;
  for (auto &I : Candidate) {
    if (I.isCFIInstruction())
      CFICount++;
  }

  std::vector<MCCFIInstruction> CFIInstructions =
      C.getMF()->getFrameInstructions();
  if (CFICount > 0 && CFICount != CFIInstructions.size())
    return std::nullopt;

    CallOverhead = 4 + InstrSizeCExt;

    FrameOverhead = InstrSizeCExt;

    return std::nullopt;

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)
    SequenceSize += getInstSizeInBytes(MI);

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
                                     unsigned Flags) const {

  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  if (MI.isCFIInstruction())

  for (const auto &MO : MI.operands()) {

    if ((MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))

  MBB.addLiveIn(RISCV::X5);

      .addGlobalAddress(M.getNamedValue(MF.getName()),

      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,

  return std::nullopt;

  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not immediate, it will be printed correctly by the generic code.
  if (!Op.isImm())
    return std::string();

    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);

  switch (OpInfo.OperandType) {

    unsigned Imm = Op.getImm();

    unsigned Imm = Op.getImm();

    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

    unsigned Policy = Op.getImm();
    assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16):                          \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16):                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
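// The CASE_* macro families above expand to case labels covering every LMUL
// (and, for the FP pseudos, every SEW) variant of an RVV pseudo, keeping the
// switches over commutable vector multiply-add opcodes manageable.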
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // We can't commute operands if operand 2 (i.e., rs1) is the zero register,
    // since it is not valid as the in/out operand 1.
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // Operands 1 and 2 are commutable if we switch the opcode.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;

    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both operands are unfixed: set one of the commutable operands to the
        // tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is unfixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now; choose another commutable
      // operand index for CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers; otherwise
        // the commutation is not profitable.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16)         \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16)     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));

  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:

  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);

  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");

    switch (MI.getOpcode()) {

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");

    if (OpIdx1 == 3 || OpIdx2 == 3) {

      switch (MI.getOpcode()) {

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));

#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {

    // op rd, zero, rs => commute so the zero operand comes second.
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    // op rd, rs, zero => addi rd, rs, 0
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // xor rd, rs, rs => addi rd, zero, 0
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    // shXadd rd, zero, rs => addi rd, rs, 0
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);

      MI.setDesc(get(RISCV::ADDI));

    // shXadd[.uw] rd, rs, zero => slli[.uw] rd, rs, X
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {

        MI.setDesc(get(RISCV::SLLI_UW));

      MI.setDesc(get(RISCV::SLLI));

    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::SLLI_UW:

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);

      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

    if (MI.getOperand(1).getReg() == RISCV::X0) {

      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
4450 RISCV::PseudoV##OP##_##LMUL##_TIED
4452#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4453 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4454 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4455 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4456 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4457 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4458 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4460#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4461 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4462 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4465#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4466 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4467 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4468 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4469 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4470 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4471 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4474#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4475 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4477#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4478 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4479 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4480 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4481 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4482 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4483 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4484 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4485 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4486 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4488#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4489 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4490 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4493#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4494 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4495 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4496 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4497 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4498 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4499 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4500 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4501 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4502 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4504#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4505 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4506 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4507 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4508 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4509 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4511#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4512 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4513 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4514 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4515 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4516 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
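// The _TIED widening pseudos tie their wide destination to a source operand;
// convertToThreeAddress below rewrites them to the untied form by giving the
// destination its own operand.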
  switch (MI.getOpcode()) {

    assert(MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");

    unsigned NewOpc;
    switch (MI.getOpcode()) {

    MachineInstrBuilder MIB =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(NewOpc))
            .add(MI.getOperand(0))
            .addReg(MI.getOperand(0).getReg(), RegState::Undef)
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .add(MI.getOperand(3))
            .add(MI.getOperand(4))
            .add(MI.getOperand(5))
            .add(MI.getOperand(6));

    assert(MI.getNumExplicitOperands() == 6);

    unsigned NewOpc;
    switch (MI.getOpcode()) {

    MachineInstrBuilder MIB =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(NewOpc))
            .add(MI.getOperand(0))
            .addReg(MI.getOperand(0).getReg(), RegState::Undef)
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .add(MI.getOperand(3))
            .add(MI.getOperand(4))
            .add(MI.getOperand(5));

  unsigned NumOps = MI.getNumOperands();

    if (Op.isReg() && Op.isKill())

  if (MI.getOperand(0).isEarlyClobber()) {

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (ShiftAmount == 0)
    return;

  } else if (int ShXAmount, ShiftAmount;
             STI.hasStdExtZba() &&
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
    // Amount is a (3/5/9) * 2^N multiple: use sh1add/sh2add/sh3add.
    unsigned Opc;
    switch (ShXAmount) {
    case 1:
      Opc = RISCV::SH1ADD;
      break;
    case 2:
      Opc = RISCV::SH2ADD;
      break;
    case 3:
      Opc = RISCV::SH3ADD;
      break;
    }

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {

            .addImm(ShiftAmount - PrevShiftAmount)

        if (Amount >> (ShiftAmount + 1)) {

          Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);

        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);

             ? STI.getTailDupAggressiveThreshold()
             : 2;

  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
// Return the (NF, LMUL) pair for a Zvlsseg segment spill/reload pseudo.
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
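
// Usage sketch (hypothetical caller): the returned (NF, LMUL) pair drives the
// expansion of a segment spill into NF whole-register stores, e.g.
//   if (auto NFLMul = RISCV::isRVVSpillForZvlsseg(MI.getOpcode())) {
//     auto [NF, LMul] = *NFLMul; // PseudoVSPILL3_M2 -> NF = 3, LMul = 2
//     // emit NF stores of LMul registers each, advancing the address by
//     // LMul * vlenb between the fields.
//   }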
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  // Instructions without a rounding-mode operand can never match.
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  const MachineOperand &FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  const MachineOperand &FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width and scaling shifts, rotates: only the low lg2(SEW) bits of
  // the scalar shift/rotate amount are read.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    return Log2SEW;

  // Narrowing right shifts and clips, and widening shift-left: the amount
  // indexes a 2*SEW-wide element, so lg2(2*SEW) bits are read.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
    return Log2SEW + 1;

  // Everything below consumes the scalar operand as a full SEW-bit value.
  // Integer add/subtract, including widening forms:
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // Add-with-carry / subtract-with-borrow:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // Bitwise logical:
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  // Integer comparisons:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // Min/max:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // Multiply and divide:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // Widening multiply:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // Multiply-accumulate, including widening forms:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // Merge and move:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  // Saturating and averaging add/subtract:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // Fixed-point multiply, scalar move, and AND-NOT:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
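
// Illustrative use: for vsll.vx at SEW = 64 (Log2SEW = 6) only the low six
// bits of the scalar operand are demanded, so a masking `andi rs1, rs1, 63`
// feeding it can be dropped; for vadd.vx all 64 (1 << 6) bits are demanded
// and no such narrowing is legal.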
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}

unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // DestEEW == 0 encodes an EEW-1 destination (mask-producing instructions).
  if (DestEEW == 0)
    return 0;
  // Otherwise the destination EEW is SEW * 2^(DestEEW - 1); in log2 form:
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(Scaled >= 3 && Scaled <= 6 && "Unexpected destination EEW");
  return Scaled;
}

// Return MO's value as a known immediate, if it can be determined.
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  // ...
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  // The same virtual register trivially satisfies LHS <= RHS.
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // A VL of zero is less than or equal to every other VL.
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  // Otherwise both VLs must have known values we can compare.
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
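
// Examples: isVLKnownLE(imm 4, imm 8) is true; an immediate VL of 0 is known
// <= any VL; two uses of the same virtual register compare as equal; two
// unrelated virtual registers conservatively yield false.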
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  const SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Only the loop-control compare operands are ignored; they stay out of
    // the pipelined kernel.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch "if (Cond) goto epilogue" will be inserted; Cond is already
    // normalized for that use, so hand it back unchanged.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // The block must end in a conditional branch back into the loop.
  if (TBB != LoopBB && FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Resolve each branch operand to its defining instruction, if any.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };
  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);

  // Compare operands carried by PHIs cannot be handled here.
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
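
// Usage sketch (hypothetical): the MachinePipeliner asks the target for this
// loop info on a candidate single-block loop; a non-null result lets it keep
// the loop-control compare out of the pipelined kernel and re-emit
// "if (Cond) goto epilogue" checks from the normalized branch condition.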
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  // RVV pseudos are mapped back to their MC opcode before matching.
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Scalar FP divide and square root:
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer divide and remainder:
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector FP divide, square root, and reciprocal estimate:
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
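
// Illustrative consequence: values defined by the opcodes above (divides,
// remainders, square roots, and their vector counterparts) are treated as
// high-latency definitions, so schedulers try to place their consumers
// further away; RVV pseudos are first mapped back to the underlying MC
// opcode via getRVVMCOpcode before the check.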