#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
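// The GET_* macros select which TableGen-generated definitions the .inc files
// expand to: the instruction-info constructor/destructor above, and the
// searchable image-intrinsic and resource-intrinsic tables below.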
    cl::desc("Restrict range of branch instructions (DEBUG)"));

    "amdgpu-fix-16-bit-physreg-copies",
    cl::desc(
        "Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))

  return !MI.memoperands_empty() &&
         all_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
           return MMO->isLoad() && MMO->isInvariant();
         });
  if (!MI.hasImplicitDef() &&
      MI.getNumImplicitOperands() == MI.getDesc().implicit_uses().size() &&
      !MI.mayRaiseFPException())
bool SIInstrInfo::resultDependsOnExec(const MachineInstr &MI) const {
  if (MI.isConvergent())

  if (MI.getOpcode() == AMDGPU::SI_IF_BREAK)

  for (auto Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual() &&

  if (FromCycle == nullptr)

  while (FromCycle && !FromCycle->contains(ToCycle)) {
                                            int64_t &Offset1) const {
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())

  if (!get(Opc0).getNumDefs() || !get(Opc1).getNumDefs())

  int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
  int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
  if (Offset0Idx == -1 || Offset1Idx == -1)

  Offset0Idx -= get(Opc0).NumDefs;
  Offset1Idx -= get(Opc1).NumDefs;

  if (!Load0Offset || !Load1Offset)

  int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
  int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

  if (OffIdx0 == -1 || OffIdx1 == -1)

  OffIdx0 -= get(Opc0).NumDefs;
  OffIdx1 -= get(Opc1).NumDefs;
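  // getNamedOperandIdx indexes the MachineInstr operand list, where defs come
  // first; subtracting NumDefs converts to an SDNode operand index, since
  // SDNodes keep their results separate from their operands.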
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:

  OffsetIsScalable = false;

      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      if (Opc == AMDGPU::DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64)

      unsigned Offset0 = Offset0Op->getImm() & 0xff;
      unsigned Offset1 = Offset1Op->getImm() & 0xff;
      if (Offset0 + 1 != Offset1)

      int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);

      Offset = EltSize * Offset0;

      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);

    if (BaseOp && !BaseOp->isFI())

    if (SOffset->isReg())

      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);

        isMIMG(LdSt) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcOpName);

    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
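  // Each memory-instruction format (DS, MUBUF, MIMG, SMRD, FLAT) names its
  // data operand differently, so the lookup is keyed per format; a -1 result
  // from getNamedOperandIdx means the operand is absent for this opcode.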
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())

  const auto *Base1 = MO1->getValue();
  const auto *Base2 = MO2->getValue();
  if (!Base1 || !Base2)

  return Base1 == Base2;

                                      int64_t Offset1, bool OffsetIsScalable1,
                                      int64_t Offset2, bool OffsetIsScalable2,
                                      unsigned ClusterSize,
                                      unsigned NumBytes) const {
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

  const unsigned LoadSize = NumBytes / ClusterSize;
  const unsigned NumDWords = ((LoadSize + 3) / 4) * ClusterSize;
  return NumDWords <= MaxMemoryClusterDWords;
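  // Clustering heuristic: estimate the cluster footprint in DWORDs by
  // rounding each load up to a whole DWORD, and refuse to cluster once the
  // total would exceed the MaxMemoryClusterDWords budget.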
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");

  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);

                              const char *Msg = "illegal VGPR to SGPR copy") {

  assert((TII.getSubtarget().hasMAIInsts() &&
          !TII.getSubtarget().hasGFX90AInsts()) &&
         "Expected GFX908 subtarget.");

          AMDGPU::AGPR_32RegClass.contains(SrcReg)) &&
         "Source register of the copy should be either an SGPR or an AGPR.");

         "Destination register of the copy should be an AGPR.");

  for (auto Def = MI, E = MBB.begin(); Def != E; ) {

    if (!Def->modifiesRegister(SrcReg, &RI))

    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
        Def->getOperand(0).getReg() != SrcReg)

    bool SafeToPropagate = true;

    for (auto I = Def; I != MI && SafeToPropagate; ++I)
      if (I->modifiesRegister(DefOp.getReg(), &RI))
        SafeToPropagate = false;

    if (!SafeToPropagate)

    for (auto I = Def; I != MI; ++I)
      I->clearRegisterKills(DefOp.getReg(), &RI);
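  // GFX908 has no direct AGPR-to-AGPR move, so this walks backwards to the
  // V_ACCVGPR_WRITE that defined the source and, when nothing in between
  // clobbers that operand, reuses it directly; kill flags on the propagated
  // register must be cleared along the way.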
  if (ImpUseSuperReg) {
    Builder.addReg(ImpUseSuperReg,

  RS.enterBasicBlockEnd(MBB);
  RS.backward(std::next(MI));

  unsigned RegNo = (DestReg - AMDGPU::AGPR0) % 3;

  assert(MBB.getParent()->getRegInfo().isReserved(Tmp) &&
         "VGPR used for an intermediate copy should have been reserved.");

  Register Tmp2 = RS.scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI,

  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;

  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
    unsigned Opcode = AMDGPU::S_MOV_B32;

    bool AlignedDest = ((DestSubReg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((SrcSubReg - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      DestSubReg = RI.getSubReg(DestReg, SubIdx);
      SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
      assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
      Opcode = AMDGPU::S_MOV_B64;

  assert(FirstMI && LastMI);

  LastMI->addRegisterKilled(SrcReg, &RI);
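  // Wide SGPR copies are emitted 32 bits at a time, but when both the source
  // and destination subregisters are even-aligned and another lane follows,
  // two lanes are coalesced into a single S_MOV_B64.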
                              Register SrcReg, bool KillSrc,
                              bool RenamableDest, bool RenamableSrc) const {
  unsigned Size = RI.getRegSizeInBits(*RC);
  unsigned SrcSize = RI.getRegSizeInBits(*SrcRC);

  if (((Size == 16) != (SrcSize == 16))) {
    assert(ST.useRealTrue16Insts());
    MCRegister SubReg = RI.getSubReg(RegToFix, AMDGPU::lo16);
    if (DestReg == SrcReg) {
    RC = RI.getPhysRegBaseClass(DestReg);
    Size = RI.getRegSizeInBits(*RC);
    SrcRC = RI.getPhysRegBaseClass(SrcReg);
    SrcSize = RI.getRegSizeInBits(*SrcRC);

  if (RC == &AMDGPU::VGPR_32RegClass) {
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg)
                       ? AMDGPU::V_ACCVGPR_READ_B32_e64
                       : AMDGPU::V_MOV_B32_e32;

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      if (DestReg == AMDGPU::VCC_LO) {

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
    if (!AMDGPU::SReg_64_EncodableRegClass.contains(SrcReg)) {
      if (DestReg == AMDGPU::VCC) {

  if (DestReg == AMDGPU::SCC) {
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      assert(ST.hasScalarCompareEq64());

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
        (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) {
    if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {

    const bool Overlap = RI.regsOverlap(SrcReg, DestReg);

           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);

    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
               "Cannot use hi16 subreg with an AGPR!");

    if (ST.useRealTrue16Insts()) {
      if (AMDGPU::VGPR_16_Lo128RegClass.contains(DestReg) &&
          (IsSGPRSrc || AMDGPU::VGPR_16_Lo128RegClass.contains(SrcReg))) {

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
               "Cannot use hi16 subreg on VI!");
  if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
    if (ST.hasVMovB64Inst()) {
    if (ST.hasPkMovB32()) {

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(SrcRC)) {
    const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isAGPRClass(RC)) {
    if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
      Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
    else if (RI.hasVGPRs(SrcRC) ||
             (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC)))
      Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
    else
      Opcode = AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
             (RI.isProperlyAlignedRC(*RC) &&
              (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
    if (ST.hasVMovB64Inst()) {
      Opcode = AMDGPU::V_MOV_B64_e32;
    } else if (ST.hasPkMovB32()) {
      Opcode = AMDGPU::V_PK_MOV_B32;
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS = std::make_unique<RegScavenger>();

  const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
  const bool CanKillSuperReg = KillSrc && !Overlap;
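  // INSTRUCTION_LIST_END is used as a sentinel meaning "no single-instruction
  // copy exists for this class pair" (e.g. AGPR-to-AGPR before GFX90A); those
  // copies are routed through an intermediate register found by the scavenger.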
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");

    bool IsFirstSubreg = Idx == 0;
    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
                         *RS, Overlap, ImpDefSuper, ImpUseSuper);
    } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
  return &AMDGPU::VGPR_32RegClass;

         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");

    case SIInstrInfo::SCC_TRUE: {

    case SIInstrInfo::SCC_FALSE: {

    case SIInstrInfo::VCCNZ: {

    case SIInstrInfo::VCCZ: {

    case SIInstrInfo::EXECNZ: {

    case SIInstrInfo::EXECZ: {
                                         int64_t &ImmVal) const {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOVK_I32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B16_t16_e32: {
    return MI.getOperand(0).getReg() == Reg;
  }
  case AMDGPU::V_MOV_B16_t16_e64: {
    if (Src0.isImm() && !MI.getOperand(1).getImm()) {
      return MI.getOperand(0).getReg() == Reg;

  case AMDGPU::S_BREV_B32:
  case AMDGPU::V_BFREV_B32_e32:
  case AMDGPU::V_BFREV_B32_e64: {
    return MI.getOperand(0).getReg() == Reg;

  case AMDGPU::S_NOT_B32:
  case AMDGPU::V_NOT_B32_e32:
  case AMDGPU::V_NOT_B32_e64: {
    ImmVal = static_cast<int64_t>(~static_cast<int32_t>(Src0.getImm()));
    return MI.getOperand(0).getReg() == Reg;
std::optional<int64_t>

  if (!Op.isReg() || !Op.getReg().isVirtual())
    return std::nullopt;

  if (Def && Def->isMoveImmediate()) {

  return std::nullopt;

  if (RI.isAGPRClass(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 16) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
  }
  if (RI.getRegSizeInBits(*DstRC) == 32)
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
    return AMDGPU::S_MOV_B64;
  if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC))
    return AMDGPU::V_MOV_B64_PSEUDO;
  return AMDGPU::COPY;
                                                      bool IsIndirectSrc) const {
  if (IsIndirectSrc) {
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V6);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V7);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12);
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
    if (VecSize <= 1024)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);

    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V6);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V7);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12);
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
  if (VecSize <= 1024)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V6;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V7;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12;
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;

    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V6;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V7;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;

    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
  if (VecSize <= 1024)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
                                                           bool IsSGPR) const {

  assert(EltSize == 32 && "invalid reg indexing elt size");
    return AMDGPU::SI_SPILL_S32_SAVE;
    return AMDGPU::SI_SPILL_S64_SAVE;
    return AMDGPU::SI_SPILL_S96_SAVE;
    return AMDGPU::SI_SPILL_S128_SAVE;
    return AMDGPU::SI_SPILL_S160_SAVE;
    return AMDGPU::SI_SPILL_S192_SAVE;
    return AMDGPU::SI_SPILL_S224_SAVE;
    return AMDGPU::SI_SPILL_S256_SAVE;
    return AMDGPU::SI_SPILL_S288_SAVE;
    return AMDGPU::SI_SPILL_S320_SAVE;
    return AMDGPU::SI_SPILL_S352_SAVE;
    return AMDGPU::SI_SPILL_S384_SAVE;
    return AMDGPU::SI_SPILL_S512_SAVE;
    return AMDGPU::SI_SPILL_S1024_SAVE;

    return AMDGPU::SI_SPILL_V16_SAVE;
    return AMDGPU::SI_SPILL_V32_SAVE;
    return AMDGPU::SI_SPILL_V64_SAVE;
    return AMDGPU::SI_SPILL_V96_SAVE;
    return AMDGPU::SI_SPILL_V128_SAVE;
    return AMDGPU::SI_SPILL_V160_SAVE;
    return AMDGPU::SI_SPILL_V192_SAVE;
    return AMDGPU::SI_SPILL_V224_SAVE;
    return AMDGPU::SI_SPILL_V256_SAVE;
    return AMDGPU::SI_SPILL_V288_SAVE;
    return AMDGPU::SI_SPILL_V320_SAVE;
    return AMDGPU::SI_SPILL_V352_SAVE;
    return AMDGPU::SI_SPILL_V384_SAVE;
    return AMDGPU::SI_SPILL_V512_SAVE;
    return AMDGPU::SI_SPILL_V1024_SAVE;

    return AMDGPU::SI_SPILL_AV32_SAVE;
    return AMDGPU::SI_SPILL_AV64_SAVE;
    return AMDGPU::SI_SPILL_AV96_SAVE;
    return AMDGPU::SI_SPILL_AV128_SAVE;
    return AMDGPU::SI_SPILL_AV160_SAVE;
    return AMDGPU::SI_SPILL_AV192_SAVE;
    return AMDGPU::SI_SPILL_AV224_SAVE;
    return AMDGPU::SI_SPILL_AV256_SAVE;
    return AMDGPU::SI_SPILL_AV288_SAVE;
    return AMDGPU::SI_SPILL_AV320_SAVE;
    return AMDGPU::SI_SPILL_AV352_SAVE;
    return AMDGPU::SI_SPILL_AV384_SAVE;
    return AMDGPU::SI_SPILL_AV512_SAVE;
    return AMDGPU::SI_SPILL_AV1024_SAVE;
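// The spill pseudo-opcode getters above are pure size ladders: one opcode per
// spill size of the register class, per register bank (SGPR, VGPR, or
// AGPR/AV), selected from RI.getSpillSize(*RC).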
                                            bool IsVectorSuperClass) {

  if (IsVectorSuperClass)
    return AMDGPU::SI_SPILL_WWM_AV32_SAVE;

  return AMDGPU::SI_SPILL_WWM_V32_SAVE;

  bool IsVectorSuperClass = RI.isVectorSuperClass(RC);

  if (ST.hasMAIInsts())

                      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = RI.getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    if (SrcReg.isVirtual() && SpillSize == 4) {

    if (RI.spillSGPRToVGPR())
    return AMDGPU::SI_SPILL_S32_RESTORE;
    return AMDGPU::SI_SPILL_S64_RESTORE;
    return AMDGPU::SI_SPILL_S96_RESTORE;
    return AMDGPU::SI_SPILL_S128_RESTORE;
    return AMDGPU::SI_SPILL_S160_RESTORE;
    return AMDGPU::SI_SPILL_S192_RESTORE;
    return AMDGPU::SI_SPILL_S224_RESTORE;
    return AMDGPU::SI_SPILL_S256_RESTORE;
    return AMDGPU::SI_SPILL_S288_RESTORE;
    return AMDGPU::SI_SPILL_S320_RESTORE;
    return AMDGPU::SI_SPILL_S352_RESTORE;
    return AMDGPU::SI_SPILL_S384_RESTORE;
    return AMDGPU::SI_SPILL_S512_RESTORE;
    return AMDGPU::SI_SPILL_S1024_RESTORE;

    return AMDGPU::SI_SPILL_V16_RESTORE;
    return AMDGPU::SI_SPILL_V32_RESTORE;
    return AMDGPU::SI_SPILL_V64_RESTORE;
    return AMDGPU::SI_SPILL_V96_RESTORE;
    return AMDGPU::SI_SPILL_V128_RESTORE;
    return AMDGPU::SI_SPILL_V160_RESTORE;
    return AMDGPU::SI_SPILL_V192_RESTORE;
    return AMDGPU::SI_SPILL_V224_RESTORE;
    return AMDGPU::SI_SPILL_V256_RESTORE;
    return AMDGPU::SI_SPILL_V288_RESTORE;
    return AMDGPU::SI_SPILL_V320_RESTORE;
    return AMDGPU::SI_SPILL_V352_RESTORE;
    return AMDGPU::SI_SPILL_V384_RESTORE;
    return AMDGPU::SI_SPILL_V512_RESTORE;
    return AMDGPU::SI_SPILL_V1024_RESTORE;

    return AMDGPU::SI_SPILL_AV32_RESTORE;
    return AMDGPU::SI_SPILL_AV64_RESTORE;
    return AMDGPU::SI_SPILL_AV96_RESTORE;
    return AMDGPU::SI_SPILL_AV128_RESTORE;
    return AMDGPU::SI_SPILL_AV160_RESTORE;
    return AMDGPU::SI_SPILL_AV192_RESTORE;
    return AMDGPU::SI_SPILL_AV224_RESTORE;
    return AMDGPU::SI_SPILL_AV256_RESTORE;
    return AMDGPU::SI_SPILL_AV288_RESTORE;
    return AMDGPU::SI_SPILL_AV320_RESTORE;
    return AMDGPU::SI_SPILL_AV352_RESTORE;
    return AMDGPU::SI_SPILL_AV384_RESTORE;
    return AMDGPU::SI_SPILL_AV512_RESTORE;
    return AMDGPU::SI_SPILL_AV1024_RESTORE;
                                               bool IsVectorSuperClass) {

  if (IsVectorSuperClass)
    return AMDGPU::SI_SPILL_WWM_AV32_RESTORE;

  return AMDGPU::SI_SPILL_WWM_V32_RESTORE;

  bool IsVectorSuperClass = RI.isVectorSuperClass(RC);

  if (ST.hasMAIInsts())

  assert(!RI.isAGPRClass(RC));

  unsigned SpillSize = RI.getSpillSize(*RC);
                      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    if (DestReg.isVirtual() && SpillSize == 4) {

    if (RI.spillSGPRToVGPR())
                             unsigned Quantity) const {
  unsigned MaxSNopCount = 1u << ST.getSNopBits();
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, MaxSNopCount);
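    // S_NOP's immediate encodes "wait states minus one", so one instruction
    // covers at most 1 << getSNopBits() nops; larger requests are emitted as
    // a chain of S_NOPs.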
  auto *MF = MBB.getParent();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
  constexpr unsigned DoorbellIDMask = 0x3ff;
  constexpr unsigned ECQueueWaveAbort = 0x400;

  if (!MBB.succ_empty() || std::next(MI.getIterator()) != MBB.end()) {
    MBB.splitAt(MI, false);

  MBB.addSuccessor(TrapBB);

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
      .addUse(DoorbellRegMasked)
      .addImm(ECQueueWaveAbort);
  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addUse(SetWaveAbortBit);

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)

  return MBB.getNextNode();
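  // Abort sequence: the doorbell ID identifying the queue is masked, the
  // queue-wave-abort bit is OR'd in, and the result is placed in M0 for an
  // (elided) s_sendmsg to the trap handler; TTMP2 appears to hold the saved
  // M0 around this sequence, with the final M0 write restoring it.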
  switch (MI.getOpcode()) {

    if (MI.isMetaInstruction())

    return MI.getOperand(0).getImm() + 1;
  switch (MI.getOpcode()) {

  case AMDGPU::S_MOV_B64_term:
    MI.setDesc(get(AMDGPU::S_MOV_B64));

  case AMDGPU::S_MOV_B32_term:
    MI.setDesc(get(AMDGPU::S_MOV_B32));

  case AMDGPU::S_XOR_B64_term:
    MI.setDesc(get(AMDGPU::S_XOR_B64));

  case AMDGPU::S_XOR_B32_term:
    MI.setDesc(get(AMDGPU::S_XOR_B32));

  case AMDGPU::S_OR_B64_term:
    MI.setDesc(get(AMDGPU::S_OR_B64));

  case AMDGPU::S_OR_B32_term:
    MI.setDesc(get(AMDGPU::S_OR_B32));

  case AMDGPU::S_ANDN2_B64_term:
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));

  case AMDGPU::S_ANDN2_B32_term:
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));

  case AMDGPU::S_AND_B64_term:
    MI.setDesc(get(AMDGPU::S_AND_B64));

  case AMDGPU::S_AND_B32_term:
    MI.setDesc(get(AMDGPU::S_AND_B32));

  case AMDGPU::S_AND_SAVEEXEC_B64_term:
    MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B64));

  case AMDGPU::S_AND_SAVEEXEC_B32_term:
    MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B32));

  case AMDGPU::SI_SPILL_S32_TO_VGPR:
    MI.setDesc(get(AMDGPU::V_WRITELANE_B32));

  case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
    MI.setDesc(get(AMDGPU::V_READLANE_B32));
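  // The *_term pseudos are ordinary scalar ALU ops flagged as terminators so
  // that exec-mask manipulation stays at block ends until control flow is
  // finalized; expansion just swaps in the real opcode. The SGPR-to-VGPR
  // spill pseudos likewise lower directly to v_writelane/v_readlane.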
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO: {
        get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
  }
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
    int64_t Imm = MI.getOperand(1).getImm();

    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    MI.eraseFromParent();
  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    if (ST.hasVMovB64Inst() && Mov64RC->contains(Dst)) {
      MI.setDesc(Mov64Desc);

    if (SrcOp.isImm()) {
      APInt Lo(32, Imm.getLoBits(32).getZExtValue());
      APInt Hi(32, Imm.getHiBits(32).getZExtValue());

      if (ST.hasPkMovB32() &&

    MI.eraseFromParent();
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {

  case AMDGPU::S_MOV_B64_IMM_PSEUDO: {

    if (ST.has64BitLiterals()) {
      MI.setDesc(get(AMDGPU::S_MOV_B64));

      MI.setDesc(get(AMDGPU::S_MOV_B64));

    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    APInt Lo(32, Imm.getLoBits(32).getZExtValue());
    APInt Hi(32, Imm.getHiBits(32).getZExtValue());

    MI.eraseFromParent();

  case AMDGPU::V_SET_INACTIVE_B32: {
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(5));
    MI.eraseFromParent();
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V6:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V7:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V6:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V7:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {

    if (RI.hasVGPRs(EltRC)) {
      Opc = AMDGPU::V_MOVRELD_B32_e32;
    } else {
      Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
                                              : AMDGPU::S_MOVRELD_B32;
    }

    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

        .add(MI.getOperand(2))

    const int ImpDefIdx =
    const int ImpUseIdx = ImpDefIdx + 1;

    MI.eraseFromParent();
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V6:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V7:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());

    bool IsUndef = MI.getOperand(1).isUndef();

    const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);

        .add(MI.getOperand(2))

    const int ImpDefIdx =
    const int ImpUseIdx = ImpDefIdx + 1;

    MI.eraseFromParent();
  }
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V6:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V7:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());

    bool IsUndef = MI.getOperand(1).isUndef();

        .add(MI.getOperand(2))

    MI.eraseFromParent();
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    if (ST.hasGetPCZeroExtension()) {
          BuildMI(MF, DL, get(AMDGPU::S_SEXT_I32_I16), RegHi).addReg(RegHi));

        BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo).addReg(RegLo).add(OpLo));

    MI.eraseFromParent();
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET64: {

    Op.setOffset(Op.getOffset() + 4);

        BuildMI(MF, DL, get(AMDGPU::S_ADD_U64), Reg).addReg(Reg).add(Op));

    MI.eraseFromParent();
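  // PC-relative addressing: an (elided) s_getpc_b64 materializes the PC, and
  // the relocatable symbol operand is added to it. The +4 offset adjustment
  // appears to compensate for s_getpc_b64 returning the address of the
  // instruction that follows it, rather than of the add itself.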
  case AMDGPU::ENTER_STRICT_WWM: {

  case AMDGPU::ENTER_STRICT_WQM: {

    MI.eraseFromParent();

  case AMDGPU::EXIT_STRICT_WWM:
  case AMDGPU::EXIT_STRICT_WQM: {

  case AMDGPU::SI_RETURN: {

    MI.eraseFromParent();

  case AMDGPU::S_MUL_U64_U32_PSEUDO:
  case AMDGPU::S_MUL_I64_I32_PSEUDO:
    MI.setDesc(get(AMDGPU::S_MUL_U64));

  case AMDGPU::S_GETPC_B64_pseudo:
    MI.setDesc(get(AMDGPU::S_GETPC_B64));
    if (ST.hasGetPCZeroExtension()) {
      Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

  case AMDGPU::V_MAX_BF16_PSEUDO_e64: {
    assert(ST.hasBF16PackedInsts());
    MI.setDesc(get(AMDGPU::V_PK_MAX_NUM_BF16));

  case AMDGPU::GET_STACK_BASE:
    if (ST.getFrameLowering()->mayReserveScratchForCWSR(*MBB.getParent())) {
      Register DestReg = MI.getOperand(0).getReg();

      MI.getOperand(MI.getNumExplicitOperands()).setIsDead(false);
      MI.getOperand(MI.getNumExplicitOperands()).setIsUse();
      MI.setDesc(get(AMDGPU::S_CMOVK_I32));

      MI.setDesc(get(AMDGPU::S_MOV_B32));
               MI.getNumExplicitOperands());
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO: {

    if (UsedLanes.all())

    unsigned LoSubReg = RI.composeSubRegIndices(OrigSubReg, AMDGPU::sub0);
    unsigned HiSubReg = RI.composeSubRegIndices(OrigSubReg, AMDGPU::sub1);

    bool NeedLo = (UsedLanes & RI.getSubRegIndexLaneMask(LoSubReg)).any();
    bool NeedHi = (UsedLanes & RI.getSubRegIndexLaneMask(HiSubReg)).any();

    if (NeedLo && NeedHi)

    int32_t Imm32 = NeedLo ? Lo_32(Imm64) : Hi_32(Imm64);

    unsigned UseSubReg = NeedLo ? LoSubReg : HiSubReg;
  case AMDGPU::S_LOAD_DWORDX16_IMM:
  case AMDGPU::S_LOAD_DWORDX8_IMM: {

      for (auto &CandMO : I->operands()) {
        if (!CandMO.isReg() || CandMO.getReg() != RegToFind || CandMO.isDef())

    if (!UseMO || UseMO->getSubReg() == AMDGPU::NoSubRegister)

    unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg());

    unsigned NewOpcode = -1;
    if (SubregSize == 256)
      NewOpcode = AMDGPU::S_LOAD_DWORDX8_IMM;
    else if (SubregSize == 128)
      NewOpcode = AMDGPU::S_LOAD_DWORDX4_IMM;

    UseMO->setSubReg(AMDGPU::NoSubRegister);

    MI->getOperand(0).setReg(DestReg);
    MI->getOperand(0).setSubReg(AMDGPU::NoSubRegister);

    OffsetMO->setImm(FinalOffset);

    MI->setMemRefs(*MF, NewMMOs);
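    // When a wide scalar load's result is only read through a 256- or 128-bit
    // subregister, the load itself is narrowed: the destination drops its
    // subregister index, the immediate offset is rebased to the subrange, and
    // the memory operands are shrunk to match the new width.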
std::pair<MachineInstr *, MachineInstr *>

  assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  if (ST.hasVMovB64Inst() && ST.hasFeature(AMDGPU::FeatureDPALU_DPP) &&
    MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
    return std::pair(&MI, nullptr);

  for (auto Sub : {AMDGPU::sub0, AMDGPU::sub1}) {
    if (Dst.isPhysical()) {
      MovDPP.addDef(RI.getSubReg(Dst, Sub));

    for (unsigned I = 1; I <= 2; ++I) {
      if (SrcOp.isImm()) {
        Imm.ashrInPlace(Part * 32);
        MovDPP.addImm(Imm.getLoBits(32).getZExtValue());

      if (Src.isPhysical())
        MovDPP.addReg(RI.getSubReg(Src, Sub));

      MovDPP.addImm(MO.getImm());

    Split[Part] = MovDPP;

  if (Dst.isVirtual())

  MI.eraseFromParent();
  return std::pair(Split[0], Split[1]);
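// When the subtarget lacks a DPP-capable 64-bit mov, the pseudo above is
// split into two 32-bit DPP movs, one per half: register sources use the
// sub0/sub1 subregisters, and immediate sources are shifted so each half
// receives its own 32 bits.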
std::optional<DestSourcePair>

  if (MI.getOpcode() == AMDGPU::WWM_COPY)

  return std::nullopt;

                                       AMDGPU::OpName Src0OpName,
                                       AMDGPU::OpName Src1OpName) const {

         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);

  bool IsKill = RegOp.isKill();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())

  else if (NonRegOp.isFI())

  int64_t NonRegVal = NonRegOp1.getImm();

  NonRegOp2.setImm(NonRegVal);
                                                  unsigned OpIdx1) const {

  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);

  if ((int)OpIdx0 == Src0Idx && !MO0.isReg() &&

  if ((int)OpIdx1 == Src0Idx && !MO1.isReg() &&

  if ((int)OpIdx1 != Src0Idx && MO0.isReg()) {

  if ((int)OpIdx0 != Src0Idx && MO1.isReg()) {

                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();

  if (CommutedOpcode == -1)

  if (Src0Idx > Src1Idx)

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
             static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
             static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

                       Src1, AMDGPU::OpName::src1_modifiers);

                       AMDGPU::OpName::src1_sel);
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {

                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);

                                     int64_t BrOffset) const {

  return MI.getOperand(0).getMBB();

  if (MI.getOpcode() == AMDGPU::SI_IF || MI.getOpcode() == AMDGPU::SI_ELSE ||
      MI.getOpcode() == AMDGPU::SI_LOOP)

         "new block should be inserted for expanding unconditional branch");

         "restore block should be inserted for restoring clobbered registers");
  if (ST.useAddPC64Inst()) {
        MCCtx.createTempSymbol("offset", true);

        MCCtx.createTempSymbol("post_addpc", true);
    AddPC->setPostInstrSymbol(*MF, PostAddPCLabel);

    Offset->setVariableValue(OffsetExpr);

  assert(RS && "RegScavenger required for long branching");

  const bool FlushSGPRWrites = (ST.isWave64() && ST.hasVALUMaskWriteHazard()) ||
                               ST.hasVALUReadSGPRHazard();
  auto ApplyHazardWorkarounds = [this, &MBB, &I, &DL, FlushSGPRWrites]() {
    if (FlushSGPRWrites)

  ApplyHazardWorkarounds();

      MCCtx.createTempSymbol("post_getpc", true);

      MCCtx.createTempSymbol("offset_lo", true);

      MCCtx.createTempSymbol("offset_hi", true);

      .addReg(PCReg, {}, AMDGPU::sub0)

      .addReg(PCReg, {}, AMDGPU::sub1)

  ApplyHazardWorkarounds();

  if (LongBranchReservedReg) {
    RS->enterBasicBlock(MBB);
    Scav = LongBranchReservedReg;

    RS->enterBasicBlockEnd(MBB);
    Scav = RS->scavengeRegisterBackwards(

  RS->setRegUsed(Scav);

  TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
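  // Long-branch expansion: the target address is built by materializing the
  // PC (s_getpc or the add-PC form) and adding the lo/hi halves of a
  // label-difference expression, then branching through the register. The
  // SGPR pair holding the address is either a reserved long-branch register
  // or scavenged; when nothing can be scavenged, SGPR0_SGPR1 is spilled as an
  // emergency.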
unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:

  case AMDGPU::S_CBRANCH_SCC1:

  case AMDGPU::S_CBRANCH_VCCNZ:

  case AMDGPU::S_CBRANCH_VCCZ:

  case AMDGPU::S_CBRANCH_EXECNZ:

  case AMDGPU::S_CBRANCH_EXECZ:
                                bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = I->getOperand(0).getMBB();

  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)

  Cond.push_back(I->getOperand(1));

  if (I == MBB.end()) {

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    FBB = I->getOperand(0).getMBB();

                                bool AllowModify) const {

  while (I != E && !I->isBranch() && !I->isReturn()) {
    switch (I->getOpcode()) {
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_OR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
    case AMDGPU::S_AND_B64_term:
    case AMDGPU::S_AND_SAVEEXEC_B64_term:
    case AMDGPU::S_MOV_B32_term:
    case AMDGPU::S_XOR_B32_term:
    case AMDGPU::S_OR_B32_term:
    case AMDGPU::S_ANDN2_B32_term:
    case AMDGPU::S_AND_B32_term:
    case AMDGPU::S_AND_SAVEEXEC_B32_term:

    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
                                   int *BytesRemoved) const {

  unsigned RemovedSize = 0;

    if (MI.isBranch() || MI.isReturn()) {

      MI.eraseFromParent();

    *BytesRemoved = RemovedSize;

                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;

      = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;

    *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;

  if (Cond.size() != 2) {

  if (Cond[0].isImm()) {

                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {

    CondCycles = TrueCycles = FalseCycles = NumInsts;

    return RI.hasVGPRs(RC) && NumInsts <= 6;

    if (NumInsts % 2 == 0)

    CondCycles = TrueCycles = FalseCycles = NumInsts;
    return RI.isSGPRClass(RC);
  BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
  if (Pred == VCCZ || Pred == SCC_FALSE) {
    Pred = static_cast<BranchPredicate>(-Pred);

  unsigned DstSize = RI.getRegSizeInBits(*DstRC);

  if (DstSize == 32) {

    if (Pred == SCC_TRUE) {

  if (DstSize == 64 && Pred == SCC_TRUE) {

  static const int16_t Sub0_15[] = {
      AMDGPU::sub0,  AMDGPU::sub1,  AMDGPU::sub2,  AMDGPU::sub3,
      AMDGPU::sub4,  AMDGPU::sub5,  AMDGPU::sub6,  AMDGPU::sub7,
      AMDGPU::sub8,  AMDGPU::sub9,  AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1,   AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5,   AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9,   AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;

  const int16_t *SubIndices = Sub0_15;
  int NElts = DstSize / 32;

  if (Pred == SCC_TRUE) {
      SelOp = AMDGPU::S_CSELECT_B32;
      EltRC = &AMDGPU::SGPR_32RegClass;

      SelOp = AMDGPU::S_CSELECT_B64;
      EltRC = &AMDGPU::SGPR_64RegClass;
      SubIndices = Sub0_15_64;

      MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);

  for (int Idx = 0; Idx != NElts; ++Idx) {

    unsigned SubIdx = SubIndices[Idx];

    if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
          .addReg(FalseReg, {}, SubIdx)
          .addReg(TrueReg, {}, SubIdx);

          .addReg(TrueReg, {}, SubIdx)
          .addReg(FalseReg, {}, SubIdx);
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:

  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:

      AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
      AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
      AMDGPU::OpName::omod,           AMDGPU::OpName::op_sel};

  unsigned Opc = MI.getOpcode();

    int Idx = AMDGPU::getNamedOperandIdx(Opc, Name);

      MI.removeOperand(Idx);

  MI.setDesc(NewDesc);

  unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
                    Desc.implicit_defs().size();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.removeOperand(I);

                                           unsigned SubRegIndex) {
  switch (SubRegIndex) {
  case AMDGPU::NoSubRegister:

  case AMDGPU::sub1_lo16:

  case AMDGPU::sub1_hi16:

  return std::nullopt;
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAD_F16_e64:
    return AMDGPU::V_MADAK_F16;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAD_F32_e64:
    return AMDGPU::V_MADAK_F32;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMA_F32_e64:
    return AMDGPU::V_FMAAK_F32;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_FMAC_F16_t16_e32:
  case AMDGPU::V_FMAC_F16_fake16_e32:
  case AMDGPU::V_FMA_F16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMAAK_F16_t16
                                        : AMDGPU::V_FMAAK_F16_fake16
                                  : AMDGPU::V_FMAAK_F16;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
  case AMDGPU::V_FMA_F64_e64:
    return AMDGPU::V_FMAAK_F64;

  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAD_F16_e64:
    return AMDGPU::V_MADMK_F16;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAD_F32_e64:
    return AMDGPU::V_MADMK_F32;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMA_F32_e64:
    return AMDGPU::V_FMAMK_F32;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_FMAC_F16_t16_e32:
  case AMDGPU::V_FMAC_F16_fake16_e32:
  case AMDGPU::V_FMA_F16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMAMK_F16_t16
                                        : AMDGPU::V_FMAMK_F16_fake16
                                  : AMDGPU::V_FMAMK_F16;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
  case AMDGPU::V_FMA_F64_e64:
    return AMDGPU::V_FMAMK_F64;
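// Two literal-carrying forms exist: FMAAK/MADAK compute d = s0 * s1 + K,
// while FMAMK/MADMK compute d = s0 * K + s1. Which mapping applies depends on
// whether the folded immediate ends up as the addend or the multiplicand.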
  assert(!DefMI.getOperand(0).getSubReg() && "Expected SSA form");

  if (Opc == AMDGPU::COPY) {
    assert(!UseMI.getOperand(0).getSubReg() && "Expected SSA form");

    if (HasMultipleUses) {
      unsigned ImmDefSize = RI.getRegSizeInBits(*MRI->getRegClass(Reg));

      if (UseSubReg != AMDGPU::NoSubRegister && ImmDefSize == 64)

      if (ImmDefSize == 32 &&

    bool Is16Bit = UseSubReg != AMDGPU::NoSubRegister &&
                   RI.getSubRegIdxSize(UseSubReg) == 16;

    if (RI.hasVGPRs(DstRC))

    if (DstReg.isVirtual() && UseSubReg != AMDGPU::lo16)

    unsigned NewOpc = AMDGPU::INSTRUCTION_LIST_END;

    for (unsigned MovOp :
         {AMDGPU::S_MOV_B32, AMDGPU::V_MOV_B32_e32, AMDGPU::S_MOV_B64,
          AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {

      MovDstRC = RI.getMatchingSuperRegClass(MovDstRC, DstRC, AMDGPU::lo16);

      if (MovDstPhysReg) {
            RI.getMatchingSuperReg(MovDstPhysReg, AMDGPU::lo16, MovDstRC);

      if (MovDstPhysReg) {
        if (!MovDstRC->contains(MovDstPhysReg))

      if (!RI.opCanUseLiteralConstant(OpInfo.OperandType) &&

    if (NewOpc == AMDGPU::INSTRUCTION_LIST_END)

    UseMI.getOperand(0).setSubReg(AMDGPU::NoSubRegister);

      UseMI.getOperand(0).setReg(MovDstPhysReg);

    UseMI.setDesc(NewMCID);
    UseMI.getOperand(1).ChangeToImmediate(*SubRegImm);
    UseMI.addImplicitDefUseOperands(*MF);
  if (HasMultipleUses)

  if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
      Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
      Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64 ||
      Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
      Opc == AMDGPU::V_FMAC_F16_fake16_e64 || Opc == AMDGPU::V_FMA_F64_e64 ||
      Opc == AMDGPU::V_FMAC_F64_e64) {

    int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);

    auto CopyRegOperandToNarrowerRC =

          if (!MI.getOperand(OpNo).isReg())

          if (RI.getCommonSubClass(RC, NewRC) != NewRC)

          BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                  get(AMDGPU::COPY), Tmp)
          MI.getOperand(OpNo).setReg(Tmp);
          MI.getOperand(OpNo).setIsKill();

        Src1->isReg() && Src1->getReg() == Reg ? Src0 : Src1;
    if (!RegSrc->isReg())

        ST.getConstantBusLimit(Opc) < 2)

    if (Def && Def->isMoveImmediate() &&

    unsigned SrcSubReg = RegSrc->getSubReg();

    if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
        Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
        Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
        Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
      UseMI.untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

    if (NewOpc == AMDGPU::V_FMAMK_F16_t16 ||
        NewOpc == AMDGPU::V_FMAMK_F16_fake16) {
              UseMI.getDebugLoc(), get(AMDGPU::COPY),
              UseMI.getOperand(0).getReg())
      UseMI.getOperand(0).setReg(Tmp);
      CopyRegOperandToNarrowerRC(UseMI, 1, NewRC);
      CopyRegOperandToNarrowerRC(UseMI, 3, NewRC);

    DefMI.eraseFromParent();
    if (ST.getConstantBusLimit(Opc) < 2) {

    bool Src0Inlined = false;
    if (Src0->isReg()) {

      if (Def && Def->isMoveImmediate() &&

      } else if (ST.getConstantBusLimit(Opc) <= 1 &&
                 RI.isSGPRReg(*MRI, Src0->getReg())) {

    if (Src1->isReg() && !Src0Inlined) {

      if (Def && Def->isMoveImmediate() &&

      else if (RI.isSGPRReg(*MRI, Src1->getReg()))

    if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
        Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
        Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
        Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
      UseMI.untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

    const std::optional<int64_t> SubRegImm =

    if (NewOpc == AMDGPU::V_FMAAK_F16_t16 ||
        NewOpc == AMDGPU::V_FMAAK_F16_fake16) {
              UseMI.getDebugLoc(), get(AMDGPU::COPY),
              UseMI.getOperand(0).getReg())
      UseMI.getOperand(0).setReg(Tmp);
      CopyRegOperandToNarrowerRC(UseMI, 1, NewRC);
      CopyRegOperandToNarrowerRC(UseMI, 2, NewRC);

    DefMI.eraseFromParent();
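    // Folding the immediate into a MAD/FMA is only legal when the remaining
    // register operands still respect the constant-bus limit, so SGPR and
    // already-inlined sources are checked before rewriting to the
    // FMAAK/FMAMK form; a tied src2 has to be untied first.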
  if (BaseOps1.size() != BaseOps2.size())

  for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
    if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))

  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
         LowOffset + (int)LowWidth.getValue() <= HighOffset;

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,

  int64_t Offset0, Offset1;

  bool Offset0IsScalable, Offset1IsScalable;

  LocationSize Width0 = MIa.memoperands().front()->getSize();
  LocationSize Width1 = MIb.memoperands().front()->getSize();

         "MIa must load from or modify a memory location");

         "MIb must load from or modify a memory location");

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

  if (Reg.isPhysical())

  Imm = Def->getOperand(1).getImm();

  unsigned NumOps = MI.getNumOperands();

  if (Op.isReg() && Op.isKill())
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_LEGACY_F32_e32:
  case AMDGPU::V_MAC_LEGACY_F32_e64:
    return AMDGPU::V_MAD_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e32:
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMA_F16_gfx9_t16_e64
                                        : AMDGPU::V_FMA_F16_gfx9_fake16_e64
                                  : AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  if (MI.isBundle()) {

    if (MI.getBundleSize() != 1)

    CandidateMI = MI.getNextNode();

  MachineInstr *NewMI = convertToThreeAddressImpl(*CandidateMI, U);

  if (MI.isBundle()) {

      MI.untieRegOperand(MO.getOperandNo());

  if (Def.isEarlyClobber() && Def.isReg() &&

  auto UpdateDefIndex = [&](LiveRange &LR) {
    auto *S = LR.find(OldIndex);
    if (S != LR.end() && S->start == OldIndex) {
      assert(S->valno && S->valno->def == OldIndex);
      S->start = NewIndex;
      S->valno->def = NewIndex;

  for (auto &SR : LI.subranges())

  if (U.RemoveMIUse) {

    Register DefReg = U.RemoveMIUse->getOperand(0).getReg();

    U.RemoveMIUse->setDesc(get(AMDGPU::IMPLICIT_DEF));
    U.RemoveMIUse->getOperand(0).setIsDead(true);
    for (unsigned I = U.RemoveMIUse->getNumOperands() - 1; I != 0; --I)
      U.RemoveMIUse->removeOperand(I);

    if (MI.isBundle()) {

        if (MO.isReg() && MO.getReg() == DefReg) {
          assert(MO.getSubReg() == 0 &&
                 "tied sub-registers in bundles currently not supported");
          MI.removeOperand(MO.getOperandNo());

      if (MIOp.isReg() && MIOp.getReg() == DefReg) {
        MIOp.setIsUndef(true);
        MIOp.setReg(DummyReg);

    if (MI.isBundle()) {

        if (MIOp.isReg() && MIOp.getReg() == DefReg) {
          MIOp.setIsUndef(true);
          MIOp.setReg(DummyReg);

  return MI.isBundle() ? &MI : NewMI;
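  // When LiveIntervals is available, the value-number definition point of the
  // replaced instruction has to move from the old slot index to the new one,
  // in the main live range and every subrange; otherwise the interval would
  // still claim a def at an instruction that no longer exists.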
                                                  ThreeAddressUpdates &U) const {

  unsigned Opc = MI.getOpcode();

  if (NewMFMAOpc != -1) {

    for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I)
      MIB.add(MI.getOperand(I));

    for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I)

  assert(Opc != AMDGPU::V_FMAC_F16_t16_e32 &&
         Opc != AMDGPU::V_FMAC_F16_fake16_e32 &&
         "V_FMAC_F16_t16/fake16_e32 is not supported and not expected to be "

  bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
  bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 ||
                  Opc == AMDGPU::V_MAC_LEGACY_F32_e64 ||
                  Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
                  Opc == AMDGPU::V_FMAC_LEGACY_F32_e64;
  bool Src0Literal = false;

  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_LEGACY_F32_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
  case AMDGPU::V_FMAC_F64_e64:

  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_LEGACY_F32_e32:
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_LEGACY_F32_e32:
  case AMDGPU::V_FMAC_F64_e32: {
    int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::src0);
    const MachineOperand *Src0 = &MI.getOperand(Src0Idx);

  MachineInstrBuilder MIB;

  const MachineOperand *Src0Mods =

  const MachineOperand *Src1Mods =

  const MachineOperand *Src2Mods =

  if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsLegacy &&
      (!IsF64 || ST.hasFmaakFmamkF64Insts()) &&
      (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||

    MachineInstr *DefMI;

        MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),

  if (Src0Literal && !ST.hasVOP3Literal())
  switch (MI.getOpcode()) {
  case AMDGPU::S_SET_GPR_IDX_ON:
  case AMDGPU::S_SET_GPR_IDX_MODE:
  case AMDGPU::S_SET_GPR_IDX_OFF:

  if (MI.isTerminator() || MI.isPosition())

  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)

  if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)

  return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
         MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
         MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
         MI.getOpcode() == AMDGPU::S_SETPRIO ||
         MI.getOpcode() == AMDGPU::S_SETPRIO_INC_WG ||

  return Opcode == AMDGPU::DS_ORDERED_COUNT ||
         Opcode == AMDGPU::DS_ADD_GS_REG_RTN ||
         Opcode == AMDGPU::DS_SUB_GS_REG_RTN || isGWS(Opcode);
  if (MI.getMF()->getFunction().hasFnAttribute("amdgpu-no-flat-scratch-init"))

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::FLAT_ADDRESS) {
      const MDNode *MD = Memop->getAAInfo().NoAliasAddrSpace;
      return !MD || !AMDGPU::hasValueInRangeLikeMetadata(
                        *MD, AMDGPUAS::PRIVATE_ADDRESS);

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();

  if (ST.isTgSplitEnabled())

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();
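  // A flat access must be assumed to reach scratch unless something rules
  // that out: the function-level "amdgpu-no-flat-scratch-init" attribute, or
  // noalias.addrspace metadata on the memory operand that excludes the
  // private address space.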
  unsigned Opcode = MI.getOpcode();

  if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
      isEXP(Opcode) || Opcode == AMDGPU::DS_ORDERED_COUNT ||
      Opcode == AMDGPU::S_TRAP || Opcode == AMDGPU::S_WAIT_EVENT ||
      Opcode == AMDGPU::S_SETHALT)

  if (MI.isCall() || MI.isInlineAsm())

  if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
      Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32 ||
      Opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR ||
      Opcode == AMDGPU::SI_SPILL_S32_TO_VGPR)

  if (MI.isMetaInstruction())

  if (MI.isCopyLike()) {
    if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))

    return MI.readsRegister(AMDGPU::EXEC, &RI);

  return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
4692 switch (Imm.getBitWidth()) {
4698 ST.hasInv2PiInlineImm());
4701 ST.hasInv2PiInlineImm());
4703 return ST.has16BitInsts() &&
4705 ST.hasInv2PiInlineImm());
4712 APInt IntImm = Imm.bitcastToAPInt();
4714 bool HasInv2Pi = ST.hasInv2PiInlineImm();
4722 return ST.has16BitInsts() &&
4725 return ST.has16BitInsts() &&
4735 switch (OperandType) {
4745 int32_t Trunc =
static_cast<int32_t
>(Imm);
4787 int16_t Trunc =
static_cast<int16_t
>(Imm);
4788 return ST.has16BitInsts() &&
4797 int16_t Trunc =
static_cast<int16_t
>(Imm);
4798 return ST.has16BitInsts() &&
4849 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
4855 return ST.hasVOP3Literal();
4859 int64_t ImmVal)
const {
4862 if (
isMAI(InstDesc) && ST.hasMFMAInlineLiteralBug() &&
4863 OpNo == (
unsigned)AMDGPU::getNamedOperandIdx(InstDesc.
getOpcode(),
4864 AMDGPU::OpName::src2))
4866 return RI.opCanUseInlineConstant(OpInfo.OperandType);
4878 "unexpected imm-like operand kind");
4891 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
4909 AMDGPU::OpName
OpName)
const {
4911 return Mods && Mods->
getImm();
4924 switch (
MI.getOpcode()) {
4925 default:
return false;
4927 case AMDGPU::V_ADDC_U32_e64:
4928 case AMDGPU::V_SUBB_U32_e64:
4929 case AMDGPU::V_SUBBREV_U32_e64: {
4932 if (!Src1->
isReg() || !RI.isVGPR(MRI, Src1->
getReg()))
4937 case AMDGPU::V_MAC_F16_e64:
4938 case AMDGPU::V_MAC_F32_e64:
4939 case AMDGPU::V_MAC_LEGACY_F32_e64:
4940 case AMDGPU::V_FMAC_F16_e64:
4941 case AMDGPU::V_FMAC_F16_t16_e64:
4942 case AMDGPU::V_FMAC_F16_fake16_e64:
4943 case AMDGPU::V_FMAC_F32_e64:
4944 case AMDGPU::V_FMAC_F64_e64:
4945 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4946 if (!Src2->
isReg() || !RI.isVGPR(MRI, Src2->
getReg()) ||
4951 case AMDGPU::V_CNDMASK_B32_e64:
4957 if (Src1 && (!Src1->
isReg() || !RI.isVGPR(MRI, Src1->
getReg()) ||
4987 (
Use.getReg() == AMDGPU::VCC ||
Use.getReg() == AMDGPU::VCC_LO)) {
4996 unsigned Op32)
const {
5010 Inst32.
add(
MI.getOperand(
I));
5014 int Idx =
MI.getNumExplicitDefs();
5016 int OpTy =
MI.getDesc().operands()[Idx++].OperandType;
5021 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2) == -1) {
5043 if (Reg == AMDGPU::SGPR_NULL || Reg == AMDGPU::SGPR_NULL64)
5051 return Reg == AMDGPU::VCC || Reg == AMDGPU::VCC_LO || Reg == AMDGPU::M0;
5054 return AMDGPU::SReg_32RegClass.contains(Reg) ||
5055 AMDGPU::SReg_64RegClass.contains(Reg);
5083 switch (MO.getReg()) {
5085 case AMDGPU::VCC_LO:
5086 case AMDGPU::VCC_HI:
5088 case AMDGPU::FLAT_SCR:
5101 switch (
MI.getOpcode()) {
5102 case AMDGPU::V_READLANE_B32:
5103 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
5104 case AMDGPU::V_WRITELANE_B32:
5105 case AMDGPU::SI_SPILL_S32_TO_VGPR:
5112 if (
MI.isPreISelOpcode() ||
5113 SIInstrInfo::isGenericOpcode(
MI.getOpcode()) ||
5131 return SubReg.
getSubReg() != AMDGPU::NoSubRegister &&
5142 if (RI.isVectorRegister(MRI, SrcReg) && RI.isSGPRReg(MRI, DstReg)) {
5143 ErrInfo =
"illegal copy from vector register to SGPR";
5161 if (!MRI.
isSSA() &&
MI.isCopy())
5162 return verifyCopy(
MI, MRI, ErrInfo);
5164 if (SIInstrInfo::isGenericOpcode(Opcode))
5167 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
5168 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
5169 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
5171 if (Src0Idx == -1) {
5173 Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0X);
5174 Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1X);
5175 Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0Y);
5176 Src3Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1Y);
5181 if (!
Desc.isVariadic() &&
5182 Desc.getNumOperands() !=
MI.getNumExplicitOperands()) {
5183 ErrInfo =
"Instruction has wrong number of operands.";
5187 if (
MI.isInlineAsm()) {
5200 if (!Reg.isVirtual() && !RC->
contains(Reg)) {
5201 ErrInfo =
"inlineasm operand has incorrect register class.";
5209 if (
isImage(
MI) &&
MI.memoperands_empty() &&
MI.mayLoadOrStore()) {
5210 ErrInfo =
"missing memory operand from image instruction.";
5215 for (
int i = 0, e =
Desc.getNumOperands(); i != e; ++i) {
5218 ErrInfo =
"FPImm Machine Operands are not supported. ISel should bitcast "
5219 "all fp values to integers.";
5224 int16_t RegClass = getOpRegClassID(OpInfo);
5226 switch (OpInfo.OperandType) {
5228 if (
MI.getOperand(i).isImm() ||
MI.getOperand(i).isGlobal()) {
5229 ErrInfo =
"Illegal immediate value for operand.";
5260 ErrInfo =
"Illegal immediate value for operand.";
5269 if (ST.has64BitLiterals() &&
Desc.getSize() != 4 && MO.
isImm() &&
5272 OpInfo.OperandType ==
5274 ErrInfo =
"illegal 64-bit immediate value for operand.";
5281 ErrInfo =
"Expected inline constant for operand.";
5295 if (!
MI.getOperand(i).isImm() && !
MI.getOperand(i).isFI()) {
5296 ErrInfo =
"Expected immediate, but got non-immediate";
5305 if (OpInfo.isGenericType())
5320 if (ST.needsAlignedVGPRs() && Opcode != AMDGPU::AV_MOV_B64_IMM_PSEUDO &&
5321 Opcode != AMDGPU::V_MOV_B64_PSEUDO && !
isSpill(
MI)) {
5323 if (RI.hasVectorRegisters(RC) && MO.
getSubReg()) {
5325 RI.getSubRegisterClass(RC, MO.
getSubReg())) {
5326 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.
getSubReg());
5333 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
5334 ErrInfo =
"Subtarget requires even aligned vector registers";
5339 if (RegClass != -1) {
5340 if (Reg.isVirtual())
5345 ErrInfo =
"Operand has incorrect register class.";
5353 if (!ST.hasSDWA()) {
5354 ErrInfo =
"SDWA is not supported on this target";
5358 for (
auto Op : {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel,
5359 AMDGPU::OpName::dst_sel}) {
5363 int64_t Imm = MO->
getImm();
5365 ErrInfo =
"Invalid SDWA selection";
5370 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
5372 for (
int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {
5377 if (!ST.hasSDWAScalar()) {
5379 if (!MO.
isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.
getReg()))) {
5380 ErrInfo =
"Only VGPRs allowed as operands in SDWA instructions on VI";
5387 "Only reg allowed as operands in SDWA instructions on GFX9+";
5393 if (!ST.hasSDWAOmod()) {
5396 if (OMod !=
nullptr &&
5398 ErrInfo =
"OMod not allowed in SDWA instructions on VI";
5403 if (Opcode == AMDGPU::V_CVT_F32_FP8_sdwa ||
5404 Opcode == AMDGPU::V_CVT_F32_BF8_sdwa ||
5405 Opcode == AMDGPU::V_CVT_PK_F32_FP8_sdwa ||
5406 Opcode == AMDGPU::V_CVT_PK_F32_BF8_sdwa) {
5409 unsigned Mods = Src0ModsMO->
getImm();
5412 ErrInfo =
"sext, abs and neg are not allowed on this instruction";
5418 if (
isVOPC(BasicOpcode)) {
5419 if (!ST.hasSDWASdst() && DstIdx != -1) {
5422 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
5423 ErrInfo =
"Only VCC allowed as dst in SDWA instructions on VI";
5426 }
else if (!ST.hasSDWAOutModsVOPC()) {
5429 if (Clamp && (!Clamp->
isImm() || Clamp->
getImm() != 0)) {
5430 ErrInfo =
"Clamp not allowed in VOPC SDWA instructions on VI";
5436 if (OMod && (!OMod->
isImm() || OMod->
getImm() != 0)) {
5437 ErrInfo =
"OMod not allowed in VOPC SDWA instructions on VI";
5444 if (DstUnused && DstUnused->isImm() &&
5447 if (!Dst.isReg() || !Dst.isTied()) {
5448 ErrInfo =
"Dst register should have tied register";
5453 MI.getOperand(
MI.findTiedOperandIdx(DstIdx));
5456 "Dst register should be tied to implicit use of preserved register";
5460 ErrInfo =
"Dst register should use same physical register as preserved";
5467 if (
isImage(Opcode) && !
MI.mayStore()) {
5479 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
5487 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
5491 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
5492 if (RegCount > DstSize) {
5493 ErrInfo =
"Image instruction returns too many registers for dst "
5502 if (
isVALU(
MI) &&
Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) {
5503 unsigned ConstantBusCount = 0;
5504 bool UsesLiteral =
false;
5507 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
5511 LiteralVal = &
MI.getOperand(ImmIdx);
5520 for (
int OpIdx : {Src0Idx, Src1Idx, Src2Idx, Src3Idx}) {
5531 }
else if (!MO.
isFI()) {
5538 ErrInfo =
"VOP2/VOP3 instruction uses more than one literal";
5548 if (
llvm::all_of(SGPRsUsed, [
this, SGPRUsed](
unsigned SGPR) {
5549 return !RI.regsOverlap(SGPRUsed, SGPR);
5558 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
5559 Opcode != AMDGPU::V_WRITELANE_B32) {
5560 ErrInfo =
"VOP* instruction violates constant bus restriction";
5564 if (
isVOP3(
MI) && UsesLiteral && !ST.hasVOP3Literal()) {
5565 ErrInfo =
"VOP3 instruction uses literal";
5572 if (
Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
5573 unsigned SGPRCount = 0;
5576 for (
int OpIdx : {Src0Idx, Src1Idx}) {
5584 if (MO.
getReg() != SGPRUsed)
5589 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
5590 ErrInfo =
"WRITELANE instruction violates constant bus restriction";
5597 if (
Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
5598 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
5605 ErrInfo =
"v_div_scale_{f32|f64} require src0 = src1 or src2";
5615 ErrInfo =
"ABS not allowed in VOP3B instructions";
5628 ErrInfo =
"SOP2/SOPC instruction requires too many immediate constants";
5635 if (
Desc.isBranch()) {
5637 ErrInfo =
"invalid branch target for SOPK instruction";
5644 ErrInfo =
"invalid immediate for SOPK instruction";
5649 ErrInfo =
"invalid immediate for SOPK instruction";
5656 if (
Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
5657 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
5658 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5659 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
5660 const bool IsDst =
Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5661 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
5663 const unsigned StaticNumOps =
5664 Desc.getNumOperands() +
Desc.implicit_uses().size();
5665 const unsigned NumImplicitOps = IsDst ? 2 : 1;
5671 if (
MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
5672 ErrInfo =
"missing implicit register operands";
5678 if (!Dst->isUse()) {
5679 ErrInfo =
"v_movreld_b32 vdst should be a use operand";
5684 if (!
MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
5685 UseOpIdx != StaticNumOps + 1) {
5686 ErrInfo =
"movrel implicit operands should be tied";
5693 =
MI.getOperand(StaticNumOps + NumImplicitOps - 1);
5695 !
isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
5696 ErrInfo =
"src0 should be subreg of implicit vector use";
5704 if (!
MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
5705 ErrInfo =
"VALU instruction does not implicitly read exec mask";
5711 if (
MI.mayStore() &&
5716 if (Soff && Soff->
getReg() != AMDGPU::M0) {
5717 ErrInfo =
"scalar stores must use m0 as offset register";
5723 if (
isFLAT(
MI) && !ST.hasFlatInstOffsets()) {
5725 if (
Offset->getImm() != 0) {
5726 ErrInfo =
"subtarget does not support offsets in flat instructions";
5731 if (
isDS(
MI) && !ST.hasGDS()) {
5733 if (GDSOp && GDSOp->
getImm() != 0) {
5734 ErrInfo =
"GDS is not supported on this subtarget";
5742 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
5743 AMDGPU::OpName::vaddr0);
5744 AMDGPU::OpName RSrcOpName =
5745 isMIMG(
MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
5746 int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName);
5754 ErrInfo =
"dim is out of range";
5759 if (ST.hasR128A16()) {
5761 IsA16 = R128A16->
getImm() != 0;
5762 }
else if (ST.hasA16()) {
5764 IsA16 = A16->
getImm() != 0;
5767 bool IsNSA = RsrcIdx - VAddr0Idx > 1;
5769 unsigned AddrWords =
5772 unsigned VAddrWords;
5774 VAddrWords = RsrcIdx - VAddr0Idx;
5775 if (ST.hasPartialNSAEncoding() &&
5777 unsigned LastVAddrIdx = RsrcIdx - 1;
5778 VAddrWords +=
getOpSize(
MI, LastVAddrIdx) / 4 - 1;
5786 if (VAddrWords != AddrWords) {
5788 <<
" but got " << VAddrWords <<
"\n");
5789 ErrInfo =
"bad vaddr size";
5799 unsigned DC = DppCt->
getImm();
5800 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
5801 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
5802 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
5803 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
5804 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
5805 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
5806 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
5807 ErrInfo =
"Invalid dpp_ctrl value";
5810 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
5811 !ST.hasDPPWavefrontShifts()) {
5812 ErrInfo =
"Invalid dpp_ctrl value: "
5813 "wavefront shifts are not supported on GFX10+";
5816 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
5817 !ST.hasDPPBroadcasts()) {
5818 ErrInfo =
"Invalid dpp_ctrl value: "
5819 "broadcasts are not supported on GFX10+";
5822 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
5824 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
5825 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
5826 !ST.hasGFX90AInsts()) {
5827 ErrInfo =
"Invalid dpp_ctrl value: "
5828 "row_newbroadcast/row_share is not supported before "
5832 if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
5833 ErrInfo =
"Invalid dpp_ctrl value: "
5834 "row_share and row_xmask are not supported before GFX10";
5839 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
5842 ErrInfo =
"Invalid dpp_ctrl value: "
5843 "DP ALU dpp only support row_newbcast";
5850 AMDGPU::OpName DataName =
5851 isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata;
5857 if (ST.hasGFX90AInsts()) {
5858 if (Dst &&
Data && !Dst->isTied() && !
Data->isTied() &&
5859 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI,
Data->getReg()))) {
5860 ErrInfo =
"Invalid register class: "
5861 "vdata and vdst should be both VGPR or AGPR";
5864 if (
Data && Data2 &&
5865 (RI.isAGPR(MRI,
Data->getReg()) != RI.isAGPR(MRI, Data2->
getReg()))) {
5866 ErrInfo =
"Invalid register class: "
5867 "both data operands should be VGPR or AGPR";
5871 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
5872 (
Data && RI.isAGPR(MRI,
Data->getReg())) ||
5873 (Data2 && RI.isAGPR(MRI, Data2->
getReg()))) {
5874 ErrInfo =
"Invalid register class: "
5875 "agpr loads and stores not supported on this GPU";
5881 if (ST.needsAlignedVGPRs()) {
5882 const auto isAlignedReg = [&
MI, &MRI,
this](AMDGPU::OpName
OpName) ->
bool {
5887 if (Reg.isPhysical())
5888 return !(RI.getHWRegIndex(Reg) & 1);
5890 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
5891 !(RI.getChannelFromSubReg(
Op->getSubReg()) & 1);
5894 if (Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_SEMA_BR ||
5895 Opcode == AMDGPU::DS_GWS_BARRIER) {
5897 if (!isAlignedReg(AMDGPU::OpName::data0)) {
5898 ErrInfo =
"Subtarget requires even aligned vector registers "
5899 "for DS_GWS instructions";
5905 if (!isAlignedReg(AMDGPU::OpName::vaddr)) {
5906 ErrInfo =
"Subtarget requires even aligned vector registers "
5907 "for vaddr operand of image instructions";
5913 if (Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts()) {
5915 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) {
5916 ErrInfo =
"Invalid register class: "
5917 "v_accvgpr_write with an SGPR is not supported on this GPU";
5922 if (
Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
5925 ErrInfo =
"pseudo expects only physical SGPRs";
5932 if (!ST.hasScaleOffset()) {
5933 ErrInfo =
"Subtarget does not support offset scaling";
5937 ErrInfo =
"Instruction does not support offset scaling";
5946 for (
unsigned I = 0;
I < 3; ++
I) {
5952 if (ST.hasFlatScratchHiInB64InstHazard() &&
isSALU(
MI) &&
5953 MI.readsRegister(AMDGPU::SRC_FLAT_SCRATCH_BASE_HI,
nullptr)) {
5955 if ((Dst && RI.getRegClassForReg(MRI, Dst->getReg()) ==
5956 &AMDGPU::SReg_64RegClass) ||
5957 Opcode == AMDGPU::S_BITCMP0_B64 || Opcode == AMDGPU::S_BITCMP1_B64) {
5958 ErrInfo =
"Instruction cannot read flat_scratch_base_hi";
5967 if (
MI.getOpcode() == AMDGPU::S_MOV_B32) {
5969 return MI.getOperand(1).isReg() || RI.isAGPR(MRI,
MI.getOperand(0).getReg())
5971 : AMDGPU::V_MOV_B32_e32;
5981 default:
return AMDGPU::INSTRUCTION_LIST_END;
5982 case AMDGPU::REG_SEQUENCE:
return AMDGPU::REG_SEQUENCE;
5983 case AMDGPU::COPY:
return AMDGPU::COPY;
5984 case AMDGPU::PHI:
return AMDGPU::PHI;
5985 case AMDGPU::INSERT_SUBREG:
return AMDGPU::INSERT_SUBREG;
5986 case AMDGPU::WQM:
return AMDGPU::WQM;
5987 case AMDGPU::SOFT_WQM:
return AMDGPU::SOFT_WQM;
5988 case AMDGPU::STRICT_WWM:
return AMDGPU::STRICT_WWM;
5989 case AMDGPU::STRICT_WQM:
return AMDGPU::STRICT_WQM;
5990 case AMDGPU::S_ADD_I32:
5991 return ST.hasAddNoCarryInsts() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
5992 case AMDGPU::S_ADDC_U32:
5993 return AMDGPU::V_ADDC_U32_e32;
5994 case AMDGPU::S_SUB_I32:
5995 return ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
5998 case AMDGPU::S_ADD_U32:
5999 return AMDGPU::V_ADD_CO_U32_e32;
6000 case AMDGPU::S_SUB_U32:
6001 return AMDGPU::V_SUB_CO_U32_e32;
6002 case AMDGPU::S_ADD_U64_PSEUDO:
6003 return AMDGPU::V_ADD_U64_PSEUDO;
6004 case AMDGPU::S_SUB_U64_PSEUDO:
6005 return AMDGPU::V_SUB_U64_PSEUDO;
6006 case AMDGPU::S_SUBB_U32:
return AMDGPU::V_SUBB_U32_e32;
6007 case AMDGPU::S_MUL_I32:
return AMDGPU::V_MUL_LO_U32_e64;
6008 case AMDGPU::S_MUL_HI_U32:
return AMDGPU::V_MUL_HI_U32_e64;
6009 case AMDGPU::S_MUL_HI_I32:
return AMDGPU::V_MUL_HI_I32_e64;
6010 case AMDGPU::S_AND_B32:
return AMDGPU::V_AND_B32_e64;
6011 case AMDGPU::S_OR_B32:
return AMDGPU::V_OR_B32_e64;
6012 case AMDGPU::S_XOR_B32:
return AMDGPU::V_XOR_B32_e64;
6013 case AMDGPU::S_XNOR_B32:
6014 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
6015 case AMDGPU::S_MIN_I32:
return AMDGPU::V_MIN_I32_e64;
6016 case AMDGPU::S_MIN_U32:
return AMDGPU::V_MIN_U32_e64;
6017 case AMDGPU::S_MAX_I32:
return AMDGPU::V_MAX_I32_e64;
6018 case AMDGPU::S_MAX_U32:
return AMDGPU::V_MAX_U32_e64;
6019 case AMDGPU::S_ASHR_I32:
return AMDGPU::V_ASHR_I32_e32;
6020 case AMDGPU::S_ASHR_I64:
return AMDGPU::V_ASHR_I64_e64;
6021 case AMDGPU::S_LSHL_B32:
return AMDGPU::V_LSHL_B32_e32;
6022 case AMDGPU::S_LSHL_B64:
return AMDGPU::V_LSHL_B64_e64;
6023 case AMDGPU::S_LSHR_B32:
return AMDGPU::V_LSHR_B32_e32;
6024 case AMDGPU::S_LSHR_B64:
return AMDGPU::V_LSHR_B64_e64;
6025 case AMDGPU::S_SEXT_I32_I8:
return AMDGPU::V_BFE_I32_e64;
6026 case AMDGPU::S_SEXT_I32_I16:
return AMDGPU::V_BFE_I32_e64;
6027 case AMDGPU::S_BFE_U32:
return AMDGPU::V_BFE_U32_e64;
6028 case AMDGPU::S_BFE_I32:
return AMDGPU::V_BFE_I32_e64;
6029 case AMDGPU::S_BFM_B32:
return AMDGPU::V_BFM_B32_e64;
6030 case AMDGPU::S_BREV_B32:
return AMDGPU::V_BFREV_B32_e32;
6031 case AMDGPU::S_NOT_B32:
return AMDGPU::V_NOT_B32_e32;
6032 case AMDGPU::S_NOT_B64:
return AMDGPU::V_NOT_B32_e32;
6033 case AMDGPU::S_CMP_EQ_I32:
return AMDGPU::V_CMP_EQ_I32_e64;
6034 case AMDGPU::S_CMP_LG_I32:
return AMDGPU::V_CMP_NE_I32_e64;
6035 case AMDGPU::S_CMP_GT_I32:
return AMDGPU::V_CMP_GT_I32_e64;
6036 case AMDGPU::S_CMP_GE_I32:
return AMDGPU::V_CMP_GE_I32_e64;
6037 case AMDGPU::S_CMP_LT_I32:
return AMDGPU::V_CMP_LT_I32_e64;
6038 case AMDGPU::S_CMP_LE_I32:
return AMDGPU::V_CMP_LE_I32_e64;
6039 case AMDGPU::S_CMP_EQ_U32:
return AMDGPU::V_CMP_EQ_U32_e64;
6040 case AMDGPU::S_CMP_LG_U32:
return AMDGPU::V_CMP_NE_U32_e64;
6041 case AMDGPU::S_CMP_GT_U32:
return AMDGPU::V_CMP_GT_U32_e64;
6042 case AMDGPU::S_CMP_GE_U32:
return AMDGPU::V_CMP_GE_U32_e64;
6043 case AMDGPU::S_CMP_LT_U32:
return AMDGPU::V_CMP_LT_U32_e64;
6044 case AMDGPU::S_CMP_LE_U32:
return AMDGPU::V_CMP_LE_U32_e64;
6045 case AMDGPU::S_CMP_EQ_U64:
return AMDGPU::V_CMP_EQ_U64_e64;
6046 case AMDGPU::S_CMP_LG_U64:
return AMDGPU::V_CMP_NE_U64_e64;
6047 case AMDGPU::S_BCNT1_I32_B32:
return AMDGPU::V_BCNT_U32_B32_e64;
6048 case AMDGPU::S_FF1_I32_B32:
return AMDGPU::V_FFBL_B32_e32;
6049 case AMDGPU::S_FLBIT_I32_B32:
return AMDGPU::V_FFBH_U32_e32;
6050 case AMDGPU::S_FLBIT_I32:
return AMDGPU::V_FFBH_I32_e64;
6051 case AMDGPU::S_CBRANCH_SCC0:
return AMDGPU::S_CBRANCH_VCCZ;
6052 case AMDGPU::S_CBRANCH_SCC1:
return AMDGPU::S_CBRANCH_VCCNZ;
6053 case AMDGPU::S_CVT_F32_I32:
return AMDGPU::V_CVT_F32_I32_e64;
6054 case AMDGPU::S_CVT_F32_U32:
return AMDGPU::V_CVT_F32_U32_e64;
6055 case AMDGPU::S_CVT_I32_F32:
return AMDGPU::V_CVT_I32_F32_e64;
6056 case AMDGPU::S_CVT_U32_F32:
return AMDGPU::V_CVT_U32_F32_e64;
6057 case AMDGPU::S_CVT_F32_F16:
6058 case AMDGPU::S_CVT_HI_F32_F16:
6059 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F32_F16_t16_e64
6060 : AMDGPU::V_CVT_F32_F16_fake16_e64;
6061 case AMDGPU::S_CVT_F16_F32:
6062 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F16_F32_t16_e64
6063 : AMDGPU::V_CVT_F16_F32_fake16_e64;
6064 case AMDGPU::S_CEIL_F32:
return AMDGPU::V_CEIL_F32_e64;
6065 case AMDGPU::S_FLOOR_F32:
return AMDGPU::V_FLOOR_F32_e64;
6066 case AMDGPU::S_TRUNC_F32:
return AMDGPU::V_TRUNC_F32_e64;
6067 case AMDGPU::S_RNDNE_F32:
return AMDGPU::V_RNDNE_F32_e64;
6068 case AMDGPU::S_CEIL_F16:
6069 return ST.useRealTrue16Insts() ? AMDGPU::V_CEIL_F16_t16_e64
6070 : AMDGPU::V_CEIL_F16_fake16_e64;
6071 case AMDGPU::S_FLOOR_F16:
6072 return ST.useRealTrue16Insts() ? AMDGPU::V_FLOOR_F16_t16_e64
6073 : AMDGPU::V_FLOOR_F16_fake16_e64;
6074 case AMDGPU::S_TRUNC_F16:
6075 return ST.useRealTrue16Insts() ? AMDGPU::V_TRUNC_F16_t16_e64
6076 : AMDGPU::V_TRUNC_F16_fake16_e64;
6077 case AMDGPU::S_RNDNE_F16:
6078 return ST.useRealTrue16Insts() ? AMDGPU::V_RNDNE_F16_t16_e64
6079 : AMDGPU::V_RNDNE_F16_fake16_e64;
6080 case AMDGPU::S_ADD_F32:
return AMDGPU::V_ADD_F32_e64;
6081 case AMDGPU::S_SUB_F32:
return AMDGPU::V_SUB_F32_e64;
6082 case AMDGPU::S_MIN_F32:
return AMDGPU::V_MIN_F32_e64;
6083 case AMDGPU::S_MAX_F32:
return AMDGPU::V_MAX_F32_e64;
6084 case AMDGPU::S_MINIMUM_F32:
return AMDGPU::V_MINIMUM_F32_e64;
6085 case AMDGPU::S_MAXIMUM_F32:
return AMDGPU::V_MAXIMUM_F32_e64;
6086 case AMDGPU::S_MUL_F32:
return AMDGPU::V_MUL_F32_e64;
6087 case AMDGPU::S_ADD_F16:
6088 return ST.useRealTrue16Insts() ? AMDGPU::V_ADD_F16_t16_e64
6089 : AMDGPU::V_ADD_F16_fake16_e64;
6090 case AMDGPU::S_SUB_F16:
6091 return ST.useRealTrue16Insts() ? AMDGPU::V_SUB_F16_t16_e64
6092 : AMDGPU::V_SUB_F16_fake16_e64;
6093 case AMDGPU::S_MIN_F16:
6094 return ST.useRealTrue16Insts() ? AMDGPU::V_MIN_F16_t16_e64
6095 : AMDGPU::V_MIN_F16_fake16_e64;
6096 case AMDGPU::S_MAX_F16:
6097 return ST.useRealTrue16Insts() ? AMDGPU::V_MAX_F16_t16_e64
6098 : AMDGPU::V_MAX_F16_fake16_e64;
6099 case AMDGPU::S_MINIMUM_F16:
6100 return ST.useRealTrue16Insts() ? AMDGPU::V_MINIMUM_F16_t16_e64
6101 : AMDGPU::V_MINIMUM_F16_fake16_e64;
6102 case AMDGPU::S_MAXIMUM_F16:
6103 return ST.useRealTrue16Insts() ? AMDGPU::V_MAXIMUM_F16_t16_e64
6104 : AMDGPU::V_MAXIMUM_F16_fake16_e64;
6105 case AMDGPU::S_MUL_F16:
6106 return ST.useRealTrue16Insts() ? AMDGPU::V_MUL_F16_t16_e64
6107 : AMDGPU::V_MUL_F16_fake16_e64;
6108 case AMDGPU::S_CVT_PK_RTZ_F16_F32:
return AMDGPU::V_CVT_PKRTZ_F16_F32_e64;
6109 case AMDGPU::S_FMAC_F32:
return AMDGPU::V_FMAC_F32_e64;
6110 case AMDGPU::S_FMAC_F16:
6111 return ST.useRealTrue16Insts() ? AMDGPU::V_FMAC_F16_t16_e64
6112 : AMDGPU::V_FMAC_F16_fake16_e64;
6113 case AMDGPU::S_FMAMK_F32:
return AMDGPU::V_FMAMK_F32;
6114 case AMDGPU::S_FMAAK_F32:
return AMDGPU::V_FMAAK_F32;
6115 case AMDGPU::S_CMP_LT_F32:
return AMDGPU::V_CMP_LT_F32_e64;
6116 case AMDGPU::S_CMP_EQ_F32:
return AMDGPU::V_CMP_EQ_F32_e64;
6117 case AMDGPU::S_CMP_LE_F32:
return AMDGPU::V_CMP_LE_F32_e64;
6118 case AMDGPU::S_CMP_GT_F32:
return AMDGPU::V_CMP_GT_F32_e64;
6119 case AMDGPU::S_CMP_LG_F32:
return AMDGPU::V_CMP_LG_F32_e64;
6120 case AMDGPU::S_CMP_GE_F32:
return AMDGPU::V_CMP_GE_F32_e64;
6121 case AMDGPU::S_CMP_O_F32:
return AMDGPU::V_CMP_O_F32_e64;
6122 case AMDGPU::S_CMP_U_F32:
return AMDGPU::V_CMP_U_F32_e64;
6123 case AMDGPU::S_CMP_NGE_F32:
return AMDGPU::V_CMP_NGE_F32_e64;
6124 case AMDGPU::S_CMP_NLG_F32:
return AMDGPU::V_CMP_NLG_F32_e64;
6125 case AMDGPU::S_CMP_NGT_F32:
return AMDGPU::V_CMP_NGT_F32_e64;
6126 case AMDGPU::S_CMP_NLE_F32:
return AMDGPU::V_CMP_NLE_F32_e64;
6127 case AMDGPU::S_CMP_NEQ_F32:
return AMDGPU::V_CMP_NEQ_F32_e64;
6128 case AMDGPU::S_CMP_NLT_F32:
return AMDGPU::V_CMP_NLT_F32_e64;
6129 case AMDGPU::S_CMP_LT_F16:
6130 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LT_F16_t16_e64
6131 : AMDGPU::V_CMP_LT_F16_fake16_e64;
6132 case AMDGPU::S_CMP_EQ_F16:
6133 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_EQ_F16_t16_e64
6134 : AMDGPU::V_CMP_EQ_F16_fake16_e64;
6135 case AMDGPU::S_CMP_LE_F16:
6136 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LE_F16_t16_e64
6137 : AMDGPU::V_CMP_LE_F16_fake16_e64;
6138 case AMDGPU::S_CMP_GT_F16:
6139 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GT_F16_t16_e64
6140 : AMDGPU::V_CMP_GT_F16_fake16_e64;
6141 case AMDGPU::S_CMP_LG_F16:
6142 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LG_F16_t16_e64
6143 : AMDGPU::V_CMP_LG_F16_fake16_e64;
6144 case AMDGPU::S_CMP_GE_F16:
6145 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GE_F16_t16_e64
6146 : AMDGPU::V_CMP_GE_F16_fake16_e64;
6147 case AMDGPU::S_CMP_O_F16:
6148 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_O_F16_t16_e64
6149 : AMDGPU::V_CMP_O_F16_fake16_e64;
6150 case AMDGPU::S_CMP_U_F16:
6151 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_U_F16_t16_e64
6152 : AMDGPU::V_CMP_U_F16_fake16_e64;
6153 case AMDGPU::S_CMP_NGE_F16:
6154 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGE_F16_t16_e64
6155 : AMDGPU::V_CMP_NGE_F16_fake16_e64;
6156 case AMDGPU::S_CMP_NLG_F16:
6157 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLG_F16_t16_e64
6158 : AMDGPU::V_CMP_NLG_F16_fake16_e64;
6159 case AMDGPU::S_CMP_NGT_F16:
6160 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGT_F16_t16_e64
6161 : AMDGPU::V_CMP_NGT_F16_fake16_e64;
6162 case AMDGPU::S_CMP_NLE_F16:
6163 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLE_F16_t16_e64
6164 : AMDGPU::V_CMP_NLE_F16_fake16_e64;
6165 case AMDGPU::S_CMP_NEQ_F16:
6166 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NEQ_F16_t16_e64
6167 : AMDGPU::V_CMP_NEQ_F16_fake16_e64;
6168 case AMDGPU::S_CMP_NLT_F16:
6169 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLT_F16_t16_e64
6170 : AMDGPU::V_CMP_NLT_F16_fake16_e64;
6171 case AMDGPU::V_S_EXP_F32_e64:
return AMDGPU::V_EXP_F32_e64;
6172 case AMDGPU::V_S_EXP_F16_e64:
6173 return ST.useRealTrue16Insts() ? AMDGPU::V_EXP_F16_t16_e64
6174 : AMDGPU::V_EXP_F16_fake16_e64;
6175 case AMDGPU::V_S_LOG_F32_e64:
return AMDGPU::V_LOG_F32_e64;
6176 case AMDGPU::V_S_LOG_F16_e64:
6177 return ST.useRealTrue16Insts() ? AMDGPU::V_LOG_F16_t16_e64
6178 : AMDGPU::V_LOG_F16_fake16_e64;
6179 case AMDGPU::V_S_RCP_F32_e64:
return AMDGPU::V_RCP_F32_e64;
6180 case AMDGPU::V_S_RCP_F16_e64:
6181 return ST.useRealTrue16Insts() ? AMDGPU::V_RCP_F16_t16_e64
6182 : AMDGPU::V_RCP_F16_fake16_e64;
6183 case AMDGPU::V_S_RSQ_F32_e64:
return AMDGPU::V_RSQ_F32_e64;
6184 case AMDGPU::V_S_RSQ_F16_e64:
6185 return ST.useRealTrue16Insts() ? AMDGPU::V_RSQ_F16_t16_e64
6186 : AMDGPU::V_RSQ_F16_fake16_e64;
6187 case AMDGPU::V_S_SQRT_F32_e64:
return AMDGPU::V_SQRT_F32_e64;
6188 case AMDGPU::V_S_SQRT_F16_e64:
6189 return ST.useRealTrue16Insts() ? AMDGPU::V_SQRT_F16_t16_e64
6190 : AMDGPU::V_SQRT_F16_fake16_e64;
6193 "Unexpected scalar opcode without corresponding vector one!");
6242 "Not a whole wave func");
6245 if (
MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_SETUP ||
6246 MI.getOpcode() == AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
6253 unsigned OpNo)
const {
6255 if (
MI.isVariadic() || OpNo >=
Desc.getNumOperands() ||
6256 Desc.operands()[OpNo].RegClass == -1) {
6259 if (Reg.isVirtual()) {
6263 return RI.getPhysRegBaseClass(Reg);
6266 int16_t RegClass = getOpRegClassID(
Desc.operands()[OpNo]);
6267 return RegClass < 0 ? nullptr : RI.getRegClass(RegClass);
6275 unsigned RCID = getOpRegClassID(
get(
MI.getOpcode()).operands()[
OpIdx]);
6277 unsigned Size = RI.getRegSizeInBits(*RC);
6278 unsigned Opcode = (
Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
6279 :
Size == 16 ? AMDGPU::V_MOV_B16_t16_e64
6280 : AMDGPU::V_MOV_B32_e32;
6282 Opcode = AMDGPU::COPY;
6283 else if (RI.isSGPRClass(RC))
6284 Opcode = (
Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
6298 return RI.getSubReg(SuperReg.
getReg(), SubIdx);
6304 unsigned NewSubIdx = RI.composeSubRegIndices(SuperReg.
getSubReg(), SubIdx);
6315 if (SubIdx == AMDGPU::sub0)
6317 if (SubIdx == AMDGPU::sub1)
6329void SIInstrInfo::swapOperands(
MachineInstr &Inst)
const {
6345 if (Reg.isPhysical())
6355 return RI.getMatchingSuperRegClass(SuperRC, DRC, MO.
getSubReg()) !=
nullptr;
6358 return RI.getCommonSubClass(DRC, RC) !=
nullptr;
6365 unsigned Opc =
MI.getOpcode();
6371 constexpr AMDGPU::OpName OpNames[] = {
6372 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2};
6375 int SrcIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(), OpNames[
I]);
6376 if (
static_cast<unsigned>(SrcIdx) ==
OpIdx &&
6386 bool IsAGPR = RI.isAGPR(MRI, MO.
getReg());
6387 if (IsAGPR && !ST.hasMAIInsts())
6393 const int VDstIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vdst);
6394 const int DataIdx = AMDGPU::getNamedOperandIdx(
6395 Opc,
isDS(
Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
6396 if ((
int)
OpIdx == VDstIdx && DataIdx != -1 &&
6397 MI.getOperand(DataIdx).isReg() &&
6398 RI.isAGPR(MRI,
MI.getOperand(DataIdx).getReg()) != IsAGPR)
6400 if ((
int)
OpIdx == DataIdx) {
6401 if (VDstIdx != -1 &&
6402 RI.isAGPR(MRI,
MI.getOperand(VDstIdx).getReg()) != IsAGPR)
6405 const int Data1Idx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::data1);
6406 if (Data1Idx != -1 &&
MI.getOperand(Data1Idx).isReg() &&
6407 RI.isAGPR(MRI,
MI.getOperand(Data1Idx).getReg()) != IsAGPR)
6412 if (
Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() &&
6413 (
int)
OpIdx == AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0) &&
6414 RI.isSGPRReg(MRI, MO.
getReg()))
6417 if (ST.hasFlatScratchHiInB64InstHazard() &&
6424 if (
Opc == AMDGPU::S_BITCMP0_B64 ||
Opc == AMDGPU::S_BITCMP1_B64)
6445 constexpr unsigned NumOps = 3;
6446 constexpr AMDGPU::OpName OpNames[
NumOps * 2] = {
6447 AMDGPU::OpName::src0, AMDGPU::OpName::src1,
6448 AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers,
6449 AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers};
6454 int SrcIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(), OpNames[SrcN]);
6457 MO = &
MI.getOperand(SrcIdx);
6460 if (!MO->
isReg() || !RI.isSGPRReg(MRI, MO->
getReg()))
6464 AMDGPU::getNamedOperandIdx(
MI.getOpcode(), OpNames[
NumOps + SrcN]);
6468 unsigned Mods =
MI.getOperand(ModsIdx).getImm();
6472 return !OpSel && !OpSelHi;
6481 int64_t RegClass = getOpRegClassID(OpInfo);
6483 RegClass != -1 ? RI.getRegClass(RegClass) :
nullptr;
6492 int ConstantBusLimit = ST.getConstantBusLimit(
MI.getOpcode());
6493 int LiteralLimit = !
isVOP3(
MI) || ST.hasVOP3Literal() ? 1 : 0;
6497 if (!LiteralLimit--)
6507 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
6515 if (--ConstantBusLimit <= 0)
6527 if (!LiteralLimit--)
6529 if (--ConstantBusLimit <= 0)
6535 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
6539 if (!
Op.isReg() && !
Op.isFI() && !
Op.isRegMask() &&
6541 !
Op.isIdenticalTo(*MO))
6551 }
else if (IsInlineConst && ST.hasNoF16PseudoScalarTransInlineConstants() &&
6565 bool Is64BitOp = Is64BitFPOp ||
6572 (!ST.has64BitLiterals() || InstDesc.
getSize() != 4))
6581 if (!Is64BitFPOp && (int32_t)Imm < 0 &&
6599 bool IsGFX950Only = ST.hasGFX950Insts();
6600 bool IsGFX940Only = ST.hasGFX940Insts();
6602 if (!IsGFX950Only && !IsGFX940Only)
6620 unsigned Opcode =
MI.getOpcode();
6622 case AMDGPU::V_CVT_PK_BF8_F32_e64:
6623 case AMDGPU::V_CVT_PK_FP8_F32_e64:
6624 case AMDGPU::V_MQSAD_PK_U16_U8_e64:
6625 case AMDGPU::V_MQSAD_U32_U8_e64:
6626 case AMDGPU::V_PK_ADD_F16:
6627 case AMDGPU::V_PK_ADD_F32:
6628 case AMDGPU::V_PK_ADD_I16:
6629 case AMDGPU::V_PK_ADD_U16:
6630 case AMDGPU::V_PK_ASHRREV_I16:
6631 case AMDGPU::V_PK_FMA_F16:
6632 case AMDGPU::V_PK_FMA_F32:
6633 case AMDGPU::V_PK_FMAC_F16_e32:
6634 case AMDGPU::V_PK_FMAC_F16_e64:
6635 case AMDGPU::V_PK_LSHLREV_B16:
6636 case AMDGPU::V_PK_LSHRREV_B16:
6637 case AMDGPU::V_PK_MAD_I16:
6638 case AMDGPU::V_PK_MAD_U16:
6639 case AMDGPU::V_PK_MAX_F16:
6640 case AMDGPU::V_PK_MAX_I16:
6641 case AMDGPU::V_PK_MAX_U16:
6642 case AMDGPU::V_PK_MIN_F16:
6643 case AMDGPU::V_PK_MIN_I16:
6644 case AMDGPU::V_PK_MIN_U16:
6645 case AMDGPU::V_PK_MOV_B32:
6646 case AMDGPU::V_PK_MUL_F16:
6647 case AMDGPU::V_PK_MUL_F32:
6648 case AMDGPU::V_PK_MUL_LO_U16:
6649 case AMDGPU::V_PK_SUB_I16:
6650 case AMDGPU::V_PK_SUB_U16:
6651 case AMDGPU::V_QSAD_PK_U16_U8_e64:
6660 unsigned Opc =
MI.getOpcode();
6663 int Src0Idx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0);
6666 int Src1Idx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src1);
6672 if (HasImplicitSGPR && ST.getConstantBusLimit(
Opc) <= 1 && Src0.
isReg() &&
6673 RI.isSGPRReg(MRI, Src0.
getReg()))
6679 if (
Opc == AMDGPU::V_WRITELANE_B32) {
6681 if (Src0.
isReg() && RI.isVGPR(MRI, Src0.
getReg())) {
6687 if (Src1.
isReg() && RI.isVGPR(MRI, Src1.
getReg())) {
6698 if (
Opc == AMDGPU::V_FMAC_F32_e32 ||
Opc == AMDGPU::V_FMAC_F16_e32) {
6699 int Src2Idx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src2);
6700 if (!RI.isVGPR(MRI,
MI.getOperand(Src2Idx).getReg()))
6712 if (
Opc == AMDGPU::V_READLANE_B32 && Src1.
isReg() &&
6713 RI.isVGPR(MRI, Src1.
getReg())) {
6726 if (HasImplicitSGPR || !
MI.isCommutable()) {
6743 if (CommutedOpc == -1) {
6748 MI.setDesc(
get(CommutedOpc));
6752 bool Src0Kill = Src0.
isKill();
6756 else if (Src1.
isReg()) {
6771 unsigned Opc =
MI.getOpcode();
6774 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src0),
6775 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src1),
6776 AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::src2)
6779 if (
Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
6780 Opc == AMDGPU::V_PERMLANEX16_B32_e64 ||
6781 Opc == AMDGPU::V_PERMLANE_BCAST_B32_e64 ||
6782 Opc == AMDGPU::V_PERMLANE_UP_B32_e64 ||
6783 Opc == AMDGPU::V_PERMLANE_DOWN_B32_e64 ||
6784 Opc == AMDGPU::V_PERMLANE_XOR_B32_e64 ||
6785 Opc == AMDGPU::V_PERMLANE_IDX_GEN_B32_e64) {
6795 if (VOP3Idx[2] != -1) {
6807 int ConstantBusLimit = ST.getConstantBusLimit(
Opc);
6808 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
6810 Register SGPRReg = findUsedSGPR(
MI, VOP3Idx);
6812 SGPRsUsed.
insert(SGPRReg);
6816 for (
int Idx : VOP3Idx) {
6825 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
6837 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.
getReg())))
6844 if (ConstantBusLimit > 0) {
6856 if ((
Opc == AMDGPU::V_FMAC_F32_e64 ||
Opc == AMDGPU::V_FMAC_F16_e64) &&
6857 !RI.isVGPR(MRI,
MI.getOperand(VOP3Idx[2]).getReg()))
6863 for (
unsigned I = 0;
I < 3; ++
I) {
6876 SRC = RI.getCommonSubClass(SRC, DstRC);
6879 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
6881 if (RI.hasAGPRs(VRC)) {
6882 VRC = RI.getEquivalentVGPRClass(VRC);
6885 get(TargetOpcode::COPY), NewSrcReg)
6892 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
6898 for (
unsigned i = 0; i < SubRegs; ++i) {
6901 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
6902 .
addReg(SrcReg, {}, RI.getSubRegFromChannel(i));
6908 get(AMDGPU::REG_SEQUENCE), DstReg);
6909 for (
unsigned i = 0; i < SubRegs; ++i) {
6911 MIB.
addImm(RI.getSubRegFromChannel(i));
6924 if (SBase && !RI.isSGPRClass(MRI.
getRegClass(SBase->getReg()))) {
6926 SBase->setReg(SGPR);
6929 if (SOff && !RI.isSGPRReg(MRI, SOff->
getReg())) {
6937 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::saddr);
6938 if (OldSAddrIdx < 0)
6951 if (RI.isSGPRReg(MRI, SAddr.
getReg()))
6954 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
6955 if (NewVAddrIdx < 0)
6958 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vaddr);
6962 if (OldVAddrIdx >= 0) {
6976 if (OldVAddrIdx == NewVAddrIdx) {
6987 assert(OldSAddrIdx == NewVAddrIdx);
6989 if (OldVAddrIdx >= 0) {
6990 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
6991 AMDGPU::OpName::vdst_in);
6995 if (NewVDstIn != -1) {
6996 int OldVDstIn = AMDGPU::getNamedOperandIdx(
Opc, AMDGPU::OpName::vdst_in);
7002 if (NewVDstIn != -1) {
7003 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
7044 unsigned OpSubReg =
Op.getSubReg();
7047 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
7063 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
7066 bool ImpDef = Def->isImplicitDef();
7067 while (!ImpDef && Def && Def->isCopy()) {
7068 if (Def->getOperand(1).getReg().isPhysical())
7071 ImpDef = Def && Def->isImplicitDef();
7073 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
7089 const auto *BoolXExecRC =
TRI->getWaveMaskRegClass();
7093 for (
auto [Idx, ScalarOp] :
enumerate(ScalarOps)) {
7094 unsigned RegSize =
TRI->getRegSizeInBits(ScalarOp->getReg(), MRI);
7095 unsigned NumSubRegs =
RegSize / 32;
7096 Register VScalarOp = ScalarOp->getReg();
7099 TII.getRegClass(
TII.get(AMDGPU::V_READFIRSTLANE_B32), 1);
7101 if (NumSubRegs == 1) {
7104 TRI->getCommonSubClass(VScalarOpRC, RFLSrcRC);
7105 Common != VScalarOpRC) {
7112 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::V_READFIRSTLANE_B32), CurReg)
7117 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::V_CMP_EQ_U32_e64), NewCondReg)
7123 CondReg = NewCondReg;
7133 if (PhySGPRs.empty() || !PhySGPRs[Idx].isValid())
7134 ScalarOp->setReg(CurReg);
7137 BuildMI(*ScalarOp->getParent()->getParent(), ScalarOp->getParent(),
DL,
7138 TII.get(AMDGPU::COPY), PhySGPRs[Idx])
7140 ScalarOp->setReg(PhySGPRs[Idx]);
7142 ScalarOp->setIsKill();
7146 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 &&
7147 "Unhandled register size");
7149 for (
unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
7156 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
7157 .
addReg(VScalarOp, VScalarOpUndef,
TRI->getSubRegFromChannel(Idx));
7160 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
7161 .
addReg(VScalarOp, VScalarOpUndef,
7162 TRI->getSubRegFromChannel(Idx + 1));
7169 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::REG_SEQUENCE), CurReg)
7179 if (NumSubRegs <= 2)
7180 Cmp.addReg(VScalarOp);
7182 Cmp.addReg(VScalarOp, VScalarOpUndef,
7183 TRI->getSubRegFromChannel(Idx, 2));
7187 CondReg = NewCondReg;
7197 const auto *SScalarOpRC =
7203 BuildMI(LoopBB,
I,
DL,
TII.get(AMDGPU::REG_SEQUENCE), SScalarOp);
7204 unsigned Channel = 0;
7205 for (
Register Piece : ReadlanePieces) {
7206 Merge.addReg(Piece).addImm(
TRI->getSubRegFromChannel(Channel++));
7210 if (PhySGPRs.empty() || !PhySGPRs[Idx].isValid())
7211 ScalarOp->setReg(SScalarOp);
7213 BuildMI(*ScalarOp->getParent()->getParent(), ScalarOp->getParent(),
DL,
7214 TII.get(AMDGPU::COPY), PhySGPRs[Idx])
7216 ScalarOp->setReg(PhySGPRs[Idx]);
7218 ScalarOp->setIsKill();
7250 assert((PhySGPRs.empty() || PhySGPRs.size() == ScalarOps.
size()) &&
7251 "Physical SGPRs must be empty or match the number of scalar operands");
7257 if (!Begin.isValid())
7259 if (!End.isValid()) {
7265 const auto *BoolXExecRC =
TRI->getWaveMaskRegClass();
7274 std::numeric_limits<unsigned>::max()) !=
7292 for (
auto I = Begin;
I != AfterMI;
I++) {
7293 for (
auto &MO :
I->all_uses())
7329 for (
auto &Succ : RemainderBB->
successors()) {
7354static std::tuple<unsigned, unsigned>
7362 TII.buildExtractSubReg(
MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
7363 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
7370 uint64_t RsrcDataFormat =
TII.getDefaultRsrcDataFormat();
7387 .
addImm(AMDGPU::sub0_sub1)
7393 return std::tuple(RsrcPtr, NewSRsrc);
7430 if (
MI.getOpcode() == AMDGPU::PHI) {
7432 assert(!RI.isSGPRClass(VRC));
7435 for (
unsigned I = 1, E =
MI.getNumOperands();
I != E;
I += 2) {
7437 if (!
Op.isReg() || !
Op.getReg().isVirtual())
7453 if (
MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
7456 if (RI.hasVGPRs(DstRC)) {
7460 for (
unsigned I = 1, E =
MI.getNumOperands();
I != E;
I += 2) {
7462 if (!
Op.isReg() || !
Op.getReg().isVirtual())
7480 if (
MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
7485 if (DstRC != Src0RC) {
7494 if (
MI.getOpcode() == AMDGPU::SI_INIT_M0) {
7496 if (Src.isReg() && RI.hasVectorRegisters(MRI.
getRegClass(Src.getReg())))
7502 if (
MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
7503 MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
7504 MI.getOpcode() == AMDGPU::S_QUADMASK_B64 ||
7505 MI.getOpcode() == AMDGPU::S_WQM_B32 ||
7506 MI.getOpcode() == AMDGPU::S_WQM_B64 ||
7507 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U32 ||
7508 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U64) {
7510 if (Src.isReg() && RI.hasVectorRegisters(MRI.
getRegClass(Src.getReg())))
7523 ? AMDGPU::OpName::rsrc
7524 : AMDGPU::OpName::srsrc;
7529 AMDGPU::OpName SampOpName =
7530 isMIMG(
MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
7539 if (
MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
7547 if (
MI.getOpcode() == AMDGPU::S_SLEEP_VAR) {
7551 AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::src0);
7561 if (
MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_d2 ||
7562 MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_d4 ||
7563 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_d2 ||
7564 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_d4) {
7566 if (Src.isReg() && RI.hasVectorRegisters(MRI.
getRegClass(Src.getReg())))
7573 bool isSoffsetLegal =
true;
7575 AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::soffset);
7576 if (SoffsetIdx != -1) {
7580 isSoffsetLegal =
false;
7584 bool isRsrcLegal =
true;
7586 AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::srsrc);
7587 if (RsrcIdx != -1) {
7589 if (Rsrc->
isReg() && !RI.isSGPRReg(MRI, Rsrc->
getReg()))
7590 isRsrcLegal =
false;
7594 if (isRsrcLegal && isSoffsetLegal)
7622 const auto *BoolXExecRC = RI.getWaveMaskRegClass();
7626 unsigned RsrcPtr, NewSRsrc;
7633 .
addReg(RsrcPtr, {}, AMDGPU::sub0)
7634 .addReg(VAddr->
getReg(), {}, AMDGPU::sub0)
7640 .
addReg(RsrcPtr, {}, AMDGPU::sub1)
7641 .addReg(VAddr->
getReg(), {}, AMDGPU::sub1)
7654 }
else if (!VAddr && ST.hasAddr64()) {
7658 "FIXME: Need to emit flat atomics here");
7660 unsigned RsrcPtr, NewSRsrc;
7686 MIB.
addImm(CPol->getImm());
7691 MIB.
addImm(TFE->getImm());
7711 MI.removeFromParent();
7716 .
addReg(RsrcPtr, {}, AMDGPU::sub0)
7717 .addImm(AMDGPU::sub0)
7718 .
addReg(RsrcPtr, {}, AMDGPU::sub1)
7719 .addImm(AMDGPU::sub1);
7722 if (!isSoffsetLegal) {
7733 if (!isSoffsetLegal) {
7745 AMDGPU::getNamedOperandIdx(
MI->getOpcode(), AMDGPU::OpName::srsrc);
7746 if (RsrcIdx != -1) {
7747 DeferredList.insert(
MI);
7752 return DeferredList.contains(
MI);
7762 if (!ST.useRealTrue16Insts())
7765 unsigned Opcode =
MI.getOpcode();
7769 OpIdx >=
get(Opcode).getNumOperands() ||
7770 get(Opcode).operands()[
OpIdx].RegClass == -1)
7774 if (!
Op.isReg() || !
Op.getReg().isVirtual())
7778 if (!RI.isVGPRClass(CurrRC))
7781 int16_t RCID = getOpRegClassID(
get(Opcode).operands()[
OpIdx]);
7783 if (RI.getMatchingSuperRegClass(CurrRC, ExpectedRC, AMDGPU::lo16)) {
7784 Op.setSubReg(AMDGPU::lo16);
7785 }
else if (RI.getMatchingSuperRegClass(ExpectedRC, CurrRC, AMDGPU::lo16)) {
7795 Op.setReg(NewDstReg);
7808 assert(
MI->getOpcode() == AMDGPU::SI_CALL_ISEL &&
7809 "This only handle waterfall for SI_CALL_ISEL");
7816 while (Start->getOpcode() != AMDGPU::ADJCALLSTACKUP)
7819 while (End->getOpcode() != AMDGPU::ADJCALLSTACKDOWN)
7824 while (End !=
MBB.end() && End->isCopy() &&
7825 MI->definesRegister(End->getOperand(1).getReg(), &RI))
7835 while (!Worklist.
empty()) {
7841 moveToVALUImpl(Worklist, MDT, Inst, WaterFalls, V2SPhyCopiesToErase);
7847 moveToVALUImpl(Worklist, MDT, *Inst, WaterFalls, V2SPhyCopiesToErase);
7849 "Deferred MachineInstr are not supposed to re-populate worklist");
7852 for (std::pair<MachineInstr *, V2PhysSCopyInfo> &Entry : WaterFalls) {
7853 if (Entry.first->getOpcode() == AMDGPU::SI_CALL_ISEL)
7855 Entry.second.SGPRs);
7858 for (std::pair<MachineInstr *, bool> Entry : V2SPhyCopiesToErase)
7860 Entry.first->eraseFromParent();
7868 if (SubRegIndices.
size() <= 1) {
7871 get(AMDGPU::V_READFIRSTLANE_B32), NewDst)
7878 for (int16_t Indice : SubRegIndices) {
7881 get(AMDGPU::V_READFIRSTLANE_B32), NewDst)
7888 get(AMDGPU::REG_SEQUENCE), DstReg);
7889 for (
unsigned i = 0; i < SubRegIndices.size(); ++i) {
7891 MIB.
addImm(RI.getSubRegFromChannel(i));
7901 if (DstReg == AMDGPU::M0) {
7914 if (
I->getOpcode() == AMDGPU::SI_CALL_ISEL) {
7916 for (
unsigned i = 0; i <
UseMI->getNumOperands(); ++i) {
7917 if (
UseMI->getOperand(i).isReg() &&
7918 UseMI->getOperand(i).getReg() == DstReg) {
7922 V2SCopyInfo.MOs.push_back(MO);
7923 V2SCopyInfo.SGPRs.push_back(DstReg);
7927 }
else if (
I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG &&
7928 I->getOperand(0).isReg() &&
7929 I->getOperand(0).getReg() == DstReg) {
7932 }
else if (
I->readsRegister(DstReg, &RI)) {
7934 V2SPhyCopiesToErase[&Inst] =
false;
7936 if (
I->findRegisterDefOperand(DstReg, &RI))
7958 case AMDGPU::S_ADD_I32:
7959 case AMDGPU::S_SUB_I32: {
7963 std::tie(
Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
7971 case AMDGPU::S_MUL_U64:
7972 if (ST.hasVMulU64Inst()) {
7973 NewOpcode = AMDGPU::V_MUL_U64_e64;
7977 splitScalarSMulU64(Worklist, Inst, MDT);
7981 case AMDGPU::S_MUL_U64_U32_PSEUDO:
7982 case AMDGPU::S_MUL_I64_I32_PSEUDO:
7985 splitScalarSMulPseudo(Worklist, Inst, MDT);
7989 case AMDGPU::S_AND_B64:
7990 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
7994 case AMDGPU::S_OR_B64:
7995 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
7999 case AMDGPU::S_XOR_B64:
8000 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
8004 case AMDGPU::S_NAND_B64:
8005 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
8009 case AMDGPU::S_NOR_B64:
8010 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
8014 case AMDGPU::S_XNOR_B64:
8015 if (ST.hasDLInsts())
8016 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
8018 splitScalar64BitXnor(Worklist, Inst, MDT);
8022 case AMDGPU::S_ANDN2_B64:
8023 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
8027 case AMDGPU::S_ORN2_B64:
8028 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
8032 case AMDGPU::S_BREV_B64:
8033 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32,
true);
8037 case AMDGPU::S_NOT_B64:
8038 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
8042 case AMDGPU::S_BCNT1_I32_B64:
8043 splitScalar64BitBCNT(Worklist, Inst);
8047 case AMDGPU::S_BFE_I64:
8048 splitScalar64BitBFE(Worklist, Inst);
8052 case AMDGPU::S_FLBIT_I32_B64:
8053 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBH_U32_e32);
8056 case AMDGPU::S_FF1_I32_B64:
8057 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBL_B32_e32);
8061 case AMDGPU::S_LSHL_B32:
8062 if (ST.hasOnlyRevVALUShifts()) {
8063 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
8067 case AMDGPU::S_ASHR_I32:
8068 if (ST.hasOnlyRevVALUShifts()) {
8069 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
8073 case AMDGPU::S_LSHR_B32:
8074 if (ST.hasOnlyRevVALUShifts()) {
8075 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
8079 case AMDGPU::S_LSHL_B64:
8080 if (ST.hasOnlyRevVALUShifts()) {
8082 ? AMDGPU::V_LSHLREV_B64_pseudo_e64
8083 : AMDGPU::V_LSHLREV_B64_e64;
8087 case AMDGPU::S_ASHR_I64:
8088 if (ST.hasOnlyRevVALUShifts()) {
8089 NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
8093 case AMDGPU::S_LSHR_B64:
8094 if (ST.hasOnlyRevVALUShifts()) {
8095 NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
8100 case AMDGPU::S_ABS_I32:
8101 lowerScalarAbs(Worklist, Inst);
8105 case AMDGPU::S_ABSDIFF_I32:
8106 lowerScalarAbsDiff(Worklist, Inst);
8110 case AMDGPU::S_CBRANCH_SCC0:
8111 case AMDGPU::S_CBRANCH_SCC1: {
8114 bool IsSCC = CondReg == AMDGPU::SCC;
8122 case AMDGPU::S_BFE_U64:
8123 case AMDGPU::S_BFM_B64:
8126 case AMDGPU::S_PACK_LL_B32_B16:
8127 case AMDGPU::S_PACK_LH_B32_B16:
8128 case AMDGPU::S_PACK_HL_B32_B16:
8129 case AMDGPU::S_PACK_HH_B32_B16:
8130 movePackToVALU(Worklist, MRI, Inst);
8134 case AMDGPU::S_XNOR_B32:
8135 lowerScalarXnor(Worklist, Inst);
8139 case AMDGPU::S_NAND_B32:
8140 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
8144 case AMDGPU::S_NOR_B32:
8145 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
8149 case AMDGPU::S_ANDN2_B32:
8150 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
8154 case AMDGPU::S_ORN2_B32:
8155 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
8163 case AMDGPU::S_ADD_CO_PSEUDO:
8164 case AMDGPU::S_SUB_CO_PSEUDO: {
8165 unsigned Opc = (Inst.
getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
8166 ? AMDGPU::V_ADDC_U32_e64
8167 : AMDGPU::V_SUBB_U32_e64;
8168 const auto *CarryRC = RI.getWaveMaskRegClass();
8190 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8194 case AMDGPU::S_UADDO_PSEUDO:
8195 case AMDGPU::S_USUBO_PSEUDO: {
8201 unsigned Opc = (Inst.
getOpcode() == AMDGPU::S_UADDO_PSEUDO)
8202 ? AMDGPU::V_ADD_CO_U32_e64
8203 : AMDGPU::V_SUB_CO_U32_e64;
8215 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8219 case AMDGPU::S_LSHL1_ADD_U32:
8220 case AMDGPU::S_LSHL2_ADD_U32:
8221 case AMDGPU::S_LSHL3_ADD_U32:
8222 case AMDGPU::S_LSHL4_ADD_U32: {
8226 unsigned ShiftAmt = (Opcode == AMDGPU::S_LSHL1_ADD_U32 ? 1
8227 : Opcode == AMDGPU::S_LSHL2_ADD_U32 ? 2
8228 : Opcode == AMDGPU::S_LSHL3_ADD_U32 ? 3
8242 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8246 case AMDGPU::S_CSELECT_B32:
8247 case AMDGPU::S_CSELECT_B64:
8248 lowerSelect(Worklist, Inst, MDT);
8251 case AMDGPU::S_CMP_EQ_I32:
8252 case AMDGPU::S_CMP_LG_I32:
8253 case AMDGPU::S_CMP_GT_I32:
8254 case AMDGPU::S_CMP_GE_I32:
8255 case AMDGPU::S_CMP_LT_I32:
8256 case AMDGPU::S_CMP_LE_I32:
8257 case AMDGPU::S_CMP_EQ_U32:
8258 case AMDGPU::S_CMP_LG_U32:
8259 case AMDGPU::S_CMP_GT_U32:
8260 case AMDGPU::S_CMP_GE_U32:
8261 case AMDGPU::S_CMP_LT_U32:
8262 case AMDGPU::S_CMP_LE_U32:
8263 case AMDGPU::S_CMP_EQ_U64:
8264 case AMDGPU::S_CMP_LG_U64:
8265 case AMDGPU::S_CMP_LT_F32:
8266 case AMDGPU::S_CMP_EQ_F32:
8267 case AMDGPU::S_CMP_LE_F32:
8268 case AMDGPU::S_CMP_GT_F32:
8269 case AMDGPU::S_CMP_LG_F32:
8270 case AMDGPU::S_CMP_GE_F32:
8271 case AMDGPU::S_CMP_O_F32:
8272 case AMDGPU::S_CMP_U_F32:
8273 case AMDGPU::S_CMP_NGE_F32:
8274 case AMDGPU::S_CMP_NLG_F32:
8275 case AMDGPU::S_CMP_NGT_F32:
8276 case AMDGPU::S_CMP_NLE_F32:
8277 case AMDGPU::S_CMP_NEQ_F32:
8278 case AMDGPU::S_CMP_NLT_F32: {
8283 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src0_modifiers) >=
8297 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
8301 case AMDGPU::S_CMP_LT_F16:
8302 case AMDGPU::S_CMP_EQ_F16:
8303 case AMDGPU::S_CMP_LE_F16:
8304 case AMDGPU::S_CMP_GT_F16:
8305 case AMDGPU::S_CMP_LG_F16:
8306 case AMDGPU::S_CMP_GE_F16:
8307 case AMDGPU::S_CMP_O_F16:
8308 case AMDGPU::S_CMP_U_F16:
8309 case AMDGPU::S_CMP_NGE_F16:
8310 case AMDGPU::S_CMP_NLG_F16:
8311 case AMDGPU::S_CMP_NGT_F16:
8312 case AMDGPU::S_CMP_NLE_F16:
8313 case AMDGPU::S_CMP_NEQ_F16:
8314 case AMDGPU::S_CMP_NLT_F16: {
8337 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
8341 case AMDGPU::S_CVT_HI_F32_F16: {
8344 if (ST.useRealTrue16Insts()) {
8349 .
addReg(TmpReg, {}, AMDGPU::hi16)
8365 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8369 case AMDGPU::S_MINIMUM_F32:
8370 case AMDGPU::S_MAXIMUM_F32: {
8382 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8386 case AMDGPU::S_MINIMUM_F16:
8387 case AMDGPU::S_MAXIMUM_F16: {
8389 ? &AMDGPU::VGPR_16RegClass
8390 : &AMDGPU::VGPR_32RegClass);
8402 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8406 case AMDGPU::V_S_EXP_F16_e64:
8407 case AMDGPU::V_S_LOG_F16_e64:
8408 case AMDGPU::V_S_RCP_F16_e64:
8409 case AMDGPU::V_S_RSQ_F16_e64:
8410 case AMDGPU::V_S_SQRT_F16_e64: {
8412 ? &AMDGPU::VGPR_16RegClass
8413 : &AMDGPU::VGPR_32RegClass);
8425 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8431 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
8439 if (NewOpcode == Opcode) {
8446 V2SPhyCopiesToErase);
8454 RI.getCommonSubClass(NewDstRC, SrcRC)) {
8461 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
8492 if (ST.useRealTrue16Insts() && Inst.
isCopy() &&
8496 if (RI.getMatchingSuperRegClass(NewDstRC, SrcRegRC, AMDGPU::lo16)) {
8502 get(AMDGPU::REG_SEQUENCE), NewDstReg)
8509 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8511 }
else if (RI.getMatchingSuperRegClass(SrcRegRC, NewDstRC,
8516 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8524 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8534 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8535 AMDGPU::OpName::src0_modifiers) >= 0)
8539 NewInstr->addOperand(Src);
8542 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
8545 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
8547 NewInstr.addImm(
Size);
8548 }
else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
8552 }
else if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
8557 "Scalar BFE is only implemented for constant width and offset");
8565 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8566 AMDGPU::OpName::src1_modifiers) >= 0)
8568 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src1) >= 0)
8570 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8571 AMDGPU::OpName::src2_modifiers) >= 0)
8573 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src2) >= 0)
8575 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::clamp) >= 0)
8577 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::omod) >= 0)
8579 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::op_sel) >= 0)
8585 NewInstr->addOperand(
Op);
8592 if (
Op.getReg() == AMDGPU::SCC) {
8594 if (
Op.isDef() && !
Op.isDead())
8595 addSCCDefUsersToVALUWorklist(
Op, Inst, Worklist);
8597 addSCCDefsToVALUWorklist(NewInstr, Worklist);
8602 if (NewInstr->getOperand(0).isReg() && NewInstr->getOperand(0).isDef()) {
8603 Register DstReg = NewInstr->getOperand(0).getReg();
8618 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8622std::pair<bool, MachineBasicBlock *>
8625 if (ST.hasAddNoCarryInsts()) {
8637 assert(
Opc == AMDGPU::S_ADD_I32 ||
Opc == AMDGPU::S_SUB_I32);
8639 unsigned NewOpc =
Opc == AMDGPU::S_ADD_I32 ?
8640 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
8651 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8652 return std::pair(
true, NewBB);
8655 return std::pair(
false,
nullptr);
8672 bool IsSCC = (CondReg == AMDGPU::SCC);
8686 const TargetRegisterClass *TC = RI.getWaveMaskRegClass();
8691 bool CopyFound =
false;
8692 for (MachineInstr &CandI :
8695 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI,
false,
false) !=
8697 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
8699 .
addReg(CandI.getOperand(1).getReg());
8711 ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
8720 MachineInstr *NewInst;
8721 if (Inst.
getOpcode() == AMDGPU::S_CSELECT_B32) {
8722 NewInst =
BuildMI(
MBB, MII,
DL,
get(AMDGPU::V_CNDMASK_B32_e64), NewDestReg)
8737 addUsersToMoveToVALUWorklist(NewDestReg, MRI, Worklist);
8752 unsigned SubOp = ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e32
8753 : AMDGPU::V_SUB_CO_U32_e32;
8764 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8781 unsigned SubOp = ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e32
8782 : AMDGPU::V_SUB_CO_U32_e32;
8795 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8809 if (ST.hasDLInsts()) {
8819 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8825 bool Src0IsSGPR = Src0.
isReg() &&
8827 bool Src1IsSGPR = Src1.
isReg() &&
8841 }
else if (Src1IsSGPR) {
8859 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8865 unsigned Opcode)
const {
8889 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8894 unsigned Opcode)
const {
8918 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8933 const MCInstrDesc &InstDesc =
get(Opcode);
8934 const TargetRegisterClass *Src0RC = Src0.
isReg() ?
8936 &AMDGPU::SGPR_32RegClass;
8938 const TargetRegisterClass *Src0SubRC =
8939 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
8942 AMDGPU::sub0, Src0SubRC);
8945 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
8946 const TargetRegisterClass *NewDestSubRC =
8947 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);
8950 MachineInstr &LoHalf = *
BuildMI(
MBB, MII,
DL, InstDesc, DestSub0).
add(SrcReg0Sub0);
8953 AMDGPU::sub1, Src0SubRC);
8956 MachineInstr &HiHalf = *
BuildMI(
MBB, MII,
DL, InstDesc, DestSub1).
add(SrcReg0Sub1);
8970 Worklist.
insert(&LoHalf);
8971 Worklist.
insert(&HiHalf);
8977 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
9000 const TargetRegisterClass *Src0SubRC =
9001 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
9002 if (RI.isSGPRClass(Src0SubRC))
9003 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
9004 const TargetRegisterClass *Src1SubRC =
9005 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
9006 if (RI.isSGPRClass(Src1SubRC))
9007 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
9011 MachineOperand Op0L =
9013 MachineOperand Op1L =
9015 MachineOperand Op0H =
9017 MachineOperand Op1H =
9036 MachineInstr *Op1L_Op0H =
9042 MachineInstr *Op1H_Op0L =
9048 MachineInstr *Carry =
9053 MachineInstr *LoHalf =
9063 MachineInstr *HiHalf =
9086 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
9109 const TargetRegisterClass *Src0SubRC =
9110 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
9111 if (RI.isSGPRClass(Src0SubRC))
9112 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
9113 const TargetRegisterClass *Src1SubRC =
9114 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
9115 if (RI.isSGPRClass(Src1SubRC))
9116 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
9120 MachineOperand Op0L =
9122 MachineOperand Op1L =
9126 unsigned NewOpc = Opc == AMDGPU::S_MUL_U64_U32_PSEUDO
9127 ? AMDGPU::V_MUL_HI_U32_e64
9128 : AMDGPU::V_MUL_HI_I32_e64;
9129 MachineInstr *HiHalf =
9132 MachineInstr *LoHalf =
9151 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
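When the operands' high halves are known zero or sign bits (the *_PSEUDO variants selected above), one mul-lo/mul-hi pair of the low halves suffices. A sketch under that assumption (plain C++, unsigned case):

#include <cstdint>

uint64_t mulU64FromU32(uint32_t A, uint32_t B) {
  uint32_t Lo = A * B;                              // V_MUL_LO_U32
  uint32_t Hi = uint32_t((uint64_t(A) * B) >> 32);  // V_MUL_HI_U32 (or _I32 signed)
  return (uint64_t(Hi) << 32) | Lo;
}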
9167 const MCInstrDesc &InstDesc = get(Opcode);
9168 const TargetRegisterClass *Src0RC = Src0.isReg() ?
9170 &AMDGPU::SGPR_32RegClass;
9172 const TargetRegisterClass *Src0SubRC =
9173 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
9174 const TargetRegisterClass *Src1RC = Src1.isReg() ?
9176 &AMDGPU::SGPR_32RegClass;
9178 const TargetRegisterClass *Src1SubRC =
9179 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
9182 AMDGPU::sub0, Src0SubRC);
9184 AMDGPU::sub0, Src1SubRC);
9186 AMDGPU::sub1, Src0SubRC);
9188 AMDGPU::sub1, Src1SubRC);
9191 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
9192 const TargetRegisterClass *NewDestSubRC =
9193 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);
9196 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
9201 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
9214 Worklist.insert(&LoHalf);
9215 Worklist.insert(&HiHalf);
9218 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
9238 MachineOperand *Op0;
9239 MachineOperand *Op1;
9241 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
9274 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
9275 const TargetRegisterClass *SrcRC = Src.isReg() ?
9277 &AMDGPU::SGPR_32RegClass;
9282 const TargetRegisterClass *SrcSubRC =
9283 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);
9286 AMDGPU::sub0, SrcSubRC);
9288 AMDGPU::sub1, SrcSubRC);
9298 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
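V_BCNT_U32_B32 takes an accumulator as its second source, so the 64-bit count above chains two 32-bit counts. Equivalent host arithmetic (plain C++; __builtin_popcount stands in for the hardware count):

#include <cstdint>

uint32_t popcount64(uint64_t V) {
  uint32_t Acc = __builtin_popcount(uint32_t(V));       // bcnt(lo, 0)
  return __builtin_popcount(uint32_t(V >> 32)) + Acc;   // bcnt(hi, Acc)
}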
9317 Offset == 0 && "Not implemented");
9340 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9350 .addReg(Src.getReg(), {}, AMDGPU::sub0);
9353 .addReg(Src.getReg(), {}, AMDGPU::sub0)
9359 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9378 const MCInstrDesc &InstDesc = get(Opcode);
9380 bool IsCtlz = Opcode == AMDGPU::V_FFBH_U32_e32;
9381 unsigned OpcodeAdd = ST.hasAddNoCarryInsts() ? AMDGPU::V_ADD_U32_e64
9382 : AMDGPU::V_ADD_CO_U32_e32;
9384 const TargetRegisterClass *SrcRC =
9385 Src.isReg() ? MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass;
9386 const TargetRegisterClass *SrcSubRC =
9387 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);
9389 MachineOperand SrcRegSub0 =
9391 MachineOperand SrcRegSub1 =
9404 .addReg(IsCtlz ? MidReg1 : MidReg2)
9410 .addReg(IsCtlz ? MidReg2 : MidReg1);
9414 addUsersToMoveToVALUWorklist(MidReg4, MRI, Worklist);
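As I read the split above, the ctlz case follows umin(ffbh(hi), uaddsat(ffbh(lo), 32)) with the halves mirrored for cttz, where ffbh reports ~0u for a zero input. A hedged host-side sketch:

#include <algorithm>
#include <cstdint>

static uint32_t ffbh(uint32_t V) { return V ? __builtin_clz(V) : ~0u; }

uint32_t ctlz64(uint64_t V) {
  uint32_t Hi = ffbh(uint32_t(V >> 32));
  uint32_t Lo = ffbh(uint32_t(V));
  uint32_t LoAdj = Lo > 0xffffffffu - 32 ? 0xffffffffu : Lo + 32; // saturating +32
  return std::min(Hi, LoAdj);                                     // V_MIN_U32
}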
9417 void SIInstrInfo::addUsersToMoveToVALUWorklist(
9421 MachineInstr &UseMI = *MO.getParent();
9425 switch (UseMI.getOpcode()) {
9428 case AMDGPU::SOFT_WQM:
9429 case AMDGPU::STRICT_WWM:
9430 case AMDGPU::STRICT_WQM:
9431 case AMDGPU::REG_SEQUENCE:
9433 case AMDGPU::INSERT_SUBREG:
9436 OpNo = MO.getOperandNo();
9443 if (!RI.hasVectorRegisters(OpRC))
9460 if (ST.useRealTrue16Insts()) {
9462 if (!Src0.isReg() || !RI.isVGPR(MRI, Src0.getReg())) {
9465 get(Src0.isImm() ? AMDGPU::V_MOV_B32_e32 : AMDGPU::COPY), SrcReg0)
9471 if (!Src1.isReg() || !RI.isVGPR(MRI, Src1.getReg())) {
9474 get(Src1.isImm() ? AMDGPU::V_MOV_B32_e32 : AMDGPU::COPY), SrcReg1)
9483 auto NewMI = BuildMI(*MBB, Inst, DL, get(AMDGPU::REG_SEQUENCE), ResultReg);
9485 case AMDGPU::S_PACK_LL_B32_B16:
9487 .addReg(SrcReg0, {},
9488 isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9489 .addImm(AMDGPU::lo16)
9490 .addReg(SrcReg1, {},
9491 isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9492 .addImm(AMDGPU::hi16);
9494 case AMDGPU::S_PACK_LH_B32_B16:
9496 .addReg(SrcReg0, {},
9497 isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9498 .addImm(AMDGPU::lo16)
9499 .addReg(SrcReg1, {}, AMDGPU::hi16)
9500 .addImm(AMDGPU::hi16);
9502 case AMDGPU::S_PACK_HL_B32_B16:
9503 NewMI.addReg(SrcReg0, {}, AMDGPU::hi16)
9504 .addImm(AMDGPU::lo16)
9505 .addReg(SrcReg1, {},
9506 isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9507 .addImm(AMDGPU::hi16);
9509 case AMDGPU::S_PACK_HH_B32_B16:
9510 NewMI.addReg(SrcReg0, {}, AMDGPU::hi16)
9511 .addImm(AMDGPU::lo16)
9512 .addReg(SrcReg1, {}, AMDGPU::hi16)
9513 .addImm(AMDGPU::hi16);
9521 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9526 case AMDGPU::S_PACK_LL_B32_B16: {
9545 case AMDGPU::S_PACK_LH_B32_B16: {
9555 case AMDGPU::S_PACK_HL_B32_B16: {
9566 case AMDGPU::S_PACK_HH_B32_B16: {
9586 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
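For reference, the four S_PACK_*_B32_B16 forms being expanded combine the low or high 16-bit half of each source into one dword. A plain C++ sketch of the semantics (illustrative names):

#include <cstdint>

uint32_t packLL(uint32_t A, uint32_t B) { return (A & 0xffff) | (B << 16); }
uint32_t packLH(uint32_t A, uint32_t B) { return (A & 0xffff) | (B & 0xffff0000u); }
uint32_t packHL(uint32_t A, uint32_t B) { return (A >> 16) | (B << 16); }
uint32_t packHH(uint32_t A, uint32_t B) { return (A >> 16) | (B & 0xffff0000u); }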
9595 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
9596 !Op.isDead() && Op.getParent() == &SCCDefInst);
9597 SmallVector<MachineInstr *, 4> CopyToDelete;
9600 for (MachineInstr &MI :
9604 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);
9607 MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
9608 Register DestReg = MI.getOperand(0).getReg();
9615 MI.getOperand(SCCIdx).setReg(NewCond);
9621 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1)
9624 for (auto &Copy : CopyToDelete)
9625 Copy->eraseFromParent();
9633 void SIInstrInfo::addSCCDefsToVALUWorklist(MachineInstr *SCCUseInst,
9639 for (MachineInstr &MI :
9642 if (MI.modifiesRegister(AMDGPU::VCC, &RI))
9644 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
9653 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
9661 case AMDGPU::REG_SEQUENCE:
9662 case AMDGPU::INSERT_SUBREG:
9664 case AMDGPU::SOFT_WQM:
9665 case AMDGPU::STRICT_WWM:
9666 case AMDGPU::STRICT_WQM: {
9668 if (RI.isAGPRClass(SrcRC)) {
9669 if (RI.isAGPRClass(NewDstRC))
9674 case AMDGPU::REG_SEQUENCE:
9675 case AMDGPU::INSERT_SUBREG:
9676 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
9679 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9685 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
9688 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9702 int OpIndices[3]) const {
9703 const MCInstrDesc &Desc = MI.getDesc();
9719 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
9721 for (unsigned i = 0; i < 3; ++i) {
9722 int Idx = OpIndices[i];
9726 const MachineOperand &MO = MI.getOperand(Idx);
9732 const TargetRegisterClass *OpRC =
9733 RI.getRegClass(getOpRegClassID(Desc.operands()[Idx]));
9734 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
9741 if (RI.isSGPRClass(RegRC))
9759 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
9760 SGPRReg = UsedSGPRs[0];
9763 if (!SGPRReg && UsedSGPRs[1]) {
9764 if (UsedSGPRs[1] == UsedSGPRs[2])
9765 SGPRReg = UsedSGPRs[1];
9772 AMDGPU::OpName OperandName) const {
9773 if (OperandName == AMDGPU::OpName::NUM_OPERAND_NAMES)
9776 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
9780 return &MI.getOperand(Idx);
9794 if (ST.isAmdHsaOS()) {
9797 RsrcDataFormat |= (1ULL << 56);
9802 RsrcDataFormat |= (2ULL << 59);
9805 return RsrcDataFormat;
9815 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;
9820 uint64_t IndexStride = ST.isWave64() ? 3 : 2;
9827 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
9833 unsigned Opc = MI.getOpcode();
9839 return get(Opc).mayLoad() &&
9846 if (!Addr || !Addr->isFI())
9855 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
9857 return MI.getOperand(VDataIdx).getReg();
9867 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::data);
9869 return MI.getOperand(DataIdx).getReg();
9903 unsigned Opc = MI.getOpcode();
9905 unsigned DescSize = Desc.getSize();
9910 unsigned Size = DescSize;
9914 if (MI.isBranch() && ST.hasOffset3fBug())
9925 bool HasLiteral = false;
9926 unsigned LiteralSize = 4;
9927 for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
9932 if (ST.has64BitLiterals()) {
9933 switch (OpInfo.OperandType) {
9956 return HasLiteral ? DescSize + LiteralSize : DescSize;
9961 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
9965 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
9966 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
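My reading of that formula, as a standalone sketch: 8 bytes of base encoding plus one extra dword for each additional group of four address registers (the rounding is copied verbatim from the line above; the interpretation is not authoritative):

unsigned mimgNSASize(int RSrcIdx, int VAddr0Idx) {
  int NumVAddrs = RSrcIdx - VAddr0Idx; // operands between vaddr0 and srsrc
  return 8 + 4 * ((NumVAddrs + 2) / 4);
}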
9970 case TargetOpcode::BUNDLE:
9971 return getInstBundleSize(MI);
9972 case TargetOpcode::INLINEASM:
9973 case TargetOpcode::INLINEASM_BR: {
9975 const char *AsmStr = MI.getOperand(0).getSymbolName();
9979 if (MI.isMetaInstruction())
9983 const auto *D16Info = AMDGPU::getT16D16Helper(Opc);
9986 unsigned LoInstOpcode = D16Info->LoOp;
9988 DescSize = Desc.getSize();
9992 if (Opc == AMDGPU::V_FMA_MIX_F16_t16 || Opc == AMDGPU::V_FMA_MIX_BF16_t16) {
9995 DescSize = Desc.getSize();
10004 if (MI.isBranch() && ST.hasOffset3fBug())
10005 return InstSizeVerifyMode::NoVerify;
10006 return InstSizeVerifyMode::ExactSize;
10013 if (MI.memoperands_empty())
10025 static const std::pair<int, const char *> TargetIndices[] = {
10064std::pair<unsigned, unsigned>
10071 static const std::pair<unsigned, const char *> TargetFlags[] = {
10089 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
10105 return AMDGPU::WWM_COPY;
10107 return AMDGPU::COPY;
10124 if (!IsLRSplitInst && Opcode != AMDGPU::IMPLICIT_DEF)
10128 if (RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg)))
10129 return IsLRSplitInst;
10142 bool IsNullOrVectorRegister = true;
10146 IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg));
10149 return IsNullOrVectorRegister &&
10151 (!MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
10152 MI.modifiesRegister(AMDGPU::EXEC, &RI)));
10160 if (ST.hasAddNoCarryInsts())
10176 if (ST.hasAddNoCarryInsts())
10180 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
10182 : RS.scavengeRegisterBackwards(
10183 *RI.getBoolRC(), I, false,
10196 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
10197 case AMDGPU::SI_KILL_I1_TERMINATOR:
10206 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
10207 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
10208 case AMDGPU::SI_KILL_I1_PSEUDO:
10209 return get(AMDGPU::SI_KILL_I1_TERMINATOR);
10221 const unsigned OffsetBits =
10223 return (1 << OffsetBits) - 1;
10227 if (!ST.isWave32())
10230 if (MI.isInlineAsm())
10233 if (MI.getNumOperands() < MI.getNumExplicitOperands())
10236 for (auto &Op : MI.implicit_operands()) {
10237 if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
10238 Op.setReg(AMDGPU::VCC_LO);
10247 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
10251 const int16_t RCID = getOpRegClassID(MI.getDesc().operands()[Idx]);
10252 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
10268 if (Imm > MaxImm) {
10269 if (Imm <= MaxImm + 64) {
10271 Overflow = Imm - MaxImm;
10290 if (Overflow > 0) {
10298 if (ST.hasRestrictedSOffset())
10303 SOffset = Overflow;
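The shape of this split: keep as much as the immediate field can encode and move the excess into SOffset when the subtarget allows it. A hedged host-side sketch (MaxImm stands in for getMaxMUBUFImmOffset; the alignment and restricted-SOffset checks visible above are omitted):

#include <cstdint>

bool splitMUBUFOffsetSketch(uint32_t Imm, uint32_t MaxImm,
                            uint32_t &SOffset, uint32_t &ImmOffset) {
  SOffset = 0;
  ImmOffset = Imm;
  if (Imm > MaxImm) {
    ImmOffset = MaxImm;
    SOffset = Imm - MaxImm; // the real code also verifies this is encodable
  }
  return true;
}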
10341 if (!ST.hasFlatInstOffsets())
10349 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
10361 std::pair<int64_t, int64_t>
10364 int64_t RemainderOffset = COffsetVal;
10365 int64_t ImmField = 0;
10370 if (AllowNegative) {
10372 int64_t D = 1LL << NumBits;
10373 RemainderOffset = (COffsetVal / D) * D;
10374 ImmField = COffsetVal - RemainderOffset;
10376 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
10378 (ImmField % 4) != 0) {
10380 RemainderOffset += ImmField % 4;
10381 ImmField -= ImmField % 4;
10383 } else if (COffsetVal >= 0) {
10385 RemainderOffset = COffsetVal - ImmField;
10389 assert(RemainderOffset + ImmField == COffsetVal);
10390 return {ImmField, RemainderOffset};
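Condensed restatement of the split above as standalone C++ (the non-negative branch uses a plain low-bit mask as my assumption; the in-tree code derives the field width and signedness from the subtarget and flat variant):

#include <cassert>
#include <cstdint>
#include <utility>

std::pair<int64_t, int64_t> splitFlatOffsetSketch(int64_t COffsetVal,
                                                  unsigned NumBits,
                                                  bool AllowNegative) {
  int64_t RemainderOffset = COffsetVal;
  int64_t ImmField = 0;
  if (AllowNegative) {
    int64_t D = 1LL << NumBits;            // signed field: keep |imm| below D
    RemainderOffset = (COffsetVal / D) * D; // D-aligned part goes to the remainder
    ImmField = COffsetVal - RemainderOffset;
  } else if (COffsetVal >= 0) {
    ImmField = COffsetVal & ((1LL << NumBits) - 1);
    RemainderOffset = COffsetVal - ImmField;
  }
  assert(RemainderOffset + ImmField == COffsetVal);
  return {ImmField, RemainderOffset};
}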
10394 if (ST.hasNegativeScratchOffsetBug() &&
10402 switch (ST.getGeneration()) {
10431 case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
10432 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
10433 case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
10434 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
10435 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
10436 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
10437 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
10438 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
10445#define GENERATE_RENAMED_GFX9_CASES(OPCODE) \
10446 case OPCODE##_dpp: \
10447 case OPCODE##_e32: \
10448 case OPCODE##_e64: \
10449 case OPCODE##_e64_dpp: \
10450 case OPCODE##_sdwa:
10464 case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
10465 case AMDGPU::V_DIV_FIXUP_F16_gfx9_fake16_e64:
10466 case AMDGPU::V_FMA_F16_gfx9_e64:
10467 case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
10468 case AMDGPU::V_INTERP_P2_F16:
10469 case AMDGPU::V_MAD_F16_e64:
10470 case AMDGPU::V_MAD_U16_e64:
10471 case AMDGPU::V_MAD_I16_e64:
10480 "SIInsertWaitcnts should have promoted soft waitcnt instructions!");
10494 switch (ST.getGeneration()) {
10507 if (isMAI(Opcode)) {
10515 if (MCOp == AMDGPU::INSTRUCTION_LIST_END && ST.hasGFX11_7Insts())
10518 if (MCOp == AMDGPU::INSTRUCTION_LIST_END && ST.hasGFX1250Insts())
10525 if (ST.hasGFX90AInsts()) {
10526 uint32_t NMCOp = AMDGPU::INSTRUCTION_LIST_END;
10527 if (ST.hasGFX940Insts())
10529 if (NMCOp == AMDGPU::INSTRUCTION_LIST_END)
10531 if (NMCOp == AMDGPU::INSTRUCTION_LIST_END)
10533 if (NMCOp != AMDGPU::INSTRUCTION_LIST_END)
10539 if (MCOp == AMDGPU::INSTRUCTION_LIST_END)
10558 for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
10559 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
10560 auto &RegOp = MI.getOperand(1 + 2 * I);
10572 switch (MI.getOpcode()) {
10574 case AMDGPU::REG_SEQUENCE:
10578 case AMDGPU::INSERT_SUBREG:
10579 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
10596 if (!P.Reg.isVirtual())
10601 while (auto *MI = DefInst) {
10603 switch (MI->getOpcode()) {
10605 case AMDGPU::V_MOV_B32_e32: {
10606 auto &Op1 = MI->getOperand(1);
10635 auto *DefBB = DefMI.getParent();
10639 if (UseMI.getParent() != DefBB)
10642 const int MaxInstScan = 20;
10646 auto E = UseMI.getIterator();
10647 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
10648 if (I->isDebugInstr())
10651 if (++NumInst > MaxInstScan)
10654 if (I->modifiesRegister(AMDGPU::EXEC, TRI))
10667 auto *DefBB = DefMI.getParent();
10669 const int MaxUseScan = 10;
10673 auto &UseInst = *Use.getParent();
10676 if (UseInst.getParent() != DefBB || UseInst.isPHI())
10679 if (++NumUse > MaxUseScan)
10686 const int MaxInstScan = 20;
10690 for (auto I = std::next(DefMI.getIterator()); ; ++I) {
10693 if (I->isDebugInstr())
10696 if (++NumInst > MaxInstScan)
10709 if (Reg == VReg && --NumUse == 0)
10711 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
10720 auto Cur = MBB.begin();
10721 if (Cur != MBB.end())
10723 if (!Cur->isPHI() && Cur->readsRegister(Dst, nullptr))
10726 } while (Cur != MBB.end() && Cur != LastPHIIt);
10735 if (InsPt != MBB.end() &&
10736 (InsPt->getOpcode() == AMDGPU::SI_IF ||
10737 InsPt->getOpcode() == AMDGPU::SI_ELSE ||
10738 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
10739 InsPt->definesRegister(Src, nullptr)) {
10743 .addReg(Src, {}, SrcSubReg)
10786 if (isFullCopyInstr(MI)) {
10787 Register DstReg = MI.getOperand(0).getReg();
10788 Register SrcReg = MI.getOperand(1).getReg();
10810 unsigned *PredCost) const {
10811 if (MI.isBundle()) {
10814 unsigned Lat = 0, Count = 0;
10815 for (++I; I != E && I->isBundledWithPred(); ++I) {
10817 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
10819 return Lat + Count - 1;
10822 return SchedModel.computeInstrLatency(&MI);
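The bundle model here charges one issue slot per bundled instruction: the total is the slowest member's latency plus Count - 1 issue cycles. A standalone sketch of the same computation (illustrative name):

#include <algorithm>
#include <vector>

unsigned bundleLatency(const std::vector<unsigned> &MemberLatency) {
  unsigned Lat = 0;
  for (unsigned L : MemberLatency)
    Lat = std::max(Lat, L);                 // slowest member dominates
  return MemberLatency.empty()
             ? 0
             : Lat + unsigned(MemberLatency.size()) - 1; // plus issue cycles
}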
10829 return *CallAddrOp;
10836 unsigned Opcode = MI.getOpcode();
10838 auto HandleAddrSpaceCast = [this, &MRI](const MachineInstr &MI) {
10841 : MI.getOperand(1).getReg();
10845 unsigned SrcAS = SrcTy.getAddressSpace();
10848 ST.hasGloballyAddressableScratch()
10856 if (Opcode == TargetOpcode::G_ADDRSPACE_CAST)
10857 return HandleAddrSpaceCast(MI);
10860 auto IID = GI->getIntrinsicID();
10867 case Intrinsic::amdgcn_addrspacecast_nonnull:
10868 return HandleAddrSpaceCast(MI);
10869 case Intrinsic::amdgcn_if:
10870 case Intrinsic::amdgcn_else:
10884 if (Opcode == AMDGPU::G_LOAD || Opcode == AMDGPU::G_ZEXTLOAD ||
10885 Opcode == AMDGPU::G_SEXTLOAD) {
10886 if (MI.memoperands_empty())
10890 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10891 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10899 if (SIInstrInfo::isGenericAtomicRMWOpcode(Opcode) ||
10900 Opcode == AMDGPU::G_ATOMIC_CMPXCHG ||
10901 Opcode == AMDGPU::G_ATOMIC_CMPXCHG_WITH_SUCCESS ||
10910 Formatter = std::make_unique<AMDGPUMIRFormatter>(ST);
10911 return Formatter.get();
10919 unsigned opcode = MI.getOpcode();
10920 if (opcode == AMDGPU::V_READLANE_B32 ||
10921 opcode == AMDGPU::V_READFIRSTLANE_B32 ||
10922 opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR)
10925 if (isCopyInstr(MI)) {
10929 RI.getPhysRegBaseClass(srcOp.getReg());
10937 if (MI.isPreISelOpcode())
10952 if (MI.memoperands_empty())
10956 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10957 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10972 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
10974 if (!SrcOp.isReg())
10978 if (!Reg || !SrcOp.readsReg())
10984 if (RegBank && RegBank->getID() != AMDGPU::SGPRRegBankID)
11011 F, "ds_ordered_count unsupported for this calling conv"));
11025 Register &SrcReg2, int64_t &CmpMask,
11026 int64_t &CmpValue) const {
11027 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())
11030 switch (MI.getOpcode()) {
11033 case AMDGPU::S_CMP_EQ_U32:
11034 case AMDGPU::S_CMP_EQ_I32:
11035 case AMDGPU::S_CMP_LG_U32:
11036 case AMDGPU::S_CMP_LG_I32:
11037 case AMDGPU::S_CMP_LT_U32:
11038 case AMDGPU::S_CMP_LT_I32:
11039 case AMDGPU::S_CMP_GT_U32:
11040 case AMDGPU::S_CMP_GT_I32:
11041 case AMDGPU::S_CMP_LE_U32:
11042 case AMDGPU::S_CMP_LE_I32:
11043 case AMDGPU::S_CMP_GE_U32:
11044 case AMDGPU::S_CMP_GE_I32:
11045 case AMDGPU::S_CMP_EQ_U64:
11046 case AMDGPU::S_CMP_LG_U64:
11047 SrcReg = MI.getOperand(0).getReg();
11048 if (MI.getOperand(1).isReg()) {
11049 if (MI.getOperand(1).getSubReg())
11051 SrcReg2 = MI.getOperand(1).getReg();
11053 } else if (MI.getOperand(1).isImm()) {
11055 CmpValue = MI.getOperand(1).getImm();
11061 case AMDGPU::S_CMPK_EQ_U32:
11062 case AMDGPU::S_CMPK_EQ_I32:
11063 case AMDGPU::S_CMPK_LG_U32:
11064 case AMDGPU::S_CMPK_LG_I32:
11065 case AMDGPU::S_CMPK_LT_U32:
11066 case AMDGPU::S_CMPK_LT_I32:
11067 case AMDGPU::S_CMPK_GT_U32:
11068 case AMDGPU::S_CMPK_GT_I32:
11069 case AMDGPU::S_CMPK_LE_U32:
11070 case AMDGPU::S_CMPK_LE_I32:
11071 case AMDGPU::S_CMPK_GE_U32:
11072 case AMDGPU::S_CMPK_GE_I32:
11073 SrcReg = MI.getOperand(0).getReg();
11075 CmpValue = MI.getOperand(1).getImm();
11085 if (S->isLiveIn(AMDGPU::SCC))
11094 bool SIInstrInfo::invertSCCUse(MachineInstr *SCCDef) const {
11097 bool SCCIsDead = false;
11100 constexpr unsigned ScanLimit = 12;
11101 unsigned Count = 0;
11102 for (MachineInstr &MI :
11104 if (++Count > ScanLimit)
11106 if (MI.readsRegister(AMDGPU::SCC, &RI)) {
11107 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
11108 MI.getOpcode() == AMDGPU::S_CSELECT_B64 ||
11109 MI.getOpcode() == AMDGPU::S_CBRANCH_SCC0 ||
11110 MI.getOpcode() == AMDGPU::S_CBRANCH_SCC1)
11115 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
11128 for (MachineInstr *MI : InvertInstr) {
11129 if (MI->getOpcode() == AMDGPU::S_CSELECT_B32 ||
11130 MI->getOpcode() == AMDGPU::S_CSELECT_B64) {
11132 } else if (MI->getOpcode() == AMDGPU::S_CBRANCH_SCC0 ||
11133 MI->getOpcode() == AMDGPU::S_CBRANCH_SCC1) {
11134 MI->setDesc(get(MI->getOpcode() == AMDGPU::S_CBRANCH_SCC0
11135 ? AMDGPU::S_CBRANCH_SCC1
11136 : AMDGPU::S_CBRANCH_SCC0));
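The rewrite is semantics-preserving because each consumer absorbs the inversion: a select swaps its two operands, and a conditional branch flips between SCC0 and SCC1. A one-line equivalence sketch (illustrative C++, not from this file):

int selectUnderInvertedSCC(bool InvertedSCC, int A, int B) {
  // Original computed SCC ? A : B; with SCC inverted, swapping the
  // operands yields the same value.
  return InvertedSCC ? B : A;
}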
11149 bool NeedInversion) const {
11150 MachineInstr *KillsSCC = nullptr;
11155 if (MI.modifiesRegister(AMDGPU::SCC, &RI))
11157 if (MI.killsRegister(AMDGPU::SCC, &RI))
11160 if (NeedInversion && !invertSCCUse(SCCRedefine))
11162 if (MachineOperand *SccDef =
11164 SccDef->setIsDead(false);
11172 if (Def.getOpcode() != AMDGPU::S_CSELECT_B32 &&
11173 Def.getOpcode() != AMDGPU::S_CSELECT_B64)
11175 bool Op1IsNonZeroImm =
11176 Def.getOperand(1).isImm() && Def.getOperand(1).getImm() != 0;
11177 bool Op2IsZeroImm =
11178 Def.getOperand(2).isImm() && Def.getOperand(2).getImm() == 0;
11179 if (!Op1IsNonZeroImm || !Op2IsZeroImm)
11185 unsigned &NewDefOpc) {
11188 if (Def.getOpcode() != AMDGPU::S_ADD_I32 &&
11189 Def.getOpcode() != AMDGPU::S_ADD_U32)
11195 if ((!AddSrc1.isImm() || AddSrc1.getImm() != 1) &&
11201 if (Def.getOpcode() == AMDGPU::S_ADD_I32) {
11203 Def.findRegisterDefOperand(AMDGPU::SCC, nullptr);
11206 NewDefOpc = AMDGPU::S_ADD_U32;
11208 NeedInversion = !NeedInversion;
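The fold this enables rests on a simple equivalence: if SCC chose between a known-nonzero immediate and zero, comparing the select result against zero merely recomputes SCC (possibly inverted). Sketch (illustrative C++):

bool sccAfterCompare(bool SCC, int NonZeroImm) {
  int Sel = SCC ? NonZeroImm : 0; // s_cselect k, 0
  return Sel != 0;                // s_cmp_lg sel, 0 reproduces SCC
}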
11213 Register SrcReg2, int64_t CmpMask,
11222 const auto optimizeCmpSelect = [&CmpInstr, SrcReg, CmpValue, MRI,
11223 this](bool NeedInversion) -> bool {
11247 unsigned NewDefOpc = Def->getOpcode();
11253 if (!optimizeSCC(Def, &CmpInstr, NeedInversion))
11256 if (NewDefOpc != Def->getOpcode())
11257 Def->setDesc(get(NewDefOpc));
11266 if (Def->getOpcode() == AMDGPU::S_OR_B32 &&
11273 if (Def1 && Def1->getOpcode() == AMDGPU::COPY && Def2 &&
11281 optimizeSCC(Select, Def, false);
11288 const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
11289 this](int64_t ExpectedValue, unsigned SrcSize,
11290 bool IsReversible, bool IsSigned) -> bool {
11318 if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
11319 Def->getOpcode() != AMDGPU::S_AND_B64)
11323 const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {
11334 SrcOp = &Def->getOperand(2);
11335 else if (isMask(&Def->getOperand(2)))
11336 SrcOp = &Def->getOperand(1);
11344 if (IsSigned && BitNo == SrcSize - 1)
11347 ExpectedValue <<= BitNo;
11349 bool IsReversedCC = false;
11350 if (CmpValue != ExpectedValue) {
11353 IsReversedCC = CmpValue == (ExpectedValue ^ Mask);
11358 Register DefReg = Def->getOperand(0).getReg();
11362 if (!optimizeSCC(Def, &CmpInstr, false))
11373 unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
11374 : AMDGPU::S_BITCMP1_B32
11375 : IsReversedCC ? AMDGPU::S_BITCMP0_B64
11376 : AMDGPU::S_BITCMP1_B64;
11381 Def->eraseFromParent();
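The replacement is valid because comparing (x & (1 << n)) against zero or against the mask itself is a single-bit test, and S_BITCMP0/S_BITCMP1 set SCC from that bit directly. Equivalence sketch (illustrative C++):

#include <cstdint>

bool sccFromBitTest(uint32_t X, unsigned BitNo, bool Reversed) {
  bool Bit = (X >> BitNo) & 1;   // S_BITCMP1_B32 x, n
  return Reversed ? !Bit : Bit;  // S_BITCMP0_B32 covers the reversed condition
}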
11389 case AMDGPU::S_CMP_EQ_U32:
11390 case AMDGPU::S_CMP_EQ_I32:
11391 case AMDGPU::S_CMPK_EQ_U32:
11392 case AMDGPU::S_CMPK_EQ_I32:
11393 return optimizeCmpAnd(1, 32, true, false) ||
11394 optimizeCmpSelect(true);
11395 case AMDGPU::S_CMP_GE_U32:
11396 case AMDGPU::S_CMPK_GE_U32:
11397 return optimizeCmpAnd(1, 32, false, false);
11398 case AMDGPU::S_CMP_GE_I32:
11399 case AMDGPU::S_CMPK_GE_I32:
11400 return optimizeCmpAnd(1, 32, false, true);
11401 case AMDGPU::S_CMP_EQ_U64:
11402 return optimizeCmpAnd(1, 64, true, false);
11403 case AMDGPU::S_CMP_LG_U32:
11404 case AMDGPU::S_CMP_LG_I32:
11405 case AMDGPU::S_CMPK_LG_U32:
11406 case AMDGPU::S_CMPK_LG_I32:
11407 return optimizeCmpAnd(0, 32, true, false) ||
11408 optimizeCmpSelect(false);
11409 case AMDGPU::S_CMP_GT_U32:
11410 case AMDGPU::S_CMPK_GT_U32:
11411 return optimizeCmpAnd(0, 32, false, false);
11412 case AMDGPU::S_CMP_GT_I32:
11413 case AMDGPU::S_CMPK_GT_I32:
11414 return optimizeCmpAnd(0, 32, false, true);
11415 case AMDGPU::S_CMP_LG_U64:
11416 return optimizeCmpAnd(0, 64, true, false) ||
11417 optimizeCmpSelect(false);
11424 AMDGPU::OpName OpName) const {
11425 if (!ST.needsAlignedVGPRs())
11428 int OpNo = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
11440 bool IsAGPR = RI.isAGPR(MRI, DataReg);
11442 IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);
11446 : &AMDGPU::VReg_64_Align2RegClass);
11448 .addReg(DataReg, {}, Op.getSubReg())
11453 Op.setSubReg(AMDGPU::sub0);
11468 if (ST.hasGFX1250Insts())
11475 unsigned Opcode = MI.getOpcode();
11481 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
11482 Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
11485 if (!ST.hasGFX940Insts())
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isUndef(const MachineInstr &MI)
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static cl::opt< bool > Fix16BitCopies("amdgpu-fix-16-bit-physreg-copies", cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), cl::init(true), cl::ReallyHidden)
static void emitLoadScalarOpsFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, MachineBasicBlock &LoopBB, MachineBasicBlock &BodyBB, const DebugLoc &DL, ArrayRef< MachineOperand * > ScalarOps, ArrayRef< Register > PhySGPRs={})
static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RC, bool Forward)
static unsigned getNewFMAInst(const GCNSubtarget &ST, unsigned Opc)
static void indirectCopyToAGPR(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, RegScavenger &RS, bool RegsOverlap, Register ImpDefSuperReg=Register(), Register ImpUseSuperReg=Register())
Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908.
static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize)
static bool compareMachineOp(const MachineOperand &Op0, const MachineOperand &Op1)
static bool isStride64(unsigned Opc)
static MachineBasicBlock * generateWaterFallLoop(const SIInstrInfo &TII, MachineInstr &MI, ArrayRef< MachineOperand * > ScalarOps, MachineDominatorTree *MDT, MachineBasicBlock::iterator Begin=nullptr, MachineBasicBlock::iterator End=nullptr, ArrayRef< Register > PhySGPRs={})
#define GENERATE_RENAMED_GFX9_CASES(OPCODE)
static std::tuple< unsigned, unsigned > extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc)
static bool followSubRegDef(MachineInstr &MI, TargetInstrInfo::RegSubRegPair &RSR)
static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize)
static MachineInstr * swapImmOperands(MachineInstr &MI, MachineOperand &NonRegOp1, MachineOperand &NonRegOp2)
static void copyFlagsToImplicitVCC(MachineInstr &MI, const MachineOperand &Orig)
static bool offsetsDoNotOverlap(LocationSize WidthA, int OffsetA, LocationSize WidthB, int OffsetB)
static unsigned getWWMRegSpillSaveOpcode(unsigned Size, bool IsVectorSuperClass)
static bool memOpsHaveSameBaseOperands(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getWWMRegSpillRestoreOpcode(unsigned Size, bool IsVectorSuperClass)
static bool setsSCCIfResultIsZero(const MachineInstr &Def, bool &NeedInversion, unsigned &NewDefOpc)
static bool isSCCDeadOnExit(MachineBasicBlock *MBB)
static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, int64_t &Imm, MachineInstr **DefMI=nullptr)
static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize)
static unsigned subtargetEncodingFamily(const GCNSubtarget &ST)
static void preserveCondRegFlags(MachineOperand &CondReg, const MachineOperand &OrigCond)
static Register findImplicitSGPRRead(const MachineInstr &MI)
static unsigned getNewFMAAKInst(const GCNSubtarget &ST, unsigned Opc)
static cl::opt< unsigned > BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), cl::desc("Restrict range of branch instructions (DEBUG)"))
static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, MachineInstr &NewMI)
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getSGPRSpillRestoreOpcode(unsigned Size)
static bool isRegOrFI(const MachineOperand &MO)
static unsigned getSGPRSpillSaveOpcode(unsigned Size)
static constexpr AMDGPU::OpName ModifierOpNames[]
static unsigned getVGPRSpillSaveOpcode(unsigned Size)
static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const char *Msg="illegal VGPR to SGPR copy")
static MachineInstr * swapRegAndNonRegOperand(MachineInstr &MI, MachineOperand &RegOp, MachineOperand &NonRegOp)
static bool shouldReadExec(const MachineInstr &MI)
static unsigned getNewFMAMKInst(const GCNSubtarget &ST, unsigned Opc)
static bool isRenamedInGFX9(int Opcode)
static TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd)
static bool changesVGPRIndexingMode(const MachineInstr &MI)
static bool isSubRegOf(const SIRegisterInfo &TRI, const MachineOperand &SuperVec, const MachineOperand &SubReg)
static bool foldableSelect(const MachineInstr &Def)
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, AMDGPU::OpName OpName)
Returns true if both nodes have the same value for the given operand Op, or if both nodes do not have...
static unsigned getAVSpillSaveOpcode(unsigned Size)
static unsigned getNumOperandsNoGlue(SDNode *Node)
static bool canRemat(const MachineInstr &MI)
static unsigned getAVSpillRestoreOpcode(unsigned Size)
static unsigned getVGPRSpillRestoreOpcode(unsigned Size)
Interface definition for SIInstrInfo.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
const unsigned CSelectOpc
static const LaneMaskConstants & get(const GCNSubtarget &ST)
const unsigned XorTermOpc
const unsigned OrSaveExecOpc
const unsigned AndSaveExecOpc
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
Get the first element.
size_t size() const
Get the array size.
bool empty() const
Check if the array is empty.
uint64_t getZExtValue() const
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Diagnostic information for unsupported feature in backend.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
void getExitingBlocks(SmallVectorImpl< BlockT * > &TmpStorage) const
Return all blocks of this cycle that have successor outside of this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
const GenericCycle * getParentCycle() const
Itinerary data supplied by a subtarget to be used by a target.
constexpr unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
LiveInterval - This class represents the liveness of a register, or stack slot.
bool hasInterval(Register Reg) const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
static const MCBinaryExpr * createAnd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCBinaryExpr * createAShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
unsigned getOpcode() const
Return the opcode number for this descriptor.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint8_t OperandType
Information about the type of the operand.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
LLVM_ABI void setVariableValue(const MCExpr *Value)
Helper class for constructing bundles of MachineInstrs.
MachineBasicBlock::instr_iterator begin() const
Return an iterator to the first bundled instruction.
MIBundleBuilder & append(MachineInstr *MI)
Insert MI into MBB by appending it to the instructions in the bundle.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
LLVM_ABI void ChangeToFrameIndex(int Idx, unsigned TargetFlags=0)
Replace this operand with a frame index.
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
void setIsKill(bool Val=true)
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isTargetIndex() const
isTargetIndex - Tests if this is a MO_TargetIndex operand.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLVM_ABI void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps)
Move NumOps operands from Src to Dst, updating use-def lists as needed.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
bool reservedRegsFrozen() const
reservedRegsFrozen - Returns true after freezeReservedRegs() was called to ensure the set of reserved...
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg)
setRegAllocationHint - Specify a register allocation hint for the specified virtual register.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
void setSimpleHint(Register VReg, Register PrefReg)
Specify the preferred (target independent) register allocation hint for the specified virtual registe...
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
iterator_range< use_iterator > use_operands(Register Reg) const
LLVM_ABI void removeRegOperandFromUseList(MachineOperand *MO)
Remove MO from its use-def list.
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI void addRegOperandToUseList(MachineOperand *MO)
Add MO to the linked list of operands for its register.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isLegalMUBUFImmOffset(unsigned Imm) const
bool isInlineConstant(const APInt &Imm) const
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const
Fix operands in MI to satisfy constant bus requirements.
bool canAddToBBProlog(const MachineInstr &MI) const
static bool isDS(const MachineInstr &MI)
MachineBasicBlock * legalizeOperands(MachineInstr &MI, MachineDominatorTree *MDT=nullptr) const
Legalize all operands in this instruction.
bool areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, int64_t &Offset0, int64_t &Offset1) const override
unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const final
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
static bool isNeverUniform(const MachineInstr &MI)
bool isXDLWMMA(const MachineInstr &MI) const
bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const override
bool isSpill(uint32_t Opcode) const
uint64_t getDefaultRsrcDataFormat() const
static bool isSOPP(const MachineInstr &MI)
bool mayAccessScratch(const MachineInstr &MI) const
bool isIGLP(unsigned Opcode) const
static bool isFLATScratch(const MachineInstr &MI)
const MCInstrDesc & getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, bool IsSGPR) const
MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg) const
Return a partially built integer add instruction without carry.
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0, int64_t Offset1, unsigned NumLoads) const override
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, Align Alignment=Align(4)) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
void moveToVALU(SIInstrWorklist &Worklist, MachineDominatorTree *MDT) const
Replace the instructions opcode with the equivalent VALU opcode.
static bool isSMRD(const MachineInstr &MI)
void restoreExec(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, SlotIndexes *Indexes=nullptr) const
bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Returns true if this operand uses the constant bus.
static unsigned getMaxMUBUFImmOffset(const GCNSubtarget &ST)
static unsigned getFoldableCopySrcIdx(const MachineInstr &MI)
unsigned getOpSize(uint32_t Opcode, unsigned OpNo) const
Return the size in bytes of the operand OpNo on the given.
void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
static std::optional< int64_t > extractSubregFromImm(int64_t ImmVal, unsigned SubRegIndex)
Return the extracted immediate value in a subregister use from a constant materialized in a super reg...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isMTBUF(const MachineInstr &MI)
const MCInstrDesc & getIndirectGPRIDXPseudo(unsigned VecSize, bool IsIndirectSrc) const
void insertReturn(MachineBasicBlock &MBB) const
static bool isDGEMM(unsigned Opcode)
static bool isEXP(const MachineInstr &MI)
static bool isSALU(const MachineInstr &MI)
static bool setsSCCIfResultIsNonZero(const MachineInstr &MI)
const MIRFormatter * getMIRFormatter() const override
void legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineBasicBlock::iterator I, const TargetRegisterClass *DstRC, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const
MachineInstr * buildShrunkInst(MachineInstr &MI, unsigned NewOpcode) const
static bool isVOP2(const MachineInstr &MI)
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
static bool isSDWA(const MachineInstr &MI)
const MCInstrDesc & getKillTerminatorFromPseudo(unsigned Opcode) const
void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const override
static bool isGather4(const MachineInstr &MI)
MachineInstr * getWholeWaveFunctionSetup(MachineFunction &MF) const
bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO would be a valid operand for the given operand definition OpInfo.
static bool isDOT(const MachineInstr &MI)
InstSizeVerifyMode getInstSizeVerifyMode(const MachineInstr &MI) const override
MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const override
bool hasModifiers(unsigned Opcode) const
Return true if this instruction has any modifiers.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
static bool isSWMMAC(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
bool isHighLatencyDef(int Opc) const override
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const
Legalize the OpIndex operand of this instruction by inserting a MOV.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isVOPC(const MachineInstr &MI)
void removeModOperands(MachineInstr &MI) const
std::pair< int64_t, int64_t > splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, uint64_t FlatVariant) const
Split COffsetVal into {immediate offset field, remainder offset} values.
unsigned getVectorRegSpillRestoreOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
bool isXDL(const MachineInstr &MI) const
Register isStackAccess(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
static bool isVIMAGE(const MachineInstr &MI)
void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const
static bool isSOP2(const MachineInstr &MI)
static bool isGWS(const MachineInstr &MI)
bool hasRAWDependency(const MachineInstr &FirstMI, const MachineInstr &SecondMI) const
bool isLegalAV64PseudoImm(uint64_t Imm) const
Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
bool isNeverCoissue(MachineInstr &MI) const
static bool isBUF(const MachineInstr &MI)
void handleCopyToPhysHelper(SIInstrWorklist &Worklist, Register DstReg, MachineInstr &Inst, MachineRegisterInfo &MRI, DenseMap< MachineInstr *, V2PhysSCopyInfo > &WaterFalls, DenseMap< MachineInstr *, bool > &V2SPhyCopiesToErase) const
bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const
const TargetRegisterClass * getPreferredSelectRegClass(unsigned Size) const
bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx, unsigned toIdx) const
static bool isFLATGlobal(const MachineInstr &MI)
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isGlobalMemoryObject(const MachineInstr *MI) const override
static bool isVSAMPLE(const MachineInstr &MI)
bool isBufferSMRD(const MachineInstr &MI) const
static bool isKillTerminator(unsigned Opcode)
bool isVOPDAntidependencyAllowed(const MachineInstr &MI) const
If OpX is multicycle, anti-dependencies are not allowed.
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx0, unsigned &SrcOpIdx1) const override
void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, bool IsSCCLive, SlotIndexes *Indexes=nullptr) const
bool hasVALU32BitEncoding(unsigned Opcode) const
Return true if this 64-bit VALU instruction has a 32-bit encoding.
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const
Register isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
unsigned buildExtractSubReg(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const
Legalize operands in MI by either commuting it or inserting a copy of src1.
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const final
static bool isTRANS(const MachineInstr &MI)
static bool isImage(const MachineInstr &MI)
static bool isSOPK(const MachineInstr &MI)
const TargetRegisterClass * getOpRegClass(const MachineInstr &MI, unsigned OpNo) const
Return the correct register class for OpNo.
MachineBasicBlock * insertSimulatedTrap(MachineRegisterInfo &MRI, MachineBasicBlock &MBB, MachineInstr &MI, const DebugLoc &DL) const
Build instructions that simulate the behavior of an s_trap 2 instruction for hardware (namely,...
static unsigned getNonSoftWaitcntOpcode(unsigned Opcode)
static unsigned getDSShaderTypeValue(const MachineFunction &MF)
static bool isFoldableCopy(const MachineInstr &MI)
bool mayAccessLDSThroughFlat(const MachineInstr &MI) const
bool isIgnorableUse(const MachineOperand &MO) const override
static bool isMUBUF(const MachineInstr &MI)
bool expandPostRAPseudo(MachineInstr &MI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
void createWaterFallForSiCall(MachineInstr *MI, MachineDominatorTree *MDT, ArrayRef< MachineOperand * > ScalarOps, ArrayRef< Register > PhySGPRs={}) const
Wrapper function for generating a waterfall for instruction MI. This function takes into consideration...
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isSegmentSpecificFLAT(const MachineInstr &MI)
bool isReMaterializableImpl(const MachineInstr &MI) const override
static bool isVOP3(const MCInstrDesc &Desc)
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool physRegUsesConstantBus(const MachineOperand &Reg) const
static bool isF16PseudoScalarTrans(unsigned Opcode)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const
static bool isDPP(const MachineInstr &MI)
bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const
static bool isMFMA(const MachineInstr &MI)
bool isLowLatencyInstruction(const MachineInstr &MI) const
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies a value from one register to ano...
void mutateAndCleanupImplicit(MachineInstr &MI, const MCInstrDesc &NewDesc) const
ValueUniformity getGenericValueUniformity(const MachineInstr &MI) const
static bool isMAI(const MCInstrDesc &Desc)
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const override
static bool usesLGKM_CNT(const MachineInstr &MI)
void legalizeOperandsVALUt16(MachineInstr &Inst, MachineRegisterInfo &MRI) const
Fix operands in Inst when lowering 16-bit SALU instructions to VALU.
bool isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo, const MachineOperand &MO) const
bool canShrink(const MachineInstr &MI, const MachineRegisterInfo &MRI) const
const MachineOperand & getCalleeOperand(const MachineInstr &MI) const override
bool isAsmOnlyOpcode(int MCOp) const
Check if this instruction should only be used by assembler.
bool isAlwaysGDS(uint32_t Opcode) const
static bool isVGPRSpill(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
This is used by the post-RA scheduler (SchedulePostRAList.cpp).
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, uint64_t FlatVariant) const
Returns if Offset is legal for the subtarget as the offset to a FLAT encoded instruction with the giv...
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
int64_t getNamedImmOperand(const MachineInstr &MI, AMDGPU::OpName OperandName) const
Get the required immediate operand.
ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const override
bool regUsesConstantBus(const MachineOperand &Reg, const MachineRegisterInfo &MRI) const
static bool isMIMG(const MachineInstr &MI)
MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO (a register operand) is a legal register for the given operand description or operand ind...
bool allowNegativeFlatOffset(uint64_t FlatVariant) const
Returns true if negative offsets are allowed for the given FlatVariant.
static unsigned getNumWaitStates(const MachineInstr &MI)
Return the number of wait states that result from executing this instruction.
unsigned getVectorRegSpillSaveOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
unsigned getVALUOp(const MachineInstr &MI) const
static bool modifiesModeRegister(const MachineInstr &MI)
Return true if the instruction modifies the mode register.
Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI, const TargetRegisterClass *DstRC=nullptr) const
Copy a value from a VGPR (SrcReg) to an SGPR.
bool hasDivergentBranch(const MachineBasicBlock *MBB) const
Return whether the block terminates with a divergent branch.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void fixImplicitOperands(MachineInstr &MI) const
bool moveFlatAddrToVGPR(MachineInstr &Inst) const
Change SADDR form of a FLAT Inst to its VADDR form if saddr operand was moved to VGPR.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
void createReadFirstLaneFromCopyToPhysReg(MachineRegisterInfo &MRI, Register DstReg, MachineInstr &Inst) const
bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, AMDGPU::OpName Src0OpName, MachineOperand &Src1, AMDGPU::OpName Src1OpName) const
Register insertNE(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const
This function is used to determine if an instruction can be safely executed under EXEC = 0 without ha...
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
static bool isAtomic(const MachineInstr &MI)
bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const override
bool isLiteralOperandLegal(const MCInstrDesc &InstDesc, const MCOperandInfo &OpInfo) const
static bool isWWMRegSpillOpcode(uint32_t Opcode)
static bool sopkIsZext(unsigned Opcode)
static bool isSGPRSpill(const MachineInstr &MI)
static bool isWMMA(const MachineInstr &MI)
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool mayReadEXEC(const MachineRegisterInfo &MRI, const MachineInstr &MI) const
Returns true if the instruction could potentially depend on the value of exec.
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
void insertVectorSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
std::pair< MachineInstr *, MachineInstr * > expandMovDPP64(MachineInstr &MI) const
Register insertEQ(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
static bool isSOPC(const MachineInstr &MI)
static bool isFLAT(const MachineInstr &MI)
static bool isVALU(const MachineInstr &MI)
bool isBarrier(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx0, unsigned OpIdx1) const override
int pseudoToMCOpcode(int Opcode) const
Return a target-specific opcode if Opcode is a pseudo instruction.
const MCInstrDesc & getMCOpcodeFromPseudo(unsigned Opcode) const
Return the descriptor of the target-specific machine instruction that corresponds to the specified ps...
bool isLegalGFX12PlusPackedMathFP32Operand(const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand for gfx12+ packed math FP32 instructions.
static bool usesVM_CNT(const MachineInstr &MI)
MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const override
static bool isFixedSize(const MachineInstr &MI)
bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const override
LLVM_READONLY int commuteOpcode(unsigned Opc) const
ValueUniformity getValueUniformity(const MachineInstr &MI) const final
uint64_t getScratchRsrcWords23() const
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, AMDGPU::OpName OperandName) const
Returns the operand named Op.
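Typical usage in this backend, sketched under the assumption that TII is a const SIInstrInfo * and MI actually carries an offset operand; getNamedOperand returns nullptr when the named operand is absent, while getNamedImmOperand (above) asserts it exists and returns the immediate directly:
  // Look up an operand by name rather than by position.
  if (const MachineOperand *SOff =
          TII->getNamedOperand(MI, AMDGPU::OpName::offset)) {
    int64_t Imm = SOff->getImm(); // only valid if the operand is an immediate
    (void)Imm;
  }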
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO=nullptr) const
Check if MO is a legal operand if it were the OpIdx operand for MI.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
std::optional< int64_t > getImmOrMaterializedImm(MachineOperand &Op) const
void moveToVALUImpl(SIInstrWorklist &Worklist, MachineDominatorTree *MDT, MachineInstr &Inst, DenseMap< MachineInstr *, V2PhysSCopyInfo > &WaterFalls, DenseMap< MachineInstr *, bool > &V2SPhyCopiesToErase) const
static bool isLDSDMA(const MachineInstr &MI)
static bool isVOP1(const MachineInstr &MI)
SIInstrInfo(const GCNSubtarget &ST)
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool hasAnyModifiersSet(const MachineInstr &MI) const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
Register getLongBranchReservedReg() const
bool isWholeWaveFunction() const
Register getStackPtrOffsetReg() const
unsigned getMaxMemoryClusterDWords() const
void setHasSpilledVGPRs(bool Spill=true)
bool isWWMReg(Register Reg) const
bool checkFlag(Register Reg, uint8_t Flag) const
void setHasSpilledSGPRs(bool Spill=true)
unsigned getScratchReservedForDynamicVGPRs() const
static unsigned getSubRegFromChannel(unsigned Channel, unsigned NumRegs=1)
ArrayRef< int16_t > getRegSplitParts(const TargetRegisterClass *RC, unsigned EltSize) const
unsigned getHWRegIndex(MCRegister Reg) const
bool isSGPRReg(const MachineRegisterInfo &MRI, Register Reg) const
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getChannelFromSubReg(unsigned SubReg) const
static bool isAGPRClass(const TargetRegisterClass *RC)
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late=false)
Insert the given machine instruction into the mapping.
Implements a dense probed hash-table based set with some number of buckets stored inline.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
const MCAsmInfo & getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
bool isPackedFP32Inst(unsigned Opc)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
const uint64_t RSRC_DATA_FORMAT
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool getWMMAIsXDL(unsigned Opc)
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isDPMACCInstruction(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
LLVM_READONLY int32_t getCommuteRev(uint32_t Opcode)
LLVM_READONLY int32_t getCommuteOrig(uint32_t Opcode)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
LLVM_READONLY int32_t getGlobalVaddrOp(uint32_t Opcode)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
LLVM_READONLY int32_t getMFMAEarlyClobberOp(uint32_t Opcode)
bool getMAIIsGFX940XDL(unsigned Opc)
const uint64_t RSRC_ELEMENT_SIZE_SHIFT
bool isIntrinsicAlwaysUniform(unsigned IntrID)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
LLVM_READONLY int32_t getIfAddr64Inst(uint32_t Opcode)
Check if Opcode is an Addr64 opcode.
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
const uint64_t RSRC_TID_ENABLE
LLVM_READONLY int32_t getVOPe32(uint32_t Opcode)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
constexpr bool isSISrcOperand(const MCOperandInfo &OpInfo)
Is this an AMDGPU-specific source operand?
bool isGenericAtomic(unsigned Opc)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating-point values.
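On AMDGPU the inline integer constants are exactly the values -16 through 64; anything outside that range costs a literal dword. A sketch of the check, assuming that documented range:
  // Integers encodable inline, with no 32-bit literal attached.
  constexpr bool isInlinableIntSketch(int64_t Literal) {
    return Literal >= -16 && Literal <= 64;
  }
  static_assert(isInlinableIntSketch(64) && !isInlinableIntSketch(65));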
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
LLVM_READONLY int32_t getAddr64Inst(uint32_t Opcode)
int32_t getMCOpcode(uint32_t Opcode, unsigned Gen)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_IMM_V2FP16_SPLAT
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_INLINE_C_AV64_PSEUDO
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
LLVM_READONLY int32_t getBasicFromSDWAOp(uint32_t Opcode)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
const uint64_t RSRC_INDEX_STRIDE_SHIFT
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
LLVM_READONLY int32_t getFlatScratchInstSVfromSS(uint32_t Opcode)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
LLVM_READNONE constexpr bool isGraphics(CallingConv::ID CC)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
LLVM_ABI void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O)
Create RegSubRegPair from a register MachineOperand.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
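A sketch of the range form over machine operands; the predicate here is illustrative only:
  // True when every operand of MI is a register operand.
  bool AllRegs = all_of(MI.operands(),
                        [](const MachineOperand &MO) { return MO.isReg(); });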
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
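The canonical pattern in this file: inside a SIInstrInfo member, get() resolves the opcode descriptor and the builder chains operand adds. A sketch, where MBB, MI, DL, and DestReg are whatever the caller has in hand:
  // Emit v_mov_b32 DestReg, 0 immediately before iterator MI in MBB.
  BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(0);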
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
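A few concrete values, the kind these helpers feed into the offset-legality checks above:
  static_assert(maxUIntN(12) == 4095);               // 2^12 - 1
  static_assert(isInt<16>(-32768) && !isInt<16>(32768));
  static_assert(isUInt<8>(255) && !isUInt<8>(256));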
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI, const MachineInstr &UseMI)
Return false if EXEC is not changed between the def of VReg at DefMI and the use at UseMI.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
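The early-increment adaptor advances the iterator before the loop body runs, which is what makes erase-while-iterating safe. A sketch over a basic block; isDead is a hypothetical predicate standing in for whatever the caller checks:
  // Erasing I does not invalidate the loop, because the underlying
  // iterator already points at the next instruction.
  for (MachineInstr &I : make_early_inc_range(MBB)) {
    if (isDead(I)) // hypothetical predicate
      I.eraseFromParent();
  }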
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is Skew mod Align.
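Worked values from the definition; the optional Skew shifts which residues count as "aligned":
  static_assert(alignDown(13, 8) == 8);     // largest multiple of 8 <= 13
  static_assert(alignDown(13, 8, 3) == 11); // largest value <= 13 that is 3 (mod 8)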
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg)
Return the SubReg component from REG_SEQUENCE.
static const MachineMemOperand::Flags MONoClobber
Mark the MMO of a uniform load if there are no potentially clobbering stores on any path from the sta...
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
MachineInstr * getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, const MachineRegisterInfo &MRI)
Return the defining instruction for a given reg:subreg pair skipping copy like instructions and subre...
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
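Splitting a 64-bit value this way is the arithmetic behind expanding a 64-bit move into two 32-bit moves. Worked values:
  constexpr uint64_t Imm = 0x123456789ABCDEF0ull;
  static_assert(Hi_32(Imm) == 0x12345678u);
  static_assert(Lo_32(Imm) == 0x9ABCDEF0u);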
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
static const MachineMemOperand::Flags MOCooperative
Mark the MMO of cooperative load/store atomics.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned DefaultMemoryClusterDWordsLimit
constexpr unsigned BitWidth
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
static const MachineMemOperand::Flags MOLastUse
Mark the MMO of a load as the last use.
constexpr T reverseBits(T Val)
Reverse the bits in Val.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
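Together these two give the usual decode pattern for a signed bitfield: mask the field out, then sign-extend from its width. Worked values:
  static_assert(maskTrailingOnes<uint32_t>(12) == 0xFFFu);
  // All 13 low bits set, read as a signed 13-bit field, is -1:
  static_assert(SignExtend64<13>(0x1FFF) == -1);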
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
constexpr RegState getUndefRegState(bool B)
ValueUniformity
Enum describing how values behave with respect to uniformity and divergence, to answer the question: ...
@ AlwaysUniform
The result value is always uniform.
@ NeverUniform
The result value can never be assumed to be uniform.
@ Default
The result value is uniform if and only if all operands are uniform.
MachineCycleInfo::CycleT MachineCycle
static const MachineMemOperand::Flags MOThreadPrivate
Mark the MMO of accesses to memory locations that are never written to by other threads.
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI)
Return false if EXEC is not changed between the def of VReg at DefMI and all its uses.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Helper struct for the implementation of 3-address conversion to communicate updates made to instructi...
MachineInstr * RemoveMIUse
Other instruction whose def is no longer used by the converted instruction.
static constexpr uint64_t encode(Fields... Values)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
constexpr bool all() const
SparseBitVector AliveBlocks
AliveBlocks - Set of blocks in which this value is alive completely through.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Utility to store machine instructions worklist.
MachineInstr * top() const
bool isDeferred(MachineInstr *MI)
SetVector< MachineInstr * > & getDeferredList()
void insert(MachineInstr *MI)
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.